1 //===-- Value.cpp - Implement the Value class -----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the Value, ValueHandle, and User classes.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "llvm/IR/Value.h"
14 #include "LLVMContextImpl.h"
15 #include "llvm/ADT/DenseMap.h"
16 #include "llvm/ADT/SetVector.h"
17 #include "llvm/ADT/SmallString.h"
18 #include "llvm/IR/Constant.h"
19 #include "llvm/IR/Constants.h"
20 #include "llvm/IR/DataLayout.h"
21 #include "llvm/IR/DerivedTypes.h"
22 #include "llvm/IR/DerivedUser.h"
23 #include "llvm/IR/GetElementPtrTypeIterator.h"
24 #include "llvm/IR/InstrTypes.h"
25 #include "llvm/IR/Instructions.h"
26 #include "llvm/IR/IntrinsicInst.h"
27 #include "llvm/IR/Module.h"
28 #include "llvm/IR/Operator.h"
29 #include "llvm/IR/Statepoint.h"
30 #include "llvm/IR/ValueHandle.h"
31 #include "llvm/IR/ValueSymbolTable.h"
32 #include "llvm/Support/CommandLine.h"
33 #include "llvm/Support/Debug.h"
34 #include "llvm/Support/ErrorHandling.h"
35 #include "llvm/Support/ManagedStatic.h"
36 #include "llvm/Support/raw_ostream.h"
37 #include <algorithm>
38
39 using namespace llvm;
40
// Command-line cap on the length of names given to non-GlobalValue values;
// longer names are truncated in Value::setNameImpl.
static cl::opt<unsigned> NonGlobalValueMaxNameSize(
    "non-global-value-max-name-size", cl::Hidden, cl::init(1024),
    cl::desc("Maximum size for the name of non-global values."));
44
45 //===----------------------------------------------------------------------===//
46 // Value Class
47 //===----------------------------------------------------------------------===//
/// Debug-build guard: every Value must be created with a non-null type.
/// Returns \p Ty unchanged so it can be used inside an initializer list.
static inline Type *checkType(Type *Ty) {
  assert(Ty && "Value defined with a null type: Error!");
  return Ty;
}
52
/// Construct a Value of type \p ty with subclass ID \p scid.  The use list
/// starts empty and all flag bit-fields start cleared.
Value::Value(Type *ty, unsigned scid)
    : VTy(checkType(ty)), UseList(nullptr), SubclassID(scid), HasValueHandle(0),
      SubclassOptionalData(0), SubclassData(0), NumUserOperands(0),
      IsUsedByMD(false), HasName(false), HasMetadata(false) {
  // ConstantFirstVal is 0, so the commented-out "SubclassID < ConstantFirstVal"
  // test below would be vacuous; this static_assert records that assumption.
  static_assert(ConstantFirstVal == 0, "!(SubclassID < ConstantFirstVal)");
  // FIXME: Why isn't this in the subclass gunk??
  // Note, we cannot call isa<CallInst> before the CallInst has been
  // constructed.
  if (SubclassID == Instruction::Call || SubclassID == Instruction::Invoke ||
      SubclassID == Instruction::CallBr)
    assert((VTy->isFirstClassType() || VTy->isVoidTy() || VTy->isStructTy()) &&
           "invalid CallInst type!");
  else if (SubclassID != BasicBlockVal &&
           (/*SubclassID < ConstantFirstVal ||*/ SubclassID > ConstantLastVal))
    assert((VTy->isFirstClassType() || VTy->isVoidTy()) &&
           "Cannot create non-first-class values except for constants!");
  // Keep Value small: two pointers (VTy, UseList) plus two words of bits.
  static_assert(sizeof(Value) == 2 * sizeof(void *) + 2 * sizeof(unsigned),
                "Value too big");
}
72
/// Destroy this value: notify handles and metadata, verify no uses remain
/// (debug builds), and free any associated name.
Value::~Value() {
  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsDeleted(this);
  if (isUsedByMetadata())
    ValueAsMetadata::handleDeletion(this);

  // Remove associated metadata from context.
  if (HasMetadata)
    clearMetadata();

#ifndef NDEBUG // Only in -g mode...
  // Check to make sure that there are no uses of this value that are still
  // around when the value is destroyed. If there are, then we have a dangling
  // reference and something is wrong. This code is here to print out where
  // the value is still being referenced.
  //
  // Note that use_empty() cannot be called here, as it eventually downcasts
  // 'this' to GlobalValue (derived class of Value), but GlobalValue has already
  // been destructed, so accessing it is UB.
  //
  if (!materialized_use_empty()) {
    dbgs() << "While deleting: " << *VTy << " %" << getName() << "\n";
    for (auto *U : users())
      dbgs() << "Use still stuck around after Def is destroyed:" << *U << "\n";
  }
#endif
  assert(materialized_use_empty() && "Uses remain when a value is destroyed!");

  // If this value is named, destroy the name. This should not be in a symtab
  // at this point.
  destroyValueName();
}
106
/// Delete this value through the correct derived-class destructor, dispatching
/// on the value ID.  The case list is generated from Value.def (all value
/// kinds) and Instruction.def (each concrete instruction opcode).
void Value::deleteValue() {
  switch (getValueID()) {
#define HANDLE_VALUE(Name)                                                     \
  case Value::Name##Val:                                                       \
    delete static_cast<Name *>(this);                                          \
    break;
// Memory SSA values are deleted via the DeleteValue callback stored on
// DerivedUser rather than a direct delete.
#define HANDLE_MEMORY_VALUE(Name)                                              \
  case Value::Name##Val:                                                       \
    static_cast<DerivedUser *>(this)->DeleteValue(                             \
        static_cast<DerivedUser *>(this));                                     \
    break;
// Constants are uniqued by the context and must go through destroyConstant.
#define HANDLE_CONSTANT(Name)                                                  \
  case Value::Name##Val:                                                       \
    llvm_unreachable("constants should be destroyed with destroyConstant");    \
    break;
#define HANDLE_INSTRUCTION(Name) /* nothing */
#include "llvm/IR/Value.def"

#define HANDLE_INST(N, OPC, CLASS)                                             \
  case Value::InstructionVal + Instruction::OPC:                               \
    delete static_cast<CLASS *>(this);                                         \
    break;
#define HANDLE_USER_INST(N, OPC, CLASS)
#include "llvm/IR/Instruction.def"

  default:
    llvm_unreachable("attempting to delete unknown value kind");
  }
}
136
destroyValueName()137 void Value::destroyValueName() {
138 ValueName *Name = getValueName();
139 if (Name) {
140 MallocAllocator Allocator;
141 Name->Destroy(Allocator);
142 }
143 setValueName(nullptr);
144 }
145
/// Return true if this value has exactly \p N uses (early-exits; does not
/// walk the whole use list).
bool Value::hasNUses(unsigned N) const {
  return hasNItems(use_begin(), use_end(), N);
}
149
/// Return true if this value has \p N uses or more (early-exits).
bool Value::hasNUsesOrMore(unsigned N) const {
  return hasNItemsOrMore(use_begin(), use_end(), N);
}
153
hasOneUser() const154 bool Value::hasOneUser() const {
155 if (use_empty())
156 return false;
157 if (hasOneUse())
158 return true;
159 return std::equal(++user_begin(), user_end(), user_begin());
160 }
161
isUnDroppableUser(const User * U)162 static bool isUnDroppableUser(const User *U) { return !U->isDroppable(); }
163
getSingleUndroppableUse()164 Use *Value::getSingleUndroppableUse() {
165 Use *Result = nullptr;
166 for (Use &U : uses()) {
167 if (!U.getUser()->isDroppable()) {
168 if (Result)
169 return nullptr;
170 Result = &U;
171 }
172 }
173 return Result;
174 }
175
/// Return true if exactly \p N users of this value are non-droppable.
bool Value::hasNUndroppableUses(unsigned int N) const {
  return hasNItems(user_begin(), user_end(), N, isUnDroppableUser);
}
179
/// Return true if at least \p N users of this value are non-droppable.
bool Value::hasNUndroppableUsesOrMore(unsigned int N) const {
  return hasNItemsOrMore(user_begin(), user_end(), N, isUnDroppableUser);
}
183
dropDroppableUses(llvm::function_ref<bool (const Use *)> ShouldDrop)184 void Value::dropDroppableUses(
185 llvm::function_ref<bool(const Use *)> ShouldDrop) {
186 SmallVector<Use *, 8> ToBeEdited;
187 for (Use &U : uses())
188 if (U.getUser()->isDroppable() && ShouldDrop(&U))
189 ToBeEdited.push_back(&U);
190 for (Use *U : ToBeEdited)
191 dropDroppableUse(*U);
192 }
193
dropDroppableUsesIn(User & Usr)194 void Value::dropDroppableUsesIn(User &Usr) {
195 assert(Usr.isDroppable() && "Expected a droppable user!");
196 for (Use &UsrOp : Usr.operands()) {
197 if (UsrOp.get() == this)
198 dropDroppableUse(UsrOp);
199 }
200 }
201
dropDroppableUse(Use & U)202 void Value::dropDroppableUse(Use &U) {
203 U.removeFromList();
204 if (auto *Assume = dyn_cast<IntrinsicInst>(U.getUser())) {
205 assert(Assume->getIntrinsicID() == Intrinsic::assume);
206 unsigned OpNo = U.getOperandNo();
207 if (OpNo == 0)
208 U.set(ConstantInt::getTrue(Assume->getContext()));
209 else {
210 U.set(UndefValue::get(U.get()->getType()));
211 CallInst::BundleOpInfo &BOI = Assume->getBundleOpInfoForOperand(OpNo);
212 BOI.Tag = Assume->getContext().pImpl->getOrInsertBundleTag("ignore");
213 }
214 return;
215 }
216
217 llvm_unreachable("unkown droppable use");
218 }
219
/// Return true if this value is used by any instruction inside \p BB.
bool Value::isUsedInBasicBlock(const BasicBlock *BB) const {
  // This can be computed either by scanning the instructions in BB, or by
  // scanning the use list of this Value. Both lists can be very long, but
  // usually one is quite short.
  //
  // Scan both lists simultaneously until one is exhausted. This limits the
  // search to the shorter list.
  BasicBlock::const_iterator BI = BB->begin(), BE = BB->end();
  const_user_iterator UI = user_begin(), UE = user_end();
  for (; BI != BE && UI != UE; ++BI, ++UI) {
    // Scan basic block: Check if this Value is used by the instruction at BI.
    if (is_contained(BI->operands(), this))
      return true;
    // Scan use list: Check if the use at UI is in BB.
    // (Non-instruction users, e.g. constants, yield null here and are
    // ignored.)
    const auto *User = dyn_cast<Instruction>(*UI);
    if (User && User->getParent() == BB)
      return true;
  }
  return false;
}
240
/// Count this value's uses by walking the whole use list (O(#uses)); the
/// count is truncated to unsigned.
unsigned Value::getNumUses() const {
  return (unsigned)std::distance(use_begin(), use_end());
}
244
getSymTab(Value * V,ValueSymbolTable * & ST)245 static bool getSymTab(Value *V, ValueSymbolTable *&ST) {
246 ST = nullptr;
247 if (Instruction *I = dyn_cast<Instruction>(V)) {
248 if (BasicBlock *P = I->getParent())
249 if (Function *PP = P->getParent())
250 ST = PP->getValueSymbolTable();
251 } else if (BasicBlock *BB = dyn_cast<BasicBlock>(V)) {
252 if (Function *P = BB->getParent())
253 ST = P->getValueSymbolTable();
254 } else if (GlobalValue *GV = dyn_cast<GlobalValue>(V)) {
255 if (Module *P = GV->getParent())
256 ST = &P->getValueSymbolTable();
257 } else if (Argument *A = dyn_cast<Argument>(V)) {
258 if (Function *P = A->getParent())
259 ST = P->getValueSymbolTable();
260 } else {
261 assert(isa<Constant>(V) && "Unknown value type!");
262 return true; // no name is setable for this.
263 }
264 return false;
265 }
266
getValueName() const267 ValueName *Value::getValueName() const {
268 if (!HasName) return nullptr;
269
270 LLVMContext &Ctx = getContext();
271 auto I = Ctx.pImpl->ValueNames.find(this);
272 assert(I != Ctx.pImpl->ValueNames.end() &&
273 "No name entry found!");
274
275 return I->second;
276 }
277
setValueName(ValueName * VN)278 void Value::setValueName(ValueName *VN) {
279 LLVMContext &Ctx = getContext();
280
281 assert(HasName == Ctx.pImpl->ValueNames.count(this) &&
282 "HasName bit out of sync!");
283
284 if (!VN) {
285 if (HasName)
286 Ctx.pImpl->ValueNames.erase(this);
287 HasName = false;
288 return;
289 }
290
291 HasName = true;
292 Ctx.pImpl->ValueNames[this] = VN;
293 }
294
getName() const295 StringRef Value::getName() const {
296 // Make sure the empty string is still a C string. For historical reasons,
297 // some clients want to call .data() on the result and expect it to be null
298 // terminated.
299 if (!hasName())
300 return StringRef("", 0);
301 return getValueName()->getKey();
302 }
303
/// Shared implementation of setName(): validates the new name, honors the
/// discard-value-names mode and the non-global name-size cap, and keeps the
/// enclosing symbol table (when one exists) in sync with the change.
void Value::setNameImpl(const Twine &NewName) {
  // Fast-path: LLVMContext can be set to strip out non-GlobalValue names
  if (getContext().shouldDiscardValueNames() && !isa<GlobalValue>(this))
    return;

  // Fast path for common IRBuilder case of setName("") when there is no name.
  if (NewName.isTriviallyEmpty() && !hasName())
    return;

  // Materialize the Twine once; NameData provides backing storage if needed.
  SmallString<256> NameData;
  StringRef NameRef = NewName.toStringRef(NameData);
  assert(NameRef.find_first_of(0) == StringRef::npos &&
         "Null bytes are not allowed in names");

  // Name isn't changing?
  if (getName() == NameRef)
    return;

  // Cap the size of non-GlobalValue names.
  if (NameRef.size() > NonGlobalValueMaxNameSize && !isa<GlobalValue>(this))
    NameRef =
        NameRef.substr(0, std::max(1u, (unsigned)NonGlobalValueMaxNameSize));

  assert(!getType()->isVoidTy() && "Cannot assign a name to void values!");

  // Get the symbol table to update for this object.
  ValueSymbolTable *ST;
  if (getSymTab(this, ST))
    return; // Cannot set a name on this value (e.g. constant).

  if (!ST) { // No symbol table to update?  Just do the change.
    if (NameRef.empty()) {
      // Free the name for this value.
      destroyValueName();
      return;
    }

    // NOTE: Could optimize for the case the name is shrinking to not deallocate
    // then reallocated.
    destroyValueName();

    // Create the new name.
    MallocAllocator Allocator;
    setValueName(ValueName::Create(NameRef, Allocator));
    getValueName()->setValue(this);
    return;
  }

  // NOTE: Could optimize for the case the name is shrinking to not deallocate
  // then reallocated.
  if (hasName()) {
    // Remove old name.
    ST->removeValueName(getValueName());
    destroyValueName();

    if (NameRef.empty())
      return;
  }

  // Name is changing to something new.  The symbol table uniquifies the name
  // if it collides with an existing entry.
  setValueName(ST->createValueName(NameRef, this));
}
366
setName(const Twine & NewName)367 void Value::setName(const Twine &NewName) {
368 setNameImpl(NewName);
369 if (Function *F = dyn_cast<Function>(this))
370 F->recalculateIntrinsicID();
371 }
372
/// Transfer V's name to this value, leaving V unnamed.  Handles every
/// combination of this/V living in the same, different, or no symbol table.
void Value::takeName(Value *V) {
  ValueSymbolTable *ST = nullptr;
  // If this value has a name, drop it.
  if (hasName()) {
    // Get the symtab this is in.
    if (getSymTab(this, ST)) {
      // We can't set a name on this value, but we need to clear V's name if
      // it has one.
      if (V->hasName()) V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }

    // Remove old name.
    if (ST)
      ST->removeValueName(getValueName());
    destroyValueName();
  }

  // Now we know that this has no name.

  // If V has no name either, we're done.
  if (!V->hasName()) return;

  // Get this's symtab if we didn't before.
  if (!ST) {
    if (getSymTab(this, ST)) {
      // Clear V's name.
      V->setName("");
      return; // Cannot set a name on this value (e.g. constant).
    }
  }

  // Get V's ST, this should always succeed, because V has a name.
  ValueSymbolTable *VST;
  bool Failure = getSymTab(V, VST);
  assert(!Failure && "V has a name, so it should have a ST!"); (void)Failure;

  // If these values are both in the same symtab, we can do this very fast.
  // This works even if both values have no symtab yet.
  if (ST == VST) {
    // Take the name!  Steal V's ValueName entry and repoint it at us.
    setValueName(V->getValueName());
    V->setValueName(nullptr);
    getValueName()->setValue(this);
    return;
  }

  // Otherwise, things are slightly more complex.  Remove V's name from VST and
  // then reinsert it into ST (which may uniquify it on collision).

  if (VST)
    VST->removeValueName(V->getValueName());
  setValueName(V->getValueName());
  V->setValueName(nullptr);
  getValueName()->setValue(this);

  if (ST)
    ST->reinsertValue(this);
}
432
433 #ifndef NDEBUG
getNameOrAsOperand() const434 std::string Value::getNameOrAsOperand() const {
435 if (!getName().empty())
436 return std::string(getName());
437
438 std::string BBName;
439 raw_string_ostream OS(BBName);
440 printAsOperand(OS, false);
441 return OS.str();
442 }
443 #endif
444
assertModuleIsMaterializedImpl() const445 void Value::assertModuleIsMaterializedImpl() const {
446 #ifndef NDEBUG
447 const GlobalValue *GV = dyn_cast<GlobalValue>(this);
448 if (!GV)
449 return;
450 const Module *M = GV->getParent();
451 if (!M)
452 return;
453 assert(M->isMaterialized());
454 #endif
455 }
456
457 #ifndef NDEBUG
contains(SmallPtrSetImpl<ConstantExpr * > & Cache,ConstantExpr * Expr,Constant * C)458 static bool contains(SmallPtrSetImpl<ConstantExpr *> &Cache, ConstantExpr *Expr,
459 Constant *C) {
460 if (!Cache.insert(Expr).second)
461 return false;
462
463 for (auto &O : Expr->operands()) {
464 if (O == C)
465 return true;
466 auto *CE = dyn_cast<ConstantExpr>(O);
467 if (!CE)
468 continue;
469 if (contains(Cache, CE, C))
470 return true;
471 }
472 return false;
473 }
474
contains(Value * Expr,Value * V)475 static bool contains(Value *Expr, Value *V) {
476 if (Expr == V)
477 return true;
478
479 auto *C = dyn_cast<Constant>(V);
480 if (!C)
481 return false;
482
483 auto *CE = dyn_cast<ConstantExpr>(Expr);
484 if (!CE)
485 return false;
486
487 SmallPtrSet<ConstantExpr *, 4> Cache;
488 return contains(Cache, CE, C);
489 }
490 #endif // NDEBUG
491
/// Core of replaceAllUsesWith: redirect every use of this value to \p New,
/// optionally updating metadata uses as well.
void Value::doRAUW(Value *New, ReplaceMetadataUses ReplaceMetaUses) {
  assert(New && "Value::replaceAllUsesWith(<null>) is invalid!");
  assert(!contains(New, this) &&
         "this->replaceAllUsesWith(expr(this)) is NOT valid!");
  assert(New->getType() == getType() &&
         "replaceAllUses of value with new value of different type!");

  // Notify all ValueHandles (if present) that this value is going away.
  if (HasValueHandle)
    ValueHandleBase::ValueIsRAUWd(this, New);
  if (ReplaceMetaUses == ReplaceMetadataUses::Yes && isUsedByMetadata())
    ValueAsMetadata::handleRAUW(this, New);

  // Each iteration unlinks at least the head use, so this terminates.
  while (!materialized_use_empty()) {
    Use &U = *UseList;
    // Must handle Constants specially, we cannot call replaceUsesOfWith on a
    // constant because they are uniqued.
    if (auto *C = dyn_cast<Constant>(U.getUser())) {
      if (!isa<GlobalValue>(C)) {
        C->handleOperandChange(this, New);
        continue;
      }
    }

    U.set(New);
  }

  // Basic blocks additionally update phi nodes in their successors.
  if (BasicBlock *BB = dyn_cast<BasicBlock>(this))
    BB->replaceSuccessorsPhiUsesWith(cast<BasicBlock>(New));
}
522
/// Replace all uses of this value with \p New, including metadata uses.
void Value::replaceAllUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::Yes);
}
526
/// Replace all uses of this value with \p New, but leave metadata uses intact.
void Value::replaceNonMetadataUsesWith(Value *New) {
  doRAUW(New, ReplaceMetadataUses::No);
}
530
531 // Like replaceAllUsesWith except it does not handle constants or basic blocks.
532 // This routine leaves uses within BB.
replaceUsesOutsideBlock(Value * New,BasicBlock * BB)533 void Value::replaceUsesOutsideBlock(Value *New, BasicBlock *BB) {
534 assert(New && "Value::replaceUsesOutsideBlock(<null>, BB) is invalid!");
535 assert(!contains(New, this) &&
536 "this->replaceUsesOutsideBlock(expr(this), BB) is NOT valid!");
537 assert(New->getType() == getType() &&
538 "replaceUses of value with new value of different type!");
539 assert(BB && "Basic block that may contain a use of 'New' must be defined\n");
540
541 replaceUsesWithIf(New, [BB](Use &U) {
542 auto *I = dyn_cast<Instruction>(U.getUser());
543 // Don't replace if it's an instruction in the BB basic block.
544 return !I || I->getParent() != BB;
545 });
546 }
547
namespace {
// Various metrics for how much to strip off of pointers.
enum PointerStripKind {
  PSK_ZeroIndices,
  PSK_ZeroIndicesAndAliases,
  PSK_ZeroIndicesSameRepresentation,
  PSK_ZeroIndicesAndInvariantGroups,
  PSK_InBoundsConstantIndices,
  PSK_InBounds
};

// Default visitor for stripPointerCastsAndOffsets: do nothing per value.
template <PointerStripKind StripKind> static void NoopCallback(const Value *) {}

/// Shared engine behind the Value::strip* family.  Starting from \p V, walks
/// through whatever casts/GEPs/aliases \p StripKind allows, invoking \p Func
/// on each value visited, and returns the first value that cannot be stripped
/// further.
template <PointerStripKind StripKind>
static const Value *stripPointerCastsAndOffsets(
    const Value *V,
    function_ref<void(const Value *)> Func = NoopCallback<StripKind>) {
  if (!V->getType()->isPointerTy())
    return V;

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;

  Visited.insert(V);
  do {
    Func(V);
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // Whether a GEP may be looked through depends on the strip kind.
      switch (StripKind) {
      case PSK_ZeroIndices:
      case PSK_ZeroIndicesAndAliases:
      case PSK_ZeroIndicesSameRepresentation:
      case PSK_ZeroIndicesAndInvariantGroups:
        if (!GEP->hasAllZeroIndices())
          return V;
        break;
      case PSK_InBoundsConstantIndices:
        if (!GEP->hasAllConstantIndices())
          return V;
        LLVM_FALLTHROUGH;
      case PSK_InBounds:
        if (!GEP->isInBounds())
          return V;
        break;
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast) {
      V = cast<Operator>(V)->getOperand(0);
      // Stop if the bitcast source is not itself a pointer.
      if (!V->getType()->isPointerTy())
        return V;
    } else if (StripKind != PSK_ZeroIndicesSameRepresentation &&
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      // TODO: If we know an address space cast will not change the
      // representation we could look through it here as well.
      V = cast<Operator>(V)->getOperand(0);
    } else if (StripKind == PSK_ZeroIndicesAndAliases && isa<GlobalAlias>(V)) {
      V = cast<GlobalAlias>(V)->getAliasee();
    } else {
      if (const auto *Call = dyn_cast<CallBase>(V)) {
        // A call with a 'returned' argument aliases that argument.
        if (const Value *RV = Call->getReturnedArgOperand()) {
          V = RV;
          continue;
        }
        // The result of launder.invariant.group must alias it's argument,
        // but it can't be marked with returned attribute, that's why it needs
        // special case.
        if (StripKind == PSK_ZeroIndicesAndInvariantGroups &&
            (Call->getIntrinsicID() == Intrinsic::launder_invariant_group ||
             Call->getIntrinsicID() == Intrinsic::strip_invariant_group)) {
          V = Call->getArgOperand(0);
          continue;
        }
      }
      return V;
    }
    assert(V->getType()->isPointerTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second); // Stop on revisiting a value (cycle).

  return V;
}
} // end anonymous namespace
629
/// Strip casts and all-zero-index GEPs (see PSK_ZeroIndices).
const Value *Value::stripPointerCasts() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndices>(this);
}
633
/// Like stripPointerCasts(), but additionally looks through global aliases.
const Value *Value::stripPointerCastsAndAliases() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndAliases>(this);
}
637
/// Like stripPointerCasts(), but does not look through address space casts
/// (which may change the pointer representation).
const Value *Value::stripPointerCastsSameRepresentation() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesSameRepresentation>(this);
}
641
/// Strip casts and in-bounds GEPs whose indices are all constant.
const Value *Value::stripInBoundsConstantOffsets() const {
  return stripPointerCastsAndOffsets<PSK_InBoundsConstantIndices>(this);
}
645
/// Like stripPointerCasts(), but additionally looks through
/// launder/strip.invariant.group intrinsic calls.
const Value *Value::stripPointerCastsAndInvariantGroups() const {
  return stripPointerCastsAndOffsets<PSK_ZeroIndicesAndInvariantGroups>(this);
}
649
/// Strip pointer casts, constant-offset GEPs, non-interposable aliases and
/// 'returned'-argument calls while accumulating the total constant offset
/// into \p Offset.  Returns the first value that cannot be stripped further;
/// \p Offset is only updated for GEPs that were actually stripped.
const Value *Value::stripAndAccumulateConstantOffsets(
    const DataLayout &DL, APInt &Offset, bool AllowNonInbounds,
    function_ref<bool(Value &, APInt &)> ExternalAnalysis) const {
  if (!getType()->isPtrOrPtrVectorTy())
    return this;

  unsigned BitWidth = Offset.getBitWidth();
  assert(BitWidth == DL.getIndexTypeSizeInBits(getType()) &&
         "The offset bit width does not match the DL specification.");

  // Even though we don't look through PHI nodes, we could be called on an
  // instruction in an unreachable block, which may be on a cycle.
  SmallPtrSet<const Value *, 4> Visited;
  Visited.insert(this);
  const Value *V = this;
  do {
    if (auto *GEP = dyn_cast<GEPOperator>(V)) {
      // If in-bounds was requested, we do not strip non-in-bounds GEPs.
      if (!AllowNonInbounds && !GEP->isInBounds())
        return V;

      // If one of the values we have visited is an addrspacecast, then
      // the pointer type of this GEP may be different from the type
      // of the Ptr parameter which was passed to this function. This
      // means when we construct GEPOffset, we need to use the size
      // of GEP's pointer type rather than the size of the original
      // pointer type.
      APInt GEPOffset(DL.getIndexTypeSizeInBits(V->getType()), 0);
      if (!GEP->accumulateConstantOffset(DL, GEPOffset, ExternalAnalysis))
        return V;

      // Stop traversal if the pointer offset wouldn't fit in the bit-width
      // provided by the Offset argument. This can happen due to AddrSpaceCast
      // stripping.
      if (GEPOffset.getMinSignedBits() > BitWidth)
        return V;

      // External Analysis can return a result higher/lower than the value
      // represents. We need to detect overflow/underflow.
      APInt GEPOffsetST = GEPOffset.sextOrTrunc(BitWidth);
      if (!ExternalAnalysis) {
        Offset += GEPOffsetST;
      } else {
        // On signed overflow, restore the previous offset and stop here.
        bool Overflow = false;
        APInt OldOffset = Offset;
        Offset = Offset.sadd_ov(GEPOffsetST, Overflow);
        if (Overflow) {
          Offset = OldOffset;
          return V;
        }
      }
      V = GEP->getPointerOperand();
    } else if (Operator::getOpcode(V) == Instruction::BitCast ||
               Operator::getOpcode(V) == Instruction::AddrSpaceCast) {
      V = cast<Operator>(V)->getOperand(0);
    } else if (auto *GA = dyn_cast<GlobalAlias>(V)) {
      // Interposable aliases may resolve elsewhere at link time; don't strip.
      if (!GA->isInterposable())
        V = GA->getAliasee();
    } else if (const auto *Call = dyn_cast<CallBase>(V)) {
      if (const Value *RV = Call->getReturnedArgOperand())
        V = RV;
    }
    assert(V->getType()->isPtrOrPtrVectorTy() && "Unexpected operand type!");
  } while (Visited.insert(V).second); // Stop on revisiting a value (cycle).

  return V;
}
717
/// Strip in-bounds GEPs (with any indices) and pointer casts, invoking
/// \p Func on each value visited along the way.
const Value *
Value::stripInBoundsOffsets(function_ref<void(const Value *)> Func) const {
  return stripPointerCastsAndOffsets<PSK_InBounds>(this, Func);
}
722
/// Compute how many bytes this pointer is known to be dereferenceable, based
/// on attributes, metadata, or the allocated/global object size.  \p CanBeNull
/// is set when the derived bytes come from a dereferenceable_or_null source.
uint64_t Value::getPointerDereferenceableBytes(const DataLayout &DL,
                                               bool &CanBeNull) const {
  assert(getType()->isPointerTy() && "must be pointer");

  uint64_t DerefBytes = 0;
  CanBeNull = false;
  if (const Argument *A = dyn_cast<Argument>(this)) {
    DerefBytes = A->getDereferenceableBytes();
    if (DerefBytes == 0) {
      // Handle byval/byref/inalloca/preallocated arguments
      if (Type *ArgMemTy = A->getPointeeInMemoryValueType()) {
        if (ArgMemTy->isSized()) {
          // FIXME: Why isn't this the type alloc size?
          DerefBytes = DL.getTypeStoreSize(ArgMemTy).getKnownMinSize();
        }
      }
    }

    // Fall back to the _or_null variant of the attribute.
    if (DerefBytes == 0) {
      DerefBytes = A->getDereferenceableOrNullBytes();
      CanBeNull = true;
    }
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    DerefBytes = Call->getDereferenceableBytes(AttributeList::ReturnIndex);
    if (DerefBytes == 0) {
      DerefBytes =
          Call->getDereferenceableOrNullBytes(AttributeList::ReturnIndex);
      CanBeNull = true;
    }
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    // Loads can carry !dereferenceable / !dereferenceable_or_null metadata.
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              LI->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *IP = dyn_cast<IntToPtrInst>(this)) {
    if (MDNode *MD = IP->getMetadata(LLVMContext::MD_dereferenceable)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      DerefBytes = CI->getLimitedValue();
    }
    if (DerefBytes == 0) {
      if (MDNode *MD =
              IP->getMetadata(LLVMContext::MD_dereferenceable_or_null)) {
        ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
        DerefBytes = CI->getLimitedValue();
      }
      CanBeNull = true;
    }
  } else if (auto *AI = dyn_cast<AllocaInst>(this)) {
    // A fixed (non-array) alloca is dereferenceable for its allocated type.
    if (!AI->isArrayAllocation()) {
      DerefBytes =
          DL.getTypeStoreSize(AI->getAllocatedType()).getKnownMinSize();
      CanBeNull = false;
    }
  } else if (auto *GV = dyn_cast<GlobalVariable>(this)) {
    if (GV->getValueType()->isSized() && !GV->hasExternalWeakLinkage()) {
      // TODO: Don't outright reject hasExternalWeakLinkage but set the
      // CanBeNull flag.
      DerefBytes = DL.getTypeStoreSize(GV->getValueType()).getFixedSize();
      CanBeNull = false;
    }
  }
  return DerefBytes;
}
794
/// Compute the known alignment of this pointer from attributes, metadata,
/// global/alloca properties, or constant-integer bit patterns.  Returns
/// Align(1) when nothing better is known.
Align Value::getPointerAlignment(const DataLayout &DL) const {
  assert(getType()->isPointerTy() && "must be pointer");
  if (auto *GO = dyn_cast<GlobalObject>(this)) {
    if (isa<Function>(GO)) {
      // Function pointer alignment is governed by the DataLayout's
      // function-pointer alignment rule.
      Align FunctionPtrAlign = DL.getFunctionPtrAlign().valueOrOne();
      switch (DL.getFunctionPtrAlignType()) {
      case DataLayout::FunctionPtrAlignType::Independent:
        return FunctionPtrAlign;
      case DataLayout::FunctionPtrAlignType::MultipleOfFunctionAlign:
        return std::max(FunctionPtrAlign, GO->getAlign().valueOrOne());
      }
      llvm_unreachable("Unhandled FunctionPtrAlignType");
    }
    const MaybeAlign Alignment(GO->getAlignment());
    if (!Alignment) {
      if (auto *GVar = dyn_cast<GlobalVariable>(GO)) {
        Type *ObjectType = GVar->getValueType();
        if (ObjectType->isSized()) {
          // If the object is defined in the current Module, we'll be giving
          // it the preferred alignment. Otherwise, we have to assume that it
          // may only have the minimum ABI alignment.
          if (GVar->isStrongDefinitionForLinker())
            return DL.getPreferredAlign(GVar);
          else
            return DL.getABITypeAlign(ObjectType);
        }
      }
    }
    return Alignment.valueOrOne();
  } else if (const Argument *A = dyn_cast<Argument>(this)) {
    const MaybeAlign Alignment = A->getParamAlign();
    if (!Alignment && A->hasStructRetAttr()) {
      // An sret parameter has at least the ABI alignment of the return type.
      Type *EltTy = A->getParamStructRetType();
      if (EltTy->isSized())
        return DL.getABITypeAlign(EltTy);
    }
    return Alignment.valueOrOne();
  } else if (const AllocaInst *AI = dyn_cast<AllocaInst>(this)) {
    return AI->getAlign();
  } else if (const auto *Call = dyn_cast<CallBase>(this)) {
    MaybeAlign Alignment = Call->getRetAlign();
    if (!Alignment && Call->getCalledFunction())
      Alignment = Call->getCalledFunction()->getAttributes().getRetAlignment();
    return Alignment.valueOrOne();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(this)) {
    // Loads can carry !align metadata.
    if (MDNode *MD = LI->getMetadata(LLVMContext::MD_align)) {
      ConstantInt *CI = mdconst::extract<ConstantInt>(MD->getOperand(0));
      return Align(CI->getLimitedValue());
    }
  } else if (auto *CstPtr = dyn_cast<Constant>(this)) {
    // For a constant that folds to an integer, the trailing zero bits of the
    // integer bound the alignment.
    if (auto *CstInt = dyn_cast_or_null<ConstantInt>(ConstantExpr::getPtrToInt(
            const_cast<Constant *>(CstPtr), DL.getIntPtrType(getType()),
            /*OnlyIfReduced=*/true))) {
      size_t TrailingZeros = CstInt->getValue().countTrailingZeros();
      // While the actual alignment may be large, elsewhere we have
      // an arbitrary upper alignment limit, so let's clamp to it.
      return Align(TrailingZeros < Value::MaxAlignmentExponent
                       ? uint64_t(1) << TrailingZeros
                       : Value::MaximumAlignment);
    }
  }
  return Align(1);
}
859
DoPHITranslation(const BasicBlock * CurBB,const BasicBlock * PredBB) const860 const Value *Value::DoPHITranslation(const BasicBlock *CurBB,
861 const BasicBlock *PredBB) const {
862 auto *PN = dyn_cast<PHINode>(this);
863 if (PN && PN->getParent() == CurBB)
864 return PN->getIncomingValueForBlock(PredBB);
865 return this;
866 }
867
getContext() const868 LLVMContext &Value::getContext() const { return VTy->getContext(); }
869
/// Reverse the order of this value's use list in place, fixing up every
/// Use's Next/Prev links.
void Value::reverseUseList() {
  if (!UseList || !UseList->Next)
    // No need to reverse 0 or 1 uses.
    return;

  Use *Head = UseList;
  Use *Current = UseList->Next;
  // The old head becomes the tail of the reversed list.
  Head->Next = nullptr;
  while (Current) {
    Use *Next = Current->Next;
    // Push Current onto the front of the reversed list.
    Current->Next = Head;
    Head->Prev = &Current->Next;
    Head = Current;
    Current = Next;
  }
  UseList = Head;
  Head->Prev = &UseList;
}
888
isSwiftError() const889 bool Value::isSwiftError() const {
890 auto *Arg = dyn_cast<Argument>(this);
891 if (Arg)
892 return Arg->hasSwiftErrorAttr();
893 auto *Alloca = dyn_cast<AllocaInst>(this);
894 if (!Alloca)
895 return false;
896 return Alloca->isSwiftError();
897 }
898
899 //===----------------------------------------------------------------------===//
900 // ValueHandleBase Class
901 //===----------------------------------------------------------------------===//
902
AddToExistingUseList(ValueHandleBase ** List)903 void ValueHandleBase::AddToExistingUseList(ValueHandleBase **List) {
904 assert(List && "Handle list is null?");
905
906 // Splice ourselves into the list.
907 Next = *List;
908 *List = this;
909 setPrevPtr(List);
910 if (Next) {
911 Next->setPrevPtr(&Next);
912 assert(getValPtr() == Next->getValPtr() && "Added to wrong list?");
913 }
914 }
915
AddToExistingUseListAfter(ValueHandleBase * List)916 void ValueHandleBase::AddToExistingUseListAfter(ValueHandleBase *List) {
917 assert(List && "Must insert after existing node");
918
919 Next = List->Next;
920 setPrevPtr(&List->Next);
921 List->Next = this;
922 if (Next)
923 Next->setPrevPtr(&Next);
924 }
925
AddToUseList()926 void ValueHandleBase::AddToUseList() {
927 assert(getValPtr() && "Null pointer doesn't have a use list!");
928
929 LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
930
931 if (getValPtr()->HasValueHandle) {
932 // If this value already has a ValueHandle, then it must be in the
933 // ValueHandles map already.
934 ValueHandleBase *&Entry = pImpl->ValueHandles[getValPtr()];
935 assert(Entry && "Value doesn't have any handles?");
936 AddToExistingUseList(&Entry);
937 return;
938 }
939
940 // Ok, it doesn't have any handles yet, so we must insert it into the
941 // DenseMap. However, doing this insertion could cause the DenseMap to
942 // reallocate itself, which would invalidate all of the PrevP pointers that
943 // point into the old table. Handle this by checking for reallocation and
944 // updating the stale pointers only if needed.
945 DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
946 const void *OldBucketPtr = Handles.getPointerIntoBucketsArray();
947
948 ValueHandleBase *&Entry = Handles[getValPtr()];
949 assert(!Entry && "Value really did already have handles?");
950 AddToExistingUseList(&Entry);
951 getValPtr()->HasValueHandle = true;
952
953 // If reallocation didn't happen or if this was the first insertion, don't
954 // walk the table.
955 if (Handles.isPointerIntoBucketsArray(OldBucketPtr) ||
956 Handles.size() == 1) {
957 return;
958 }
959
960 // Okay, reallocation did happen. Fix the Prev Pointers.
961 for (DenseMap<Value*, ValueHandleBase*>::iterator I = Handles.begin(),
962 E = Handles.end(); I != E; ++I) {
963 assert(I->second && I->first == I->second->getValPtr() &&
964 "List invariant broken!");
965 I->second->setPrevPtr(&I->second);
966 }
967 }
968
RemoveFromUseList()969 void ValueHandleBase::RemoveFromUseList() {
970 assert(getValPtr() && getValPtr()->HasValueHandle &&
971 "Pointer doesn't have a use list!");
972
973 // Unlink this from its use list.
974 ValueHandleBase **PrevPtr = getPrevPtr();
975 assert(*PrevPtr == this && "List invariant broken");
976
977 *PrevPtr = Next;
978 if (Next) {
979 assert(Next->getPrevPtr() == &Next && "List invariant broken");
980 Next->setPrevPtr(PrevPtr);
981 return;
982 }
983
984 // If the Next pointer was null, then it is possible that this was the last
985 // ValueHandle watching VP. If so, delete its entry from the ValueHandles
986 // map.
987 LLVMContextImpl *pImpl = getValPtr()->getContext().pImpl;
988 DenseMap<Value*, ValueHandleBase*> &Handles = pImpl->ValueHandles;
989 if (Handles.isPointerIntoBucketsArray(PrevPtr)) {
990 Handles.erase(getValPtr());
991 getValPtr()->HasValueHandle = false;
992 }
993 }
994
// Notify every handle watching V that the value is being deleted.  Weak
// handles are nulled out, CallbackVH subclasses get their deleted() hook, and
// any handle still on the list afterwards is a hard error.
void ValueHandleBase::ValueIsDeleted(Value *V) {
  assert(V->HasValueHandle && "Should only be called if ValueHandles present");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = V->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[V];
  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that ValueHandles can add
  // and remove themselves from the list without breaking our iteration. This
  // is not really an AssertingVH; we just have to give ValueHandleBase a kind.
  // Note that we deliberately do not support the case when dropping a value
  // handle results in a new value handle being permanently added to the list
  // (as might occur in theory for CallbackVH's): the new value handle will not
  // be processed and the checking code will mete out righteous punishment if
  // the handle is still present once we have finished processing all the other
  // value handles (it is fine to momentarily add then remove a value handle).
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    // Park the sentinel directly after the entry being processed, so that
    // Iterator.Next always names the next unprocessed handle even if Entry
    // removes itself from the list below.
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
      break;
    case Weak:
    case WeakTracking:
      // WeakTracking and Weak just go to null, which unlinks them
      // from the list.
      Entry->operator=(nullptr);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->deleted();
      break;
    }
  }

  // All callbacks, weak references, and assertingVHs should be dropped by now.
  if (V->HasValueHandle) {
#ifndef NDEBUG // Only in +Asserts mode...
    dbgs() << "While deleting: " << *V->getType() << " %" << V->getName()
           << "\n";
    if (pImpl->ValueHandles[V]->getKind() == Assert)
      llvm_unreachable("An asserting value handle still pointed to this"
                       " value!");

#endif
    llvm_unreachable("All references to V were not removed?");
  }
}
1047
// Notify every handle watching Old that all uses of Old have been replaced
// with New.  Only WeakTracking handles retarget to New (unlinking themselves
// from Old's list); CallbackVH subclasses get their allUsesReplacedWith()
// hook; Assert and Weak handles stay on Old's list untouched.
void ValueHandleBase::ValueIsRAUWd(Value *Old, Value *New) {
  assert(Old->HasValueHandle &&"Should only be called if ValueHandles present");
  assert(Old != New && "Changing value into itself!");
  assert(Old->getType() == New->getType() &&
         "replaceAllUses of value with new value of different type!");

  // Get the linked list base, which is guaranteed to exist since the
  // HasValueHandle flag is set.
  LLVMContextImpl *pImpl = Old->getContext().pImpl;
  ValueHandleBase *Entry = pImpl->ValueHandles[Old];

  assert(Entry && "Value bit set but no entries exist");

  // We use a local ValueHandleBase as an iterator so that
  // ValueHandles can add and remove themselves from the list without
  // breaking our iteration. This is not really an AssertingVH; we
  // just have to give ValueHandleBase some kind.
  for (ValueHandleBase Iterator(Assert, *Entry); Entry; Entry = Iterator.Next) {
    // Park the sentinel directly after the entry being processed, so that
    // Iterator.Next always names the next unprocessed handle even if Entry
    // unlinks itself from Old's list below.
    Iterator.RemoveFromUseList();
    Iterator.AddToExistingUseListAfter(Entry);
    assert(Entry->Next == &Iterator && "Loop invariant broken.");

    switch (Entry->getKind()) {
    case Assert:
    case Weak:
      // Asserting and Weak handles do not follow RAUW implicitly.
      break;
    case WeakTracking:
      // WeakTracking goes to the new value, which will unlink it from Old's
      // list.
      Entry->operator=(New);
      break;
    case Callback:
      // Forward to the subclass's implementation.
      static_cast<CallbackVH*>(Entry)->allUsesReplacedWith(New);
      break;
    }
  }

#ifndef NDEBUG
  // If any new weak value handles were added while processing the
  // list, then complain about it now.
  if (Old->HasValueHandle)
    for (Entry = pImpl->ValueHandles[Old]; Entry; Entry = Entry->Next)
      switch (Entry->getKind()) {
      case WeakTracking:
        dbgs() << "After RAUW from " << *Old->getType() << " %"
               << Old->getName() << " to " << *New->getType() << " %"
               << New->getName() << "\n";
        llvm_unreachable(
            "A weak tracking value handle still pointed to the old value!\n");
      default:
        break;
      }
#endif
}
1103
// Pin CallbackVH's vtable to this file: defining one virtual method
// out-of-line gives the vtable a single home translation unit.
void CallbackVH::anchor() {}
1106