//===- llvm/IR/Metadata.h - Metadata definitions ----------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// @file
/// This file contains the declarations for metadata subclasses.
/// They represent the different flavors of metadata that live in LLVM.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_IR_METADATA_H
#define LLVM_IR_METADATA_H

#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DenseMapInfo.h"
#include "llvm/ADT/PointerUnion.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/ilist_node.h"
#include "llvm/ADT/iterator_range.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/CBindingWrapping.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <iterator>
#include <memory>
#include <string>
#include <type_traits>
#include <utility>

namespace llvm {

class Module;
class ModuleSlotTracker;
class raw_ostream;
template <typename T> class StringMapEntry;
template <typename ValueTy> class StringMapEntryStorage;
class Type;

enum LLVMConstants : uint32_t {
  DEBUG_METADATA_VERSION = 3 // Current debug info version number.
};

/// Magic number in the value profile metadata showing a target has been
/// promoted for the instruction and shouldn't be promoted again.
// Note: the -1 initializer converts to UINT64_MAX (well-defined unsigned
// wraparound).
const uint64_t NOMORE_ICP_MAGICNUM = -1;

/// Root of the metadata hierarchy.
///
/// This is a root class for typeless data in the IR.
class Metadata {
  friend class ReplaceableMetadataImpl;

  /// RTTI.
  const unsigned char SubclassID;

protected:
  /// Active type of storage.
  enum StorageType { Uniqued, Distinct, Temporary };

  /// Storage flag for non-uniqued, otherwise unowned, metadata.
  unsigned char Storage : 7;

  // Scratch storage available to subclasses. The bitfield packing here is
  // deliberate: the static_assert in the constructor pins sizeof(Metadata)
  // to 8 bytes.
  unsigned char SubclassData1 : 1;
  unsigned short SubclassData16 = 0;
  unsigned SubclassData32 = 0;

public:
  /// One enumerator per concrete metadata subclass, generated from
  /// Metadata.def; used as the RTTI discriminator stored in SubclassID.
  enum MetadataKind {
#define HANDLE_METADATA_LEAF(CLASS) CLASS##Kind,
#include "llvm/IR/Metadata.def"
  };

protected:
  Metadata(unsigned ID, StorageType Storage)
      : SubclassID(ID), Storage(Storage), SubclassData1(false) {
    static_assert(sizeof(*this) == 8, "Metadata fields poorly packed");
  }

  // Non-virtual: deletion always happens through the concrete subclass.
  ~Metadata() = default;

  /// Default handling of a changed operand, which asserts.
  ///
  /// If subclasses pass themselves in as owners to a tracking node reference,
  /// they must provide an implementation of this method.
  void handleChangedOperand(void *, Metadata *) {
    llvm_unreachable("Unimplemented in Metadata subclass");
  }

public:
  unsigned getMetadataID() const { return SubclassID; }

  /// User-friendly dump.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  ///
  /// Note: this uses an explicit overload instead of default arguments so that
  /// the nullptr version is easy to call from a debugger.
  ///
  /// @{
  void dump() const;
  void dump(const Module *M) const;
  /// @}

  /// Print.
  ///
  /// Prints definition of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void print(raw_ostream &OS, const Module *M = nullptr,
             bool IsForDebug = false) const;
  void print(raw_ostream &OS, ModuleSlotTracker &MST, const Module *M = nullptr,
             bool IsForDebug = false) const;
  /// @}

  /// Print as operand.
  ///
  /// Prints reference of \c this.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void printAsOperand(raw_ostream &OS, const Module *M = nullptr) const;
  void printAsOperand(raw_ostream &OS, ModuleSlotTracker &MST,
                      const Module *M = nullptr) const;
  /// @}
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(Metadata, LLVMMetadataRef)

// Specialized opaque metadata conversions.
inline Metadata **unwrap(LLVMMetadataRef *MDs) {
  return reinterpret_cast<Metadata **>(MDs);
}

// Forward-declare every metadata subclass listed in Metadata.def.
#define HANDLE_METADATA(CLASS) class CLASS;
#include "llvm/IR/Metadata.def"

// Provide specializations of isa so that we don't need definitions of
// subclasses to see if the metadata is a subclass.
#define HANDLE_METADATA_LEAF(CLASS)                                            \
  template <> struct isa_impl<CLASS, Metadata> {                               \
    static inline bool doit(const Metadata &MD) {                              \
      return MD.getMetadataID() == Metadata::CLASS##Kind;                      \
    }                                                                          \
  };
#include "llvm/IR/Metadata.def"

inline raw_ostream &operator<<(raw_ostream &OS, const Metadata &MD) {
  MD.print(OS);
  return OS;
}

/// Metadata wrapper in the Value hierarchy.
///
/// A member of the \a Value hierarchy to represent a reference to metadata.
/// This allows, e.g., intrinsics to have metadata as operands.
///
/// Notably, this is the only thing in either hierarchy that is allowed to
/// reference \a LocalAsMetadata.
class MetadataAsValue : public Value {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  /// The wrapped metadata; nulled by dropUse() during context teardown.
  Metadata *MD;

  MetadataAsValue(Type *Ty, Metadata *MD);

  /// Drop use of metadata (during teardown).
  void dropUse() { MD = nullptr; }

public:
  ~MetadataAsValue();

  static MetadataAsValue *get(LLVMContext &Context, Metadata *MD);
  static MetadataAsValue *getIfExists(LLVMContext &Context, Metadata *MD);

  Metadata *getMetadata() const { return MD; }

  /// Support for isa/cast/dyn_cast in the Value hierarchy.
  static bool classof(const Value *V) {
    return V->getValueID() == MetadataAsValueVal;
  }

private:
  void handleChangedMetadata(Metadata *MD);
  void track();
  void untrack();
};

/// API for tracking metadata references through RAUW and deletion.
///
/// Shared API for updating \a Metadata pointers in subclasses that support
/// RAUW.
///
/// This API is not meant to be used directly. See \a TrackingMDRef for a
/// user-friendly tracking reference.
class MetadataTracking {
public:
  /// Track the reference to metadata.
  ///
  /// Register \c MD with \c *MD, if the subclass supports tracking. If \c *MD
  /// gets RAUW'ed, \c MD will be updated to the new address. If \c *MD gets
  /// deleted, \c MD will be set to \c nullptr.
  ///
  /// If tracking isn't supported, \c *MD will not change.
  ///
  /// \return true iff tracking is supported by \c MD.
  static bool track(Metadata *&MD) {
    // No owner: the cast selects the OwnerTy overload with a null Metadata
    // owner.
    return track(&MD, *MD, static_cast<Metadata *>(nullptr));
  }

  /// Track the reference to metadata for \a Metadata.
  ///
  /// As \a track(Metadata*&), but with support for calling back to \c Owner to
  /// tell it that its operand changed. This could trigger \c Owner being
  /// re-uniqued.
  static bool track(void *Ref, Metadata &MD, Metadata &Owner) {
    return track(Ref, MD, &Owner);
  }

  /// Track the reference to metadata for \a MetadataAsValue.
  ///
  /// As \a track(Metadata*&), but with support for calling back to \c Owner to
  /// tell it that its operand changed. This could trigger \c Owner being
  /// re-uniqued.
  static bool track(void *Ref, Metadata &MD, MetadataAsValue &Owner) {
    return track(Ref, MD, &Owner);
  }

  /// Stop tracking a reference to metadata.
  ///
  /// Stops \c *MD from tracking \c MD.
  static void untrack(Metadata *&MD) { untrack(&MD, *MD); }
  static void untrack(void *Ref, Metadata &MD);

  /// Move tracking from one reference to another.
  ///
  /// Semantically equivalent to \c untrack(MD) followed by \c track(New),
  /// except that ownership callbacks are maintained.
  ///
  /// Note: it is an error if \c *MD does not equal \c New.
  ///
  /// \return true iff tracking is supported by \c MD.
  static bool retrack(Metadata *&MD, Metadata *&New) {
    return retrack(&MD, *MD, &New);
  }
  static bool retrack(void *Ref, Metadata &MD, void *New);

  /// Check whether metadata is replaceable.
  static bool isReplaceable(const Metadata &MD);

  /// An owner is either a MetadataAsValue or a Metadata node (or absent).
  using OwnerTy = PointerUnion<MetadataAsValue *, Metadata *>;

private:
  /// Track a reference to metadata for an owner.
  ///
  /// Generalized version of tracking.
  static bool track(void *Ref, Metadata &MD, OwnerTy Owner);
};

/// Shared implementation of use-lists for replaceable metadata.
///
/// Most metadata cannot be RAUW'ed. This is a shared implementation of
/// use-lists and associated API for the two that support it (\a ValueAsMetadata
/// and \a TempMDNode).
class ReplaceableMetadataImpl {
  friend class MetadataTracking;

public:
  using OwnerTy = MetadataTracking::OwnerTy;

private:
  LLVMContext &Context;
  // Each tracked use is mapped to its (owner, index) pair; NextIndex supplies
  // the per-use index — presumably so uses can be enumerated in a
  // deterministic (insertion) order; confirm against Metadata.cpp.
  uint64_t NextIndex = 0;
  SmallDenseMap<void *, std::pair<OwnerTy, uint64_t>, 4> UseMap;

public:
  ReplaceableMetadataImpl(LLVMContext &Context) : Context(Context) {}

  ~ReplaceableMetadataImpl() {
    assert(UseMap.empty() && "Cannot destroy in-use replaceable metadata");
  }

  LLVMContext &getContext() const { return Context; }

  /// Replace all uses of this with MD.
  ///
  /// Replace all uses of this with \c MD, which is allowed to be null.
  void replaceAllUsesWith(Metadata *MD);

  /// Replace all uses of the constant with Undef in debug info metadata
  static void SalvageDebugInfo(const Constant &C);
  /// Returns the list of all DIArgList users of this.
  SmallVector<Metadata *> getAllArgListUsers();

  /// Resolve all uses of this.
  ///
  /// Resolve all uses of this, turning off RAUW permanently. If \c
  /// ResolveUsers, call \a MDNode::resolve() on any users whose last operand
  /// is resolved.
  void resolveAllUses(bool ResolveUsers = true);

private:
  void addRef(void *Ref, OwnerTy Owner);
  void dropRef(void *Ref);
  void moveRef(void *Ref, void *New, const Metadata &MD);

  /// Lazily construct RAUW support on MD.
  ///
  /// If this is an unresolved MDNode, RAUW support will be created on-demand.
  /// ValueAsMetadata always has RAUW support.
  static ReplaceableMetadataImpl *getOrCreate(Metadata &MD);

  /// Get RAUW support on MD, if it exists.
  static ReplaceableMetadataImpl *getIfExists(Metadata &MD);

  /// Check whether this node will support RAUW.
  ///
  /// Returns \c true unless getOrCreate() would return null.
  static bool isReplaceable(const Metadata &MD);
};

/// Value wrapper in the Metadata hierarchy.
///
/// This is a custom value handle that allows other metadata to refer to
/// classes in the Value hierarchy.
///
/// Because of full uniquing support, each value is only wrapped by a single \a
/// ValueAsMetadata object, so the lookup maps are far more efficient than
/// those using ValueHandleBase.
class ValueAsMetadata : public Metadata, ReplaceableMetadataImpl {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;

  /// The wrapped value; asserted non-null at construction.
  Value *V;

  /// Drop users without RAUW (during teardown).
  void dropUsers() {
    ReplaceableMetadataImpl::resolveAllUses(/* ResolveUsers */ false);
  }

protected:
  ValueAsMetadata(unsigned ID, Value *V)
      : Metadata(ID, Uniqued), ReplaceableMetadataImpl(V->getContext()), V(V) {
    assert(V && "Expected valid value");
  }

  ~ValueAsMetadata() = default;

public:
  static ValueAsMetadata *get(Value *V);

  static ConstantAsMetadata *getConstant(Value *C) {
    return cast<ConstantAsMetadata>(get(C));
  }

  static LocalAsMetadata *getLocal(Value *Local) {
    return cast<LocalAsMetadata>(get(Local));
  }

  static ValueAsMetadata *getIfExists(Value *V);

  static ConstantAsMetadata *getConstantIfExists(Value *C) {
    return cast_or_null<ConstantAsMetadata>(getIfExists(C));
  }

  static LocalAsMetadata *getLocalIfExists(Value *Local) {
    return cast_or_null<LocalAsMetadata>(getIfExists(Local));
  }

  Value *getValue() const { return V; }
  Type *getType() const { return V->getType(); }
  LLVMContext &getContext() const { return V->getContext(); }

  SmallVector<Metadata *> getAllArgListUsers() {
    return ReplaceableMetadataImpl::getAllArgListUsers();
  }

  static void handleDeletion(Value *V);
  static void handleRAUW(Value *From, Value *To);

protected:
  /// Handle collisions after \a Value::replaceAllUsesWith().
  ///
  /// RAUW isn't supported directly for \a ValueAsMetadata, but if the wrapped
  /// \a Value gets RAUW'ed and the target already exists, this is used to
  /// merge the two metadata nodes.
  void replaceAllUsesWith(Metadata *MD) {
    ReplaceableMetadataImpl::replaceAllUsesWith(MD);
  }

public:
  // A ValueAsMetadata is always one of the two concrete leaves below.
  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind ||
           MD->getMetadataID() == ConstantAsMetadataKind;
  }
};

/// Metadata wrapper around a \a Constant.
class ConstantAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  ConstantAsMetadata(Constant *C)
      : ValueAsMetadata(ConstantAsMetadataKind, C) {}

public:
  static ConstantAsMetadata *get(Constant *C) {
    return ValueAsMetadata::getConstant(C);
  }

  static ConstantAsMetadata *getIfExists(Constant *C) {
    return ValueAsMetadata::getConstantIfExists(C);
  }

  /// Covariant accessor: narrows the wrapped value to Constant.
  Constant *getValue() const {
    return cast<Constant>(ValueAsMetadata::getValue());
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == ConstantAsMetadataKind;
  }
};

/// Metadata wrapper around a non-constant (local) \a Value.
class LocalAsMetadata : public ValueAsMetadata {
  friend class ValueAsMetadata;

  LocalAsMetadata(Value *Local)
      : ValueAsMetadata(LocalAsMetadataKind, Local) {
    assert(!isa<Constant>(Local) && "Expected local value");
  }

public:
  static LocalAsMetadata *get(Value *Local) {
    return ValueAsMetadata::getLocal(Local);
  }

  static LocalAsMetadata *getIfExists(Value *Local) {
    return ValueAsMetadata::getLocalIfExists(Local);
  }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == LocalAsMetadataKind;
  }
};

/// Transitional API for extracting constants from Metadata.
///
/// This namespace contains transitional functions for metadata that points to
/// \a Constants.
///
/// In prehistory -- when metadata was a subclass of \a Value -- \a MDNode
/// operands could refer to any \a Value. There was a lot of code like this:
///
/// \code
/// MDNode *N = ...;
/// auto *CI = dyn_cast<ConstantInt>(N->getOperand(2));
/// \endcode
///
/// Now that \a Value and \a Metadata are in separate hierarchies, maintaining
/// the semantics for \a isa(), \a cast(), \a dyn_cast() (etc.) requires three
/// steps: cast in the \a Metadata hierarchy, extraction of the \a Value, and
/// cast in the \a Value hierarchy. Besides creating boiler-plate, this
/// requires subtle control flow changes.
///
/// The end-goal is to create a new type of metadata, called (e.g.) \a MDInt,
/// so that metadata can refer to numbers without traversing a bridge to the \a
/// Value hierarchy. In this final state, the code above would look like this:
///
/// \code
/// MDNode *N = ...;
/// auto *MI = dyn_cast<MDInt>(N->getOperand(2));
/// \endcode
///
/// The API in this namespace supports the transition. \a MDInt doesn't exist
/// yet, and even once it does, changing each metadata schema to use it is its
/// own mini-project. In the meantime this API prevents us from introducing
/// complex and bug-prone control flow that will disappear in the end. In
/// particular, the above code looks like this:
///
/// \code
/// MDNode *N = ...;
/// auto *CI = mdconst::dyn_extract<ConstantInt>(N->getOperand(2));
/// \endcode
///
/// The full set of provided functions includes:
///
///   mdconst::hasa                <=> isa
///   mdconst::extract             <=> cast
///   mdconst::extract_or_null     <=> cast_or_null
///   mdconst::dyn_extract         <=> dyn_cast
///   mdconst::dyn_extract_or_null <=> dyn_cast_or_null
///
/// The target of the cast must be a subclass of \a Constant.
namespace mdconst {

namespace detail {

// Declared but never defined: used only inside unevaluated sizeof()
// contexts below.
template <class T> T &make();

/// Classic sizeof-based SFINAE trait: true iff `*make<T>()` can be
/// static_cast to \c Result.
template <class T, class Result> struct HasDereference {
  using Yes = char[1];
  using No = char[2];
  template <size_t N> struct SFINAE {};

  template <class U, class V>
  static Yes &hasDereference(SFINAE<sizeof(static_cast<V>(*make<U>()))> * = 0);
  template <class U, class V> static No &hasDereference(...);

  static const bool value =
      sizeof(hasDereference<T, Result>(nullptr)) == sizeof(Yes);
};

/// Constrains the pointer-style overloads: V must derive from Constant and
/// M must dereference to something convertible to const Metadata&.
template <class V, class M> struct IsValidPointer {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            HasDereference<M, const Metadata &>::value;
};

/// Constrains the reference-style overloads analogously.
template <class V, class M> struct IsValidReference {
  static const bool value = std::is_base_of<Constant, V>::value &&
                            std::is_convertible<M, const Metadata &>::value;
};

} // end namespace detail

/// Check whether Metadata has a Value.
///
/// As an analogue to \a isa(), check whether \c MD has an \a Value inside of
/// type \c X.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, bool>
hasa(Y &&MD) {
  assert(MD && "Null pointer sent into hasa");
  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
    return isa<X>(V->getValue());
  return false;
}
template <class X, class Y>
inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, bool>
hasa(Y &MD) {
  return hasa(&MD);
}

/// Extract a Value from Metadata.
///
/// As an analogue to \a cast(), extract the \a Value subclass \c X from \c MD.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
extract(Y &&MD) {
  return cast<X>(cast<ConstantAsMetadata>(MD)->getValue());
}
template <class X, class Y>
inline std::enable_if_t<detail::IsValidReference<X, Y &>::value, X *>
extract(Y &MD) {
  return extract(&MD);
}

/// Extract a Value from Metadata, allowing null.
///
/// As an analogue to \a cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, allowing \c MD to be null.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
extract_or_null(Y &&MD) {
  if (auto *V = cast_or_null<ConstantAsMetadata>(MD))
    return cast<X>(V->getValue());
  return nullptr;
}

/// Extract a Value from Metadata, if any.
///
/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
dyn_extract(Y &&MD) {
  if (auto *V = dyn_cast<ConstantAsMetadata>(MD))
    return dyn_cast<X>(V->getValue());
  return nullptr;
}

/// Extract a Value from Metadata, if any, allowing null.
///
/// As an analogue to \a dyn_cast_or_null(), extract the \a Value subclass \c X
/// from \c MD, return null if \c MD doesn't contain a \a Value or if the \a
/// Value it does contain is of the wrong subclass, allowing \c MD to be null.
template <class X, class Y>
inline std::enable_if_t<detail::IsValidPointer<X, Y>::value, X *>
dyn_extract_or_null(Y &&MD) {
  if (auto *V = dyn_cast_or_null<ConstantAsMetadata>(MD))
    return dyn_cast<X>(V->getValue());
  return nullptr;
}

} // end namespace mdconst

//===----------------------------------------------------------------------===//
/// A single uniqued string.
///
/// These are used to efficiently contain a byte sequence for metadata.
/// MDString is always unnamed.
class MDString : public Metadata {
  friend class StringMapEntryStorage<MDString>;

  // Entry in the uniquing string map; presumably the map entry owns the
  // string bytes returned by getString() -- confirm in Metadata.cpp.
  StringMapEntry<MDString> *Entry = nullptr;

  MDString() : Metadata(MDStringKind, Uniqued) {}

public:
  // Uniqued: copying/assigning an MDString is never meaningful.
  MDString(const MDString &) = delete;
  MDString &operator=(MDString &&) = delete;
  MDString &operator=(const MDString &) = delete;

  static MDString *get(LLVMContext &Context, StringRef Str);
  static MDString *get(LLVMContext &Context, const char *Str) {
    // Tolerates a null C string by mapping it to the empty string.
    return get(Context, Str ? StringRef(Str) : StringRef());
  }

  StringRef getString() const;

  unsigned getLength() const { return (unsigned)getString().size(); }

  using iterator = StringRef::iterator;

  /// Pointer to the first byte of the string.
  iterator begin() const { return getString().begin(); }

  /// Pointer to one byte past the end of the string.
  iterator end() const { return getString().end(); }

  const unsigned char *bytes_begin() const { return getString().bytes_begin(); }
  const unsigned char *bytes_end() const { return getString().bytes_end(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast.
  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == MDStringKind;
  }
};

/// A collection of metadata nodes that might be associated with a
/// memory access used by the alias-analysis infrastructure.
struct AAMDNodes {
  explicit AAMDNodes() = default;
  explicit AAMDNodes(MDNode *T, MDNode *TS, MDNode *S, MDNode *N)
      : TBAA(T), TBAAStruct(TS), Scope(S), NoAlias(N) {}

  bool operator==(const AAMDNodes &A) const {
    return TBAA == A.TBAA && TBAAStruct == A.TBAAStruct && Scope == A.Scope &&
           NoAlias == A.NoAlias;
  }

  bool operator!=(const AAMDNodes &A) const { return !(*this == A); }

  /// True iff any of the four tags is present.
  explicit operator bool() const {
    return TBAA || TBAAStruct || Scope || NoAlias;
  }

  /// The tag for type-based alias analysis.
  MDNode *TBAA = nullptr;

  /// The tag for type-based alias analysis (tbaa struct).
  MDNode *TBAAStruct = nullptr;

  /// The tag for alias scope specification (used with noalias).
  MDNode *Scope = nullptr;

  /// The tag specifying the noalias scope.
  MDNode *NoAlias = nullptr;

  // Shift tbaa Metadata node to start off bytes later
  static MDNode *shiftTBAA(MDNode *M, size_t off);

  // Shift tbaa.struct Metadata node to start off bytes later
  static MDNode *shiftTBAAStruct(MDNode *M, size_t off);

  // Extend tbaa Metadata node to apply to a series of bytes of length len.
  // A size of -1 denotes an unknown size.
  static MDNode *extendToTBAA(MDNode *TBAA, ssize_t len);

  /// Given two sets of AAMDNodes that apply to the same pointer,
  /// give the best AAMDNodes that are compatible with both (i.e. a set of
  /// nodes whose allowable aliasing conclusions are a subset of those
  /// allowable by both of the inputs). However, for efficiency
  /// reasons, do not create any new MDNodes.
  AAMDNodes intersect(const AAMDNodes &Other) const {
    // Keep a tag only when both sides agree on the exact same node;
    // otherwise drop it (conservatively correct, allocation-free).
    AAMDNodes Result;
    Result.TBAA = Other.TBAA == TBAA ? TBAA : nullptr;
    Result.TBAAStruct = Other.TBAAStruct == TBAAStruct ? TBAAStruct : nullptr;
    Result.Scope = Other.Scope == Scope ? Scope : nullptr;
    Result.NoAlias = Other.NoAlias == NoAlias ? NoAlias : nullptr;
    return Result;
  }

  /// Create a new AAMDNode that describes this AAMDNode after applying a
  /// constant offset to the start of the pointer.
  AAMDNodes shift(size_t Offset) const {
    AAMDNodes Result;
    Result.TBAA = TBAA ? shiftTBAA(TBAA, Offset) : nullptr;
    Result.TBAAStruct =
        TBAAStruct ? shiftTBAAStruct(TBAAStruct, Offset) : nullptr;
    // Scope and noalias tags are offset-independent.
    Result.Scope = Scope;
    Result.NoAlias = NoAlias;
    return Result;
  }

  /// Create a new AAMDNode that describes this AAMDNode after extending it to
  /// apply to a series of bytes of length Len. A size of -1 denotes an unknown
  /// size.
  AAMDNodes extendTo(ssize_t Len) const {
    AAMDNodes Result;
    Result.TBAA = TBAA ? extendToTBAA(TBAA, Len) : nullptr;
    // tbaa.struct contains (offset, size, type) triples. Extending the length
    // of the tbaa.struct doesn't require changing this (though more information
    // could be provided by adding more triples at subsequent lengths).
    Result.TBAAStruct = TBAAStruct;
    Result.Scope = Scope;
    Result.NoAlias = NoAlias;
    return Result;
  }

  /// Given two sets of AAMDNodes applying to potentially different locations,
  /// determine the best AAMDNodes that apply to both.
  AAMDNodes merge(const AAMDNodes &Other) const;

  /// Determine the best AAMDNodes after concatenating two different locations
  /// together. Different from `merge`, where different locations should
  /// overlap each other, `concat` puts non-overlapping locations together.
  AAMDNodes concat(const AAMDNodes &Other) const;
};

// Specialize DenseMapInfo for AAMDNodes.
template<>
struct DenseMapInfo<AAMDNodes> {
  // Only the TBAA slot needs to carry the sentinel; the other three tags are
  // null in both special keys.
  static inline AAMDNodes getEmptyKey() {
    return AAMDNodes(DenseMapInfo<MDNode *>::getEmptyKey(),
                     nullptr, nullptr, nullptr);
  }

  static inline AAMDNodes getTombstoneKey() {
    return AAMDNodes(DenseMapInfo<MDNode *>::getTombstoneKey(),
                     nullptr, nullptr, nullptr);
  }

  static unsigned getHashValue(const AAMDNodes &Val) {
    return DenseMapInfo<MDNode *>::getHashValue(Val.TBAA) ^
           DenseMapInfo<MDNode *>::getHashValue(Val.TBAAStruct) ^
           DenseMapInfo<MDNode *>::getHashValue(Val.Scope) ^
           DenseMapInfo<MDNode *>::getHashValue(Val.NoAlias);
  }

  static bool isEqual(const AAMDNodes &LHS, const AAMDNodes &RHS) {
    return LHS == RHS;
  }
};

/// Tracking metadata reference owned by Metadata.
///
/// Similar to \a TrackingMDRef, but it's expected to be owned by an instance
/// of \a Metadata, which has the option of registering itself for callbacks to
/// re-unique itself.
///
/// In particular, this is used by \a MDNode.
class MDOperand {
  Metadata *MD = nullptr;

public:
  MDOperand() = default;
  MDOperand(const MDOperand &) = delete;
  // Moving transfers Op's tracking registration to this operand via
  // retrack(), which keeps ownership callbacks intact (see
  // MetadataTracking::retrack).
  MDOperand(MDOperand &&Op) {
    MD = Op.MD;
    if (MD)
      (void)MetadataTracking::retrack(Op.MD, MD);
    Op.MD = nullptr;
  }
  MDOperand &operator=(const MDOperand &) = delete;
  // NOTE(review): move-assignment overwrites MD without untracking the
  // previous value first; looks like it is only used on empty/reset operands
  // -- confirm against callers before relying on assignment into a live one.
  MDOperand &operator=(MDOperand &&Op) {
    MD = Op.MD;
    if (MD)
      (void)MetadataTracking::retrack(Op.MD, MD);
    Op.MD = nullptr;
    return *this;
  }

  // Check if MDOperand is of type MDString and equals `Str`.
  // NOTE(review): isa<> on a null pointer asserts, so this presumably
  // requires a non-null operand -- confirm with callers.
  bool equalsStr(StringRef Str) const {
    return isa<MDString>(this->get()) &&
           cast<MDString>(this->get())->getString() == Str;
  }

  ~MDOperand() { untrack(); }

  Metadata *get() const { return MD; }
  operator Metadata *() const { return get(); }
  Metadata *operator->() const { return get(); }
  Metadata &operator*() const { return *get(); }

  /// Stop tracking and clear the operand.
  void reset() {
    untrack();
    MD = nullptr;
  }
  /// Retarget the operand at \c MD, tracking on behalf of \c Owner (which may
  /// be null for owner-less tracking).
  void reset(Metadata *MD, Metadata *Owner) {
    untrack();
    this->MD = MD;
    track(Owner);
  }

private:
  void track(Metadata *Owner) {
    if (MD) {
      if (Owner)
        MetadataTracking::track(this, *MD, *Owner);
      else
        MetadataTracking::track(MD);
    }
  }

  void untrack() {
    // The tracking map keys on &MD; this assert documents that `this` and
    // &MD are interchangeable addresses (MD is the sole member).
    assert(static_cast<void *>(this) == &MD && "Expected same address");
    if (MD)
      MetadataTracking::untrack(MD);
  }
};

// Allow isa/cast/dyn_cast to look through an MDOperand to its Metadata*.
template <> struct simplify_type<MDOperand> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(MDOperand &MD) { return MD.get(); }
};

template <> struct simplify_type<const MDOperand> {
  using SimpleType = Metadata *;

  static SimpleType getSimplifiedValue(const MDOperand &MD) { return MD.get(); }
};

/// Pointer to the context, with optional RAUW support.
///
/// Either a raw (non-null) pointer to the \a LLVMContext, or an owned pointer
/// to \a ReplaceableMetadataImpl (which has a reference to \a LLVMContext).
class ContextAndReplaceableUses {
  PointerUnion<LLVMContext *, ReplaceableMetadataImpl *> Ptr;

public:
  ContextAndReplaceableUses(LLVMContext &Context) : Ptr(&Context) {}
  ContextAndReplaceableUses(
      std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses)
      : Ptr(ReplaceableUses.release()) {
    assert(getReplaceableUses() && "Expected non-null replaceable uses");
  }
  // Non-default-constructible, non-copyable, non-movable: the union either
  // points at a context or owns the RAUW support outright.
  ContextAndReplaceableUses() = delete;
  ContextAndReplaceableUses(ContextAndReplaceableUses &&) = delete;
  ContextAndReplaceableUses(const ContextAndReplaceableUses &) = delete;
  ContextAndReplaceableUses &operator=(ContextAndReplaceableUses &&) = delete;
  ContextAndReplaceableUses &
  operator=(const ContextAndReplaceableUses &) = delete;
  // getReplaceableUses() is null when we hold a bare context pointer, so
  // this only deletes when we actually own RAUW support.
  ~ContextAndReplaceableUses() { delete getReplaceableUses(); }

  operator LLVMContext &() { return getContext(); }

  /// Whether this contains RAUW support.
  bool hasReplaceableUses() const {
    return isa<ReplaceableMetadataImpl *>(Ptr);
  }

  LLVMContext &getContext() const {
    if (hasReplaceableUses())
      return getReplaceableUses()->getContext();
    return *cast<LLVMContext *>(Ptr);
  }

  ReplaceableMetadataImpl *getReplaceableUses() const {
    if (hasReplaceableUses())
      return cast<ReplaceableMetadataImpl *>(Ptr);
    return nullptr;
  }

  /// Ensure that this has RAUW support, and then return it.
  ReplaceableMetadataImpl *getOrCreateReplaceableUses() {
    if (!hasReplaceableUses())
      makeReplaceable(std::make_unique<ReplaceableMetadataImpl>(getContext()));
    return getReplaceableUses();
  }

  /// Assign RAUW support to this.
  ///
  /// Make this replaceable, taking ownership of \c ReplaceableUses (which must
  /// not be null).
  void
  makeReplaceable(std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses) {
    assert(ReplaceableUses && "Expected non-null replaceable uses");
    assert(&ReplaceableUses->getContext() == &getContext() &&
           "Expected same context");
    delete getReplaceableUses();
    Ptr = ReplaceableUses.release();
  }

  /// Drop RAUW support.
  ///
  /// Cede ownership of RAUW support, returning it.
  std::unique_ptr<ReplaceableMetadataImpl> takeReplaceableUses() {
    assert(hasReplaceableUses() && "Expected to own replaceable uses");
    std::unique_ptr<ReplaceableMetadataImpl> ReplaceableUses(
        getReplaceableUses());
    // Fall back to holding a bare context pointer.
    Ptr = &ReplaceableUses->getContext();
    return ReplaceableUses;
  }
};

/// Deleter used by the Temp##CLASS unique_ptr aliases below; the call
/// operator is declared inline (it needs a complete MDNode to be defined).
struct TempMDNodeDeleter {
  inline void operator()(MDNode *Node) const;
};

// For every node class in Metadata.def, define TempCLASS as a uniquely-owned
// temporary node handle.
#define HANDLE_MDNODE_LEAF(CLASS)                                              \
  using Temp##CLASS = std::unique_ptr<CLASS, TempMDNodeDeleter>;
#define HANDLE_MDNODE_BRANCH(CLASS) HANDLE_MDNODE_LEAF(CLASS)
#include "llvm/IR/Metadata.def"

/// Metadata node.
///
/// Metadata nodes can be uniqued, like constants, or distinct. Temporary
/// metadata nodes (with full support for RAUW) can be used to delay uniquing
/// until forward references are known. The basic metadata node is an \a
/// MDTuple.
///
/// There is limited support for RAUW at construction time. At construction
/// time, if any operand is a temporary node (or an unresolved uniqued node,
/// which indicates a transitive temporary operand), the node itself will be
/// unresolved. As soon as all operands become resolved, it will drop RAUW
/// support permanently.
///
/// If an unresolved node is part of a cycle, \a resolveCycles() needs
/// to be called on some member of the cycle once all temporary nodes have been
/// replaced.
///
/// MDNodes can be large or small, as well as resizable or non-resizable.
/// Large MDNodes' operands are allocated in a separate storage vector,
/// whereas small MDNodes' operands are co-allocated. Distinct and temporary
/// MDNodes are resizable, but only MDTuples support this capability.
///
/// Clients can add operands to resizable MDNodes using push_back().
class MDNode : public Metadata {
  friend class ReplaceableMetadataImpl;
  friend class LLVMContextImpl;
  friend class DIArgList;

  /// The header that is co-allocated with an MDNode along with its "small"
  /// operands. It is located immediately before the main body of the node.
  /// The operands are in turn located immediately before the header.
  /// For resizable MDNodes, the space for the storage vector is also allocated
  /// immediately before the header, overlapping with the operands.
  /// Explicitly set alignment because bitfields by default have an
  /// alignment of 1 on z/OS.
  struct alignas(alignof(size_t)) Header {
    bool IsResizable : 1;
    bool IsLarge : 1;
    /// Capacity of the co-allocated ("small") operand array.
    size_t SmallSize : 4;
    /// Number of co-allocated operands currently in use.
    size_t SmallNumOps : 4;
    // Pad the bitfield out to a full size_t.
    size_t : sizeof(size_t) * CHAR_BIT - 10;

    /// Number of this node's operands that are not yet resolved; see
    /// MDNode::isResolved().
    unsigned NumUnresolved = 0;
    using LargeStorageVector = SmallVector<MDOperand, 0>;

    static constexpr size_t NumOpsFitInVector =
        sizeof(LargeStorageVector) / sizeof(MDOperand);
    static_assert(
        NumOpsFitInVector * sizeof(MDOperand) == sizeof(LargeStorageVector),
        "sizeof(LargeStorageVector) must be a multiple of sizeof(MDOperand)");

    static constexpr size_t MaxSmallSize = 15;

    static constexpr size_t getOpSize(unsigned NumOps) {
      return sizeof(MDOperand) * NumOps;
    }
    /// Returns the number of operands the node has space for based on its
    /// allocation characteristics.
    static size_t getSmallSize(size_t NumOps, bool IsResizable, bool IsLarge) {
      // Resizable nodes reserve at least NumOpsFitInVector small slots so the
      // large storage vector can later be constructed in the same space.
      return IsLarge ? NumOpsFitInVector
                     : std::max(NumOps, NumOpsFitInVector * IsResizable);
    }
    /// Returns the number of bytes allocated for operands and header.
    static size_t getAllocSize(StorageType Storage, size_t NumOps) {
      return getOpSize(
                 getSmallSize(NumOps, isResizable(Storage), isLarge(NumOps))) +
             sizeof(Header);
    }

    /// Only temporary and distinct nodes are resizable.
    static bool isResizable(StorageType Storage) { return Storage != Uniqued; }
    static bool isLarge(size_t NumOps) { return NumOps > MaxSmallSize; }

    size_t getAllocSize() const {
      return getOpSize(SmallSize) + sizeof(Header);
    }
    /// Start of the single allocation covering operands + header; the header
    /// sits at the end of it (just before the node body).
    void *getAllocation() {
      return reinterpret_cast<char *>(this + 1) -
             alignTo(getAllocSize(), alignof(uint64_t));
    }

    /// Location of the large storage vector, which overlaps the small-operand
    /// space immediately before this header.
    void *getLargePtr() const {
      static_assert(alignof(LargeStorageVector) <= alignof(Header),
                    "LargeStorageVector too strongly aligned");
      return reinterpret_cast<char *>(const_cast<Header *>(this)) -
             sizeof(LargeStorageVector);
    }

    void *getSmallPtr();

    LargeStorageVector &getLarge() {
      assert(IsLarge);
      return *reinterpret_cast<LargeStorageVector *>(getLargePtr());
    }

    const LargeStorageVector &getLarge() const {
      assert(IsLarge);
      return *reinterpret_cast<const LargeStorageVector *>(getLargePtr());
    }

    void resizeSmall(size_t NumOps);
    void resizeSmallToLarge(size_t NumOps);
    void resize(size_t NumOps);

    explicit Header(size_t NumOps, StorageType Storage);
    ~Header();

    MutableArrayRef<MDOperand> operands() {
      if (IsLarge)
        return getLarge();
      // Small operands live immediately before the header.
      return MutableArrayRef(
          reinterpret_cast<MDOperand *>(this) - SmallSize, SmallNumOps);
    }

    ArrayRef<MDOperand> operands() const {
      if (IsLarge)
        return getLarge();
      return ArrayRef(reinterpret_cast<const MDOperand *>(this) - SmallSize,
                      SmallNumOps);
    }

    unsigned getNumOperands() const {
      if (!IsLarge)
        return SmallNumOps;
      return getLarge().size();
    }
  };

  // The Header is co-allocated immediately before the node itself.
  Header &getHeader() { return *(reinterpret_cast<Header *>(this) - 1); }

  const Header &getHeader() const {
    return *(reinterpret_cast<const Header *>(this) - 1);
  }

  ContextAndReplaceableUses Context;

protected:
  MDNode(LLVMContext &Context, unsigned ID, StorageType Storage,
         ArrayRef<Metadata *> Ops1, ArrayRef<Metadata *> Ops2 = std::nullopt);
  ~MDNode() = default;

  void *operator new(size_t Size, size_t NumOps, StorageType Storage);
  void operator delete(void *Mem);

  /// Required by std, but never called.
  void operator delete(void *, unsigned) {
    llvm_unreachable("Constructor throws?");
  }

  /// Required by std, but never called.
  void operator delete(void *, unsigned, bool) {
    llvm_unreachable("Constructor throws?");
  }

  void dropAllReferences();

  MDOperand *mutable_begin() { return getHeader().operands().begin(); }
  MDOperand *mutable_end() { return getHeader().operands().end(); }

  using mutable_op_range = iterator_range<MDOperand *>;

  mutable_op_range mutable_operands() {
    return mutable_op_range(mutable_begin(), mutable_end());
  }

public:
  MDNode(const MDNode &) = delete;
  void operator=(const MDNode &) = delete;
  void *operator new(size_t) = delete;

  // Convenience forwarders to the MDTuple factories; defined inline after
  // MDTuple below.
  static inline MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs);
  static inline MDTuple *getIfExists(LLVMContext &Context,
                                     ArrayRef<Metadata *> MDs);
  static inline MDTuple *getDistinct(LLVMContext &Context,
                                     ArrayRef<Metadata *> MDs);
  static inline TempMDTuple getTemporary(LLVMContext &Context,
                                         ArrayRef<Metadata *> MDs);

  /// Create a (temporary) clone of this.
  TempMDNode clone() const;

  /// Deallocate a node created by getTemporary.
  ///
  /// Calls \c replaceAllUsesWith(nullptr) before deleting, so any remaining
  /// references will be reset.
  static void deleteTemporary(MDNode *N);

  LLVMContext &getContext() const { return Context.getContext(); }

  /// Replace a specific operand.
  void replaceOperandWith(unsigned I, Metadata *New);

  /// Check if node is fully resolved.
  ///
  /// If \a isTemporary(), this always returns \c false; if \a isDistinct(),
  /// this always returns \c true.
  ///
  /// If \a isUniqued(), returns \c true if this has already dropped RAUW
  /// support (because all operands are resolved).
  ///
  /// As forward declarations are resolved, their containers should get
  /// resolved automatically. However, if this (or one of its operands) is
  /// involved in a cycle, \a resolveCycles() needs to be called explicitly.
  bool isResolved() const { return !isTemporary() && !getNumUnresolved(); }

  bool isUniqued() const { return Storage == Uniqued; }
  bool isDistinct() const { return Storage == Distinct; }
  bool isTemporary() const { return Storage == Temporary; }

  /// RAUW a temporary.
  ///
  /// \pre \a isTemporary() must be \c true.
  void replaceAllUsesWith(Metadata *MD) {
    assert(isTemporary() && "Expected temporary node");
    // No-op if nothing ever took a tracking reference to this node.
    if (Context.hasReplaceableUses())
      Context.getReplaceableUses()->replaceAllUsesWith(MD);
  }

  /// Resolve cycles.
  ///
  /// Once all forward declarations have been resolved, force cycles to be
  /// resolved.
  ///
  /// \pre No operands (or operands' operands, etc.) have \a isTemporary().
  void resolveCycles();

  /// Resolve a unique, unresolved node.
  void resolve();

  /// Replace a temporary node with a permanent one.
  ///
  /// Try to create a uniqued version of \c N -- in place, if possible -- and
  /// return it. If \c N cannot be uniqued, return a distinct node instead.
  template <class T>
  static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
  replaceWithPermanent(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithPermanentImpl());
  }

  /// Replace a temporary node with a uniqued one.
  ///
  /// Create a uniqued version of \c N -- in place, if possible -- and return
  /// it. Takes ownership of the temporary node.
  ///
  /// \pre N does not self-reference.
  template <class T>
  static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
  replaceWithUniqued(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithUniquedImpl());
  }

  /// Replace a temporary node with a distinct one.
  ///
  /// Create a distinct version of \c N -- in place, if possible -- and return
  /// it. Takes ownership of the temporary node.
  template <class T>
  static std::enable_if_t<std::is_base_of<MDNode, T>::value, T *>
  replaceWithDistinct(std::unique_ptr<T, TempMDNodeDeleter> N) {
    return cast<T>(N.release()->replaceWithDistinctImpl());
  }

  /// Print in tree shape.
  ///
  /// Prints definition of \c this in tree shape.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  /// @{
  void printTree(raw_ostream &OS, const Module *M = nullptr) const;
  void printTree(raw_ostream &OS, ModuleSlotTracker &MST,
                 const Module *M = nullptr) const;
  /// @}

  /// User-friendly dump in tree shape.
  ///
  /// If \c M is provided, metadata nodes will be numbered canonically;
  /// otherwise, pointer addresses are substituted.
  ///
  /// Note: this uses an explicit overload instead of default arguments so that
  /// the nullptr version is easy to call from a debugger.
  ///
  /// @{
  void dumpTree() const;
  void dumpTree(const Module *M) const;
  /// @}

private:
  MDNode *replaceWithPermanentImpl();
  MDNode *replaceWithUniquedImpl();
  MDNode *replaceWithDistinctImpl();

protected:
  /// Set an operand.
  ///
  /// Sets the operand directly, without worrying about uniquing.
  void setOperand(unsigned I, Metadata *New);

  unsigned getNumUnresolved() const { return getHeader().NumUnresolved; }

  void setNumUnresolved(unsigned N) { getHeader().NumUnresolved = N; }
  void storeDistinctInContext();
  template <class T, class StoreT>
  static T *storeImpl(T *N, StorageType Storage, StoreT &Store);
  template <class T> static T *storeImpl(T *N, StorageType Storage);

  /// Resize the node to hold \a NumOps operands.
  ///
  /// \pre \a isTemporary() or \a isDistinct()
  /// \pre MetadataID == MDTupleKind
  void resize(size_t NumOps) {
    assert(!isUniqued() && "Resizing is not supported for uniqued nodes");
    assert(getMetadataID() == MDTupleKind &&
           "Resizing is not supported for this node kind");
    getHeader().resize(NumOps);
  }

private:
  void handleChangedOperand(void *Ref, Metadata *New);

  /// Drop RAUW support, if any.
  void dropReplaceableUses();

  void resolveAfterOperandChange(Metadata *Old, Metadata *New);
  void decrementUnresolvedOperandCount();
  void countUnresolvedOperands();

  /// Mutate this to be "uniqued".
  ///
  /// Mutate this so that \a isUniqued().
  /// \pre \a isTemporary().
  /// \pre already added to uniquing set.
  void makeUniqued();

  /// Mutate this to be "distinct".
  ///
  /// Mutate this so that \a isDistinct().
  /// \pre \a isTemporary().
  void makeDistinct();

  void deleteAsSubclass();
  MDNode *uniquify();
  void eraseFromStore();

  // Tag-dispatch helpers: the true_type overloads run only for node types
  // that cache a hash (see HasCachedHash); the false_type overloads are
  // no-ops.
  template <class NodeTy> struct HasCachedHash;
  template <class NodeTy>
  static void dispatchRecalculateHash(NodeTy *N, std::true_type) {
    N->recalculateHash();
  }
  template <class NodeTy>
  static void dispatchRecalculateHash(NodeTy *, std::false_type) {}
  template <class NodeTy>
  static void dispatchResetHash(NodeTy *N, std::true_type) {
    N->setHash(0);
  }
  template <class NodeTy>
  static void dispatchResetHash(NodeTy *, std::false_type) {}

  /// Merge branch weights from two direct callsites.
  static MDNode *mergeDirectCallProfMetadata(MDNode *A, MDNode *B,
                                             const Instruction *AInstr,
                                             const Instruction *BInstr);

public:
  using op_iterator = const MDOperand *;
  using op_range = iterator_range<op_iterator>;

  op_iterator op_begin() const {
    return const_cast<MDNode *>(this)->mutable_begin();
  }

  op_iterator op_end() const {
    return const_cast<MDNode *>(this)->mutable_end();
  }

  ArrayRef<MDOperand> operands() const { return getHeader().operands(); }

  const MDOperand &getOperand(unsigned I) const {
    assert(I < getNumOperands() && "Out of range");
    return getHeader().operands()[I];
  }

  /// Return number of MDNode operands.
  unsigned getNumOperands() const { return getHeader().getNumOperands(); }

  /// Methods for support type inquiry through isa, cast, and dyn_cast:
  static bool classof(const Metadata *MD) {
    switch (MD->getMetadataID()) {
    default:
      return false;
#define HANDLE_MDNODE_LEAF(CLASS)                                              \
  case CLASS##Kind:                                                            \
    return true;
#include "llvm/IR/Metadata.def"
    }
  }

  /// Check whether MDNode is a vtable access.
  bool isTBAAVtableAccess() const;

  /// Methods for metadata merging.
  static MDNode *concatenate(MDNode *A, MDNode *B);
  static MDNode *intersect(MDNode *A, MDNode *B);
  static MDNode *getMostGenericTBAA(MDNode *A, MDNode *B);
  static MDNode *getMostGenericFPMath(MDNode *A, MDNode *B);
  static MDNode *getMostGenericRange(MDNode *A, MDNode *B);
  static MDNode *getMostGenericAliasScope(MDNode *A, MDNode *B);
  static MDNode *getMostGenericAlignmentOrDereferenceable(MDNode *A, MDNode *B);
  /// Merge !prof metadata from two instructions.
  /// Currently only implemented with direct callsites with branch weights.
  static MDNode *getMergedProfMetadata(MDNode *A, MDNode *B,
                                       const Instruction *AInstr,
                                       const Instruction *BInstr);
};

/// Tuple of metadata.
///
/// This is the simple \a MDNode arbitrary tuple. Nodes are uniqued by
/// default based on their operands.
class MDTuple : public MDNode {
  friend class LLVMContextImpl;
  friend class MDNode;

  MDTuple(LLVMContext &C, StorageType Storage, unsigned Hash,
          ArrayRef<Metadata *> Vals)
      : MDNode(C, MDTupleKind, Storage, Vals) {
    setHash(Hash);
  }

  ~MDTuple() { dropAllReferences(); }

  /// Cache the node's hash in the Metadata subclass-data word.
  void setHash(unsigned Hash) { SubclassData32 = Hash; }
  void recalculateHash();

  static MDTuple *getImpl(LLVMContext &Context, ArrayRef<Metadata *> MDs,
                          StorageType Storage, bool ShouldCreate = true);

  TempMDTuple cloneImpl() const {
    ArrayRef<MDOperand> Operands = operands();
    return getTemporary(getContext(), SmallVector<Metadata *, 4>(
                                          Operands.begin(), Operands.end()));
  }

public:
  /// Get the hash, if any.
  unsigned getHash() const { return SubclassData32; }

  static MDTuple *get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Uniqued);
  }

  static MDTuple *getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Uniqued, /* ShouldCreate */ false);
  }

  /// Return a distinct node.
  ///
  /// Return a distinct node -- i.e., a node that is not uniqued.
  static MDTuple *getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
    return getImpl(Context, MDs, Distinct);
  }

  /// Return a temporary node.
  ///
  /// For use in constructing cyclic MDNode structures. A temporary MDNode is
  /// not uniqued, may be RAUW'd, and must be manually deleted with
  /// deleteTemporary.
  static TempMDTuple getTemporary(LLVMContext &Context,
                                  ArrayRef<Metadata *> MDs) {
    return TempMDTuple(getImpl(Context, MDs, Temporary));
  }

  /// Return a (temporary) clone of this.
  TempMDTuple clone() const { return cloneImpl(); }

  /// Append an element to the tuple. This will resize the node.
  void push_back(Metadata *MD) {
    size_t NumOps = getNumOperands();
    resize(NumOps + 1);
    setOperand(NumOps, MD);
  }

  /// Shrink the operands by 1.
  void pop_back() { resize(getNumOperands() - 1); }

  static bool classof(const Metadata *MD) {
    return MD->getMetadataID() == MDTupleKind;
  }
};

// Out-of-line definitions of the MDNode convenience forwarders declared above.
MDTuple *MDNode::get(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::get(Context, MDs);
}

MDTuple *MDNode::getIfExists(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getIfExists(Context, MDs);
}

MDTuple *MDNode::getDistinct(LLVMContext &Context, ArrayRef<Metadata *> MDs) {
  return MDTuple::getDistinct(Context, MDs);
}

TempMDTuple MDNode::getTemporary(LLVMContext &Context,
                                 ArrayRef<Metadata *> MDs) {
  return MDTuple::getTemporary(Context, MDs);
}

void TempMDNodeDeleter::operator()(MDNode *Node) const {
  MDNode::deleteTemporary(Node);
}

/// This is a simple wrapper around an MDNode which provides a higher-level
/// interface by hiding the details of how alias analysis information is encoded
/// in its operands.
class AliasScopeNode {
  const MDNode *Node = nullptr;

public:
  AliasScopeNode() = default;
  explicit AliasScopeNode(const MDNode *N) : Node(N) {}

  /// Get the MDNode for this AliasScopeNode.
  const MDNode *getNode() const { return Node; }

  /// Get the MDNode for this AliasScopeNode's domain (operand 1), or null if
  /// absent or not an MDNode.
  const MDNode *getDomain() const {
    if (Node->getNumOperands() < 2)
      return nullptr;
    return dyn_cast_or_null<MDNode>(Node->getOperand(1));
  }
  /// Get the scope's name (operand 2), or the empty string if absent or not
  /// an MDString.
  StringRef getName() const {
    if (Node->getNumOperands() > 2)
      if (MDString *N = dyn_cast_or_null<MDString>(Node->getOperand(2)))
        return N->getString();
    return StringRef();
  }
};

/// Typed iterator through MDNode operands.
///
/// An iterator that transforms an \a MDNode::iterator into an iterator over a
/// particular Metadata subclass.
1468 template <class T> class TypedMDOperandIterator { 1469 MDNode::op_iterator I = nullptr; 1470 1471 public: 1472 using iterator_category = std::input_iterator_tag; 1473 using value_type = T *; 1474 using difference_type = std::ptrdiff_t; 1475 using pointer = void; 1476 using reference = T *; 1477 1478 TypedMDOperandIterator() = default; 1479 explicit TypedMDOperandIterator(MDNode::op_iterator I) : I(I) {} 1480 1481 T *operator*() const { return cast_or_null<T>(*I); } 1482 1483 TypedMDOperandIterator &operator++() { 1484 ++I; 1485 return *this; 1486 } 1487 1488 TypedMDOperandIterator operator++(int) { 1489 TypedMDOperandIterator Temp(*this); 1490 ++I; 1491 return Temp; 1492 } 1493 1494 bool operator==(const TypedMDOperandIterator &X) const { return I == X.I; } 1495 bool operator!=(const TypedMDOperandIterator &X) const { return I != X.I; } 1496 }; 1497 1498 /// Typed, array-like tuple of metadata. 1499 /// 1500 /// This is a wrapper for \a MDTuple that makes it act like an array holding a 1501 /// particular type of metadata. 
template <class T> class MDTupleTypedArrayWrapper {
  const MDTuple *N = nullptr;

public:
  MDTupleTypedArrayWrapper() = default;
  MDTupleTypedArrayWrapper(const MDTuple *N) : N(N) {}

  /// Implicit conversion from a wrapper whose element type converts to \c T.
  template <class U>
  MDTupleTypedArrayWrapper(
      const MDTupleTypedArrayWrapper<U> &Other,
      std::enable_if_t<std::is_convertible<U *, T *>::value> * = nullptr)
      : N(Other.get()) {}

  /// Explicit conversion from a wrapper whose element type does not convert
  /// to \c T.
  template <class U>
  explicit MDTupleTypedArrayWrapper(
      const MDTupleTypedArrayWrapper<U> &Other,
      std::enable_if_t<!std::is_convertible<U *, T *>::value> * = nullptr)
      : N(Other.get()) {}

  explicit operator bool() const { return get(); }
  explicit operator MDTuple *() const { return get(); }

  MDTuple *get() const { return const_cast<MDTuple *>(N); }
  MDTuple *operator->() const { return get(); }
  MDTuple &operator*() const { return *get(); }

  // FIXME: Fix callers and remove condition on N.
  unsigned size() const { return N ? N->getNumOperands() : 0u; }
  bool empty() const { return N ? N->getNumOperands() == 0 : true; }
  // NOTE(review): unlike size()/empty(), operator[] does not guard against a
  // null N -- callers must check first.
  T *operator[](unsigned I) const { return cast_or_null<T>(N->getOperand(I)); }

  // FIXME: Fix callers and remove condition on N.
  using iterator = TypedMDOperandIterator<T>;

  iterator begin() const { return N ? iterator(N->op_begin()) : iterator(); }
  iterator end() const { return N ? iterator(N->op_end()) : iterator(); }
};

#define HANDLE_METADATA(CLASS)                                                 \
  using CLASS##Array = MDTupleTypedArrayWrapper<CLASS>;
#include "llvm/IR/Metadata.def"

/// Placeholder metadata for operands of distinct MDNodes.
///
/// This is a lightweight placeholder for an operand of a distinct node. Its
/// purpose is to help track forward references when creating a distinct node.
/// This allows distinct nodes involved in a cycle to be constructed before
/// their operands without requiring a heavyweight temporary node with
/// full-blown RAUW support.
///
/// Each placeholder supports only a single MDNode user. Clients should pass
/// an ID, retrieved via \a getID(), to indicate the "real" operand that this
/// should be replaced with.
///
/// While it would be possible to implement move operators, they would be
/// fairly expensive. Leave them unimplemented to discourage their use
/// (clients can use std::deque, std::list, BumpPtrAllocator, etc.).
class DistinctMDOperandPlaceholder : public Metadata {
  friend class MetadataTracking;

  /// The single tracked use of this placeholder, or null if there is none.
  Metadata **Use = nullptr;

public:
  explicit DistinctMDOperandPlaceholder(unsigned ID)
      : Metadata(DistinctMDOperandPlaceholderKind, Distinct) {
    // The client-supplied ID is stashed in the subclass-data word.
    SubclassData32 = ID;
  }

  DistinctMDOperandPlaceholder() = delete;
  DistinctMDOperandPlaceholder(DistinctMDOperandPlaceholder &&) = delete;
  DistinctMDOperandPlaceholder(const DistinctMDOperandPlaceholder &) = delete;

  ~DistinctMDOperandPlaceholder() {
    // Zero out the use so it does not dangle.
    if (Use)
      *Use = nullptr;
  }

  unsigned getID() const { return SubclassData32; }

  /// Replace the use of this with MD.
  void replaceUseWith(Metadata *MD) {
    if (!Use)
      return;
    *Use = MD;

    // Start tracking the new operand before untracking this placeholder,
    // which resets Use.
    if (*Use)
      MetadataTracking::track(*Use);

    Metadata *T = cast<Metadata>(this);
    MetadataTracking::untrack(T);
    assert(!Use && "Use is still being tracked despite being untracked!");
  }
};

//===----------------------------------------------------------------------===//
/// A tuple of MDNodes.
///
/// Despite its name, a NamedMDNode isn't itself an MDNode.
///
/// NamedMDNodes are named module-level entities that contain lists of MDNodes.
///
/// It is illegal for a NamedMDNode to appear as an operand of an MDNode.
class NamedMDNode : public ilist_node<NamedMDNode> {
  friend class LLVMContextImpl;
  friend class Module;

  std::string Name;
  Module *Parent = nullptr;
  // Kept as an opaque pointer so this header need not define TrackingMDRef.
  void *Operands; // SmallVector<TrackingMDRef, 4>

  void setParent(Module *M) { Parent = M; }

  explicit NamedMDNode(const Twine &N);

  /// Index-based bidirectional iterator over a NamedMDNode's operands.
  template <class T1, class T2> class op_iterator_impl {
    friend class NamedMDNode;

    const NamedMDNode *Node = nullptr;
    unsigned Idx = 0;

    op_iterator_impl(const NamedMDNode *N, unsigned i) : Node(N), Idx(i) {}

  public:
    using iterator_category = std::bidirectional_iterator_tag;
    using value_type = T2;
    using difference_type = std::ptrdiff_t;
    using pointer = value_type *;
    using reference = value_type &;

    op_iterator_impl() = default;

    // Note: only the index is compared, not the owning node.
    bool operator==(const op_iterator_impl &o) const { return Idx == o.Idx; }
    bool operator!=(const op_iterator_impl &o) const { return Idx != o.Idx; }

    op_iterator_impl &operator++() {
      ++Idx;
      return *this;
    }

    op_iterator_impl operator++(int) {
      op_iterator_impl tmp(*this);
      operator++();
      return tmp;
    }

    op_iterator_impl &operator--() {
      --Idx;
      return *this;
    }

    op_iterator_impl operator--(int) {
      op_iterator_impl tmp(*this);
      operator--();
      return tmp;
    }

    T1 operator*() const { return Node->getOperand(Idx); }
  };

public:
  NamedMDNode(const NamedMDNode &) = delete;
  ~NamedMDNode();

  /// Drop all references and remove the node from parent module.
  void eraseFromParent();

  /// Remove all uses and clear node vector.
  void dropAllReferences() { clearOperands(); }
  /// Drop all references to this node's operands.
  void clearOperands();

  /// Get the module that holds this named metadata collection.
  inline Module *getParent() { return Parent; }
  inline const Module *getParent() const { return Parent; }

  MDNode *getOperand(unsigned i) const;
  unsigned getNumOperands() const;
  void addOperand(MDNode *M);
  void setOperand(unsigned I, MDNode *New);
  StringRef getName() const;
  void print(raw_ostream &ROS, bool IsForDebug = false) const;
  void print(raw_ostream &ROS, ModuleSlotTracker &MST,
             bool IsForDebug = false) const;
  void dump() const;

  // ---------------------------------------------------------------------------
  // Operand Iterator interface...
  //
  using op_iterator = op_iterator_impl<MDNode *, MDNode>;

  op_iterator op_begin() { return op_iterator(this, 0); }
  op_iterator op_end() { return op_iterator(this, getNumOperands()); }

  using const_op_iterator = op_iterator_impl<const MDNode *, MDNode>;

  const_op_iterator op_begin() const { return const_op_iterator(this, 0); }
  const_op_iterator op_end() const { return const_op_iterator(this, getNumOperands()); }

  inline iterator_range<op_iterator> operands() {
    return make_range(op_begin(), op_end());
  }
  inline iterator_range<const_op_iterator> operands() const {
    return make_range(op_begin(), op_end());
  }
};

// Create wrappers for C Binding types (see CBindingWrapping.h).
DEFINE_ISA_CONVERSION_FUNCTIONS(NamedMDNode, LLVMNamedMDNodeRef)

} // end namespace llvm

#endif // LLVM_IR_METADATA_H