//===- Attributor.h --- Module-wide attribute deduction ---------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Attributor: An inter procedural (abstract) "attribute" deduction framework.
//
// The Attributor framework is an inter procedural abstract analysis (fixpoint
// iteration analysis). The goal is to allow easy deduction of new attributes as
// well as information exchange between abstract attributes in-flight.
//
// The Attributor class is the driver and the link between the various abstract
// attributes. The Attributor will iterate until a fixpoint state is reached by
// all abstract attributes in-flight, or until it will enforce a pessimistic fix
// point because an iteration limit is reached.
//
// Abstract attributes, derived from the AbstractAttribute class, actually
// describe properties of the code. They can correspond to actual LLVM-IR
// attributes, or they can be more general, ultimately unrelated to LLVM-IR
// attributes. The latter is useful when an abstract attribute provides
// information to other abstract attributes in-flight but we might not want to
// manifest the information. The Attributor allows to query in-flight abstract
// attributes through the `Attributor::getAAFor` method (see the method
// description for an example). If the method is used by an abstract attribute
// P, and it results in an abstract attribute Q, the Attributor will
// automatically capture a potential dependence from Q to P. This dependence
// will cause P to be reevaluated whenever Q changes in the future.
//
// The Attributor will only reevaluate abstract attributes that might have
// changed since the last iteration. That means that the Attributor will not
// revisit all instructions/blocks/functions in the module but only query
// an update from a subset of the abstract attributes.
//
// The update method `AbstractAttribute::updateImpl` is implemented by the
// specific "abstract attribute" subclasses. The method is invoked whenever the
// currently assumed state (see the AbstractState class) might not be valid
// anymore. This can, for example, happen if the state was dependent on another
// abstract attribute that changed. In every invocation, the update method has
// to adjust the internal state of an abstract attribute to a point that is
// justifiable by the underlying IR and the current state of abstract attributes
// in-flight. Since the IR is given and assumed to be valid, the information
// derived from it can be assumed to hold. However, information derived from
// other abstract attributes is conditional on various things. If the justifying
// state changed, the `updateImpl` has to revisit the situation and potentially
// find another justification or limit the optimistic assumptions made.
//
// Change is the key in this framework. Until a state of no-change, thus a
// fixpoint, is reached, the Attributor will query the abstract attributes
// in-flight to re-evaluate their state. If the (current) state is too
// optimistic, hence it cannot be justified anymore through other abstract
// attributes or the state of the IR, the state of the abstract attribute will
// have to change. Generally, we assume abstract attribute state to be a finite
// height lattice and the update function to be monotone. However, these
// conditions are not enforced because the iteration limit will guarantee
// termination. If an optimistic fixpoint is reached, or a pessimistic fix
// point is enforced after a timeout, the abstract attributes are tasked to
// manifest their result in the IR for passes to come.
//
// Attribute manifestation is not mandatory. If desired, there is support to
// generate a single or multiple LLVM-IR attributes already in the helper struct
// IRAttribute. In the simplest case, a subclass inherits from IRAttribute with
// a proper Attribute::AttrKind as template parameter. The Attributor
// manifestation framework will then create and place a new attribute if it is
// allowed to do so (based on the abstract state). Other use cases can be
// achieved by overloading AbstractAttribute or IRAttribute methods.
//
//
// The "mechanics" of adding a new "abstract attribute":
// - Define a class (transitively) inheriting from AbstractAttribute and one
//   (which could be the same) that (transitively) inherits from AbstractState.
//   For the latter, consider the already available BooleanState and
//   {Inc,Dec,Bit}IntegerState if they fit your needs, e.g., you require only a
//   number tracking or bit-encoding.
// - Implement all pure methods. Also use overloading if the attribute is not
//   conforming with the "default" behavior: A (set of) LLVM-IR attribute(s) for
//   an argument, call site argument, function return value, or function. See
//   the class and method descriptions for more information on the two
//   "Abstract" classes and their respective methods.
// - Register opportunities for the new abstract attribute in the
//   `Attributor::identifyDefaultAbstractAttributes` method if it should be
//   counted as a 'default' attribute.
// - Add sufficient tests.
// - Add a Statistics object for bookkeeping. If it is a simple (set of)
//   attribute(s) manifested through the Attributor manifestation framework, see
//   the bookkeeping function in Attributor.cpp.
89 // - If instructions with a certain opcode are interesting to the attribute, add 90 // that opcode to the switch in `Attributor::identifyAbstractAttributes`. This 91 // will make it possible to query all those instructions through the 92 // `InformationCache::getOpcodeInstMapForFunction` interface and eliminate the 93 // need to traverse the IR repeatedly. 94 // 95 //===----------------------------------------------------------------------===// 96 97 #ifndef LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H 98 #define LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H 99 100 #include "llvm/ADT/DenseSet.h" 101 #include "llvm/ADT/GraphTraits.h" 102 #include "llvm/ADT/MapVector.h" 103 #include "llvm/ADT/STLExtras.h" 104 #include "llvm/ADT/SetVector.h" 105 #include "llvm/Analysis/AssumeBundleQueries.h" 106 #include "llvm/Analysis/CFG.h" 107 #include "llvm/Analysis/CGSCCPassManager.h" 108 #include "llvm/Analysis/LazyCallGraph.h" 109 #include "llvm/Analysis/LoopInfo.h" 110 #include "llvm/Analysis/MustExecute.h" 111 #include "llvm/Analysis/PostDominators.h" 112 #include "llvm/Analysis/TargetLibraryInfo.h" 113 #include "llvm/IR/AbstractCallSite.h" 114 #include "llvm/IR/ConstantRange.h" 115 #include "llvm/IR/PassManager.h" 116 #include "llvm/Support/Allocator.h" 117 #include "llvm/Support/Casting.h" 118 #include "llvm/Support/TimeProfiler.h" 119 #include "llvm/Transforms/Utils/CallGraphUpdater.h" 120 121 namespace llvm { 122 123 struct AADepGraphNode; 124 struct AADepGraph; 125 struct Attributor; 126 struct AbstractAttribute; 127 struct InformationCache; 128 struct AAIsDead; 129 130 class AAManager; 131 class AAResults; 132 class Function; 133 134 /// The value passed to the line option that defines the maximal initialization 135 /// chain length. 
136 extern unsigned MaxInitializationChainLength; 137 138 ///{ 139 enum class ChangeStatus { 140 CHANGED, 141 UNCHANGED, 142 }; 143 144 ChangeStatus operator|(ChangeStatus l, ChangeStatus r); 145 ChangeStatus operator&(ChangeStatus l, ChangeStatus r); 146 147 enum class DepClassTy { 148 REQUIRED, ///< The target cannot be valid if the source is not. 149 OPTIONAL, ///< The target may be valid if the source is not. 150 NONE, ///< Do not track a dependence between source and target. 151 }; 152 ///} 153 154 /// The data structure for the nodes of a dependency graph 155 struct AADepGraphNode { 156 public: ~AADepGraphNodeAADepGraphNode157 virtual ~AADepGraphNode(){}; 158 using DepTy = PointerIntPair<AADepGraphNode *, 1>; 159 160 protected: 161 /// Set of dependency graph nodes which should be updated if this one 162 /// is updated. The bit encodes if it is optional. 163 TinyPtrVector<DepTy> Deps; 164 DepGetValAADepGraphNode165 static AADepGraphNode *DepGetVal(DepTy &DT) { return DT.getPointer(); } DepGetValAAAADepGraphNode166 static AbstractAttribute *DepGetValAA(DepTy &DT) { 167 return cast<AbstractAttribute>(DT.getPointer()); 168 } 169 170 operator AbstractAttribute *() { return cast<AbstractAttribute>(this); } 171 172 public: 173 using iterator = 174 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>; 175 using aaiterator = 176 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetValAA)>; 177 beginAADepGraphNode178 aaiterator begin() { return aaiterator(Deps.begin(), &DepGetValAA); } endAADepGraphNode179 aaiterator end() { return aaiterator(Deps.end(), &DepGetValAA); } child_beginAADepGraphNode180 iterator child_begin() { return iterator(Deps.begin(), &DepGetVal); } child_endAADepGraphNode181 iterator child_end() { return iterator(Deps.end(), &DepGetVal); } 182 printAADepGraphNode183 virtual void print(raw_ostream &OS) const { OS << "AADepNode Impl\n"; } getDepsAADepGraphNode184 TinyPtrVector<DepTy> &getDeps() { return Deps; } 185 186 
friend struct Attributor; 187 friend struct AADepGraph; 188 }; 189 190 /// The data structure for the dependency graph 191 /// 192 /// Note that in this graph if there is an edge from A to B (A -> B), 193 /// then it means that B depends on A, and when the state of A is 194 /// updated, node B should also be updated 195 struct AADepGraph { AADepGraphAADepGraph196 AADepGraph() {} ~AADepGraphAADepGraph197 ~AADepGraph() {} 198 199 using DepTy = AADepGraphNode::DepTy; DepGetValAADepGraph200 static AADepGraphNode *DepGetVal(DepTy &DT) { return DT.getPointer(); } 201 using iterator = 202 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>; 203 204 /// There is no root node for the dependency graph. But the SCCIterator 205 /// requires a single entry point, so we maintain a fake("synthetic") root 206 /// node that depends on every node. 207 AADepGraphNode SyntheticRoot; GetEntryNodeAADepGraph208 AADepGraphNode *GetEntryNode() { return &SyntheticRoot; } 209 beginAADepGraph210 iterator begin() { return SyntheticRoot.child_begin(); } endAADepGraph211 iterator end() { return SyntheticRoot.child_end(); } 212 213 void viewGraph(); 214 215 /// Dump graph to file 216 void dumpGraph(); 217 218 /// Print dependency graph 219 void print(); 220 }; 221 222 /// Helper to describe and deal with positions in the LLVM-IR. 223 /// 224 /// A position in the IR is described by an anchor value and an "offset" that 225 /// could be the argument number, for call sites and arguments, or an indicator 226 /// of the "position kind". The kinds, specified in the Kind enum below, include 227 /// the locations in the attribute list, i.a., function scope and return value, 228 /// as well as a distinction between call sites and functions. Finally, there 229 /// are floating values that do not have a corresponding attribute list 230 /// position. 231 struct IRPosition { 232 // NOTE: In the future this definition can be changed to support recursive 233 // functions. 
234 using CallBaseContext = CallBase; 235 236 /// The positions we distinguish in the IR. 237 enum Kind : char { 238 IRP_INVALID, ///< An invalid position. 239 IRP_FLOAT, ///< A position that is not associated with a spot suitable 240 ///< for attributes. This could be any value or instruction. 241 IRP_RETURNED, ///< An attribute for the function return value. 242 IRP_CALL_SITE_RETURNED, ///< An attribute for a call site return value. 243 IRP_FUNCTION, ///< An attribute for a function (scope). 244 IRP_CALL_SITE, ///< An attribute for a call site (function scope). 245 IRP_ARGUMENT, ///< An attribute for a function argument. 246 IRP_CALL_SITE_ARGUMENT, ///< An attribute for a call site argument. 247 }; 248 249 /// Default constructor available to create invalid positions implicitly. All 250 /// other positions need to be created explicitly through the appropriate 251 /// static member function. IRPositionIRPosition252 IRPosition() : Enc(nullptr, ENC_VALUE) { verify(); } 253 254 /// Create a position describing the value of \p V. 255 static const IRPosition value(const Value &V, 256 const CallBaseContext *CBContext = nullptr) { 257 if (auto *Arg = dyn_cast<Argument>(&V)) 258 return IRPosition::argument(*Arg, CBContext); 259 if (auto *CB = dyn_cast<CallBase>(&V)) 260 return IRPosition::callsite_returned(*CB); 261 return IRPosition(const_cast<Value &>(V), IRP_FLOAT, CBContext); 262 } 263 264 /// Create a position describing the function scope of \p F. 265 /// \p CBContext is used for call base specific analysis. 266 static const IRPosition function(const Function &F, 267 const CallBaseContext *CBContext = nullptr) { 268 return IRPosition(const_cast<Function &>(F), IRP_FUNCTION, CBContext); 269 } 270 271 /// Create a position describing the returned value of \p F. 272 /// \p CBContext is used for call base specific analysis. 
273 static const IRPosition returned(const Function &F, 274 const CallBaseContext *CBContext = nullptr) { 275 return IRPosition(const_cast<Function &>(F), IRP_RETURNED, CBContext); 276 } 277 278 /// Create a position describing the argument \p Arg. 279 /// \p CBContext is used for call base specific analysis. 280 static const IRPosition argument(const Argument &Arg, 281 const CallBaseContext *CBContext = nullptr) { 282 return IRPosition(const_cast<Argument &>(Arg), IRP_ARGUMENT, CBContext); 283 } 284 285 /// Create a position describing the function scope of \p CB. callsite_functionIRPosition286 static const IRPosition callsite_function(const CallBase &CB) { 287 return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE); 288 } 289 290 /// Create a position describing the returned value of \p CB. callsite_returnedIRPosition291 static const IRPosition callsite_returned(const CallBase &CB) { 292 return IRPosition(const_cast<CallBase &>(CB), IRP_CALL_SITE_RETURNED); 293 } 294 295 /// Create a position describing the argument of \p CB at position \p ArgNo. callsite_argumentIRPosition296 static const IRPosition callsite_argument(const CallBase &CB, 297 unsigned ArgNo) { 298 return IRPosition(const_cast<Use &>(CB.getArgOperandUse(ArgNo)), 299 IRP_CALL_SITE_ARGUMENT); 300 } 301 302 /// Create a position describing the argument of \p ACS at position \p ArgNo. callsite_argumentIRPosition303 static const IRPosition callsite_argument(AbstractCallSite ACS, 304 unsigned ArgNo) { 305 if (ACS.getNumArgOperands() <= ArgNo) 306 return IRPosition(); 307 int CSArgNo = ACS.getCallArgOperandNo(ArgNo); 308 if (CSArgNo >= 0) 309 return IRPosition::callsite_argument( 310 cast<CallBase>(*ACS.getInstruction()), CSArgNo); 311 return IRPosition(); 312 } 313 314 /// Create a position with function scope matching the "context" of \p IRP. 
315 /// If \p IRP is a call site (see isAnyCallSitePosition()) then the result 316 /// will be a call site position, otherwise the function position of the 317 /// associated function. 318 static const IRPosition 319 function_scope(const IRPosition &IRP, 320 const CallBaseContext *CBContext = nullptr) { 321 if (IRP.isAnyCallSitePosition()) { 322 return IRPosition::callsite_function( 323 cast<CallBase>(IRP.getAnchorValue())); 324 } 325 assert(IRP.getAssociatedFunction()); 326 return IRPosition::function(*IRP.getAssociatedFunction(), CBContext); 327 } 328 329 bool operator==(const IRPosition &RHS) const { 330 return Enc == RHS.Enc && RHS.CBContext == CBContext; 331 } 332 bool operator!=(const IRPosition &RHS) const { return !(*this == RHS); } 333 334 /// Return the value this abstract attribute is anchored with. 335 /// 336 /// The anchor value might not be the associated value if the latter is not 337 /// sufficient to determine where arguments will be manifested. This is, so 338 /// far, only the case for call site arguments as the value is not sufficient 339 /// to pinpoint them. Instead, we can use the call site as an anchor. getAnchorValueIRPosition340 Value &getAnchorValue() const { 341 switch (getEncodingBits()) { 342 case ENC_VALUE: 343 case ENC_RETURNED_VALUE: 344 case ENC_FLOATING_FUNCTION: 345 return *getAsValuePtr(); 346 case ENC_CALL_SITE_ARGUMENT_USE: 347 return *(getAsUsePtr()->getUser()); 348 default: 349 llvm_unreachable("Unkown encoding!"); 350 }; 351 } 352 353 /// Return the associated function, if any. getAssociatedFunctionIRPosition354 Function *getAssociatedFunction() const { 355 if (auto *CB = dyn_cast<CallBase>(&getAnchorValue())) { 356 // We reuse the logic that associates callback calles to arguments of a 357 // call site here to identify the callback callee as the associated 358 // function. 
359 if (Argument *Arg = getAssociatedArgument()) 360 return Arg->getParent(); 361 return CB->getCalledFunction(); 362 } 363 return getAnchorScope(); 364 } 365 366 /// Return the associated argument, if any. 367 Argument *getAssociatedArgument() const; 368 369 /// Return true if the position refers to a function interface, that is the 370 /// function scope, the function return, or an argument. isFnInterfaceKindIRPosition371 bool isFnInterfaceKind() const { 372 switch (getPositionKind()) { 373 case IRPosition::IRP_FUNCTION: 374 case IRPosition::IRP_RETURNED: 375 case IRPosition::IRP_ARGUMENT: 376 return true; 377 default: 378 return false; 379 } 380 } 381 382 /// Return the Function surrounding the anchor value. getAnchorScopeIRPosition383 Function *getAnchorScope() const { 384 Value &V = getAnchorValue(); 385 if (isa<Function>(V)) 386 return &cast<Function>(V); 387 if (isa<Argument>(V)) 388 return cast<Argument>(V).getParent(); 389 if (isa<Instruction>(V)) 390 return cast<Instruction>(V).getFunction(); 391 return nullptr; 392 } 393 394 /// Return the context instruction, if any. getCtxIIRPosition395 Instruction *getCtxI() const { 396 Value &V = getAnchorValue(); 397 if (auto *I = dyn_cast<Instruction>(&V)) 398 return I; 399 if (auto *Arg = dyn_cast<Argument>(&V)) 400 if (!Arg->getParent()->isDeclaration()) 401 return &Arg->getParent()->getEntryBlock().front(); 402 if (auto *F = dyn_cast<Function>(&V)) 403 if (!F->isDeclaration()) 404 return &(F->getEntryBlock().front()); 405 return nullptr; 406 } 407 408 /// Return the value this abstract attribute is associated with. getAssociatedValueIRPosition409 Value &getAssociatedValue() const { 410 if (getCallSiteArgNo() < 0 || isa<Argument>(&getAnchorValue())) 411 return getAnchorValue(); 412 assert(isa<CallBase>(&getAnchorValue()) && "Expected a call base!"); 413 return *cast<CallBase>(&getAnchorValue()) 414 ->getArgOperand(getCallSiteArgNo()); 415 } 416 417 /// Return the type this abstract attribute is associated with. 
getAssociatedTypeIRPosition418 Type *getAssociatedType() const { 419 if (getPositionKind() == IRPosition::IRP_RETURNED) 420 return getAssociatedFunction()->getReturnType(); 421 return getAssociatedValue().getType(); 422 } 423 424 /// Return the callee argument number of the associated value if it is an 425 /// argument or call site argument, otherwise a negative value. In contrast to 426 /// `getCallSiteArgNo` this method will always return the "argument number" 427 /// from the perspective of the callee. This may not the same as the call site 428 /// if this is a callback call. getCalleeArgNoIRPosition429 int getCalleeArgNo() const { 430 return getArgNo(/* CallbackCalleeArgIfApplicable */ true); 431 } 432 433 /// Return the call site argument number of the associated value if it is an 434 /// argument or call site argument, otherwise a negative value. In contrast to 435 /// `getCalleArgNo` this method will always return the "operand number" from 436 /// the perspective of the call site. This may not the same as the callee 437 /// perspective if this is a callback call. getCallSiteArgNoIRPosition438 int getCallSiteArgNo() const { 439 return getArgNo(/* CallbackCalleeArgIfApplicable */ false); 440 } 441 442 /// Return the index in the attribute list for this position. getAttrIdxIRPosition443 unsigned getAttrIdx() const { 444 switch (getPositionKind()) { 445 case IRPosition::IRP_INVALID: 446 case IRPosition::IRP_FLOAT: 447 break; 448 case IRPosition::IRP_FUNCTION: 449 case IRPosition::IRP_CALL_SITE: 450 return AttributeList::FunctionIndex; 451 case IRPosition::IRP_RETURNED: 452 case IRPosition::IRP_CALL_SITE_RETURNED: 453 return AttributeList::ReturnIndex; 454 case IRPosition::IRP_ARGUMENT: 455 case IRPosition::IRP_CALL_SITE_ARGUMENT: 456 return getCallSiteArgNo() + AttributeList::FirstArgIndex; 457 } 458 llvm_unreachable( 459 "There is no attribute index for a floating or invalid position!"); 460 } 461 462 /// Return the associated position kind. 
getPositionKindIRPosition463 Kind getPositionKind() const { 464 char EncodingBits = getEncodingBits(); 465 if (EncodingBits == ENC_CALL_SITE_ARGUMENT_USE) 466 return IRP_CALL_SITE_ARGUMENT; 467 if (EncodingBits == ENC_FLOATING_FUNCTION) 468 return IRP_FLOAT; 469 470 Value *V = getAsValuePtr(); 471 if (!V) 472 return IRP_INVALID; 473 if (isa<Argument>(V)) 474 return IRP_ARGUMENT; 475 if (isa<Function>(V)) 476 return isReturnPosition(EncodingBits) ? IRP_RETURNED : IRP_FUNCTION; 477 if (isa<CallBase>(V)) 478 return isReturnPosition(EncodingBits) ? IRP_CALL_SITE_RETURNED 479 : IRP_CALL_SITE; 480 return IRP_FLOAT; 481 } 482 483 /// TODO: Figure out if the attribute related helper functions should live 484 /// here or somewhere else. 485 486 /// Return true if any kind in \p AKs existing in the IR at a position that 487 /// will affect this one. See also getAttrs(...). 488 /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions, 489 /// e.g., the function position if this is an 490 /// argument position, should be ignored. 491 bool hasAttr(ArrayRef<Attribute::AttrKind> AKs, 492 bool IgnoreSubsumingPositions = false, 493 Attributor *A = nullptr) const; 494 495 /// Return the attributes of any kind in \p AKs existing in the IR at a 496 /// position that will affect this one. While each position can only have a 497 /// single attribute of any kind in \p AKs, there are "subsuming" positions 498 /// that could have an attribute as well. This method returns all attributes 499 /// found in \p Attrs. 500 /// \param IgnoreSubsumingPositions Flag to determine if subsuming positions, 501 /// e.g., the function position if this is an 502 /// argument position, should be ignored. 503 void getAttrs(ArrayRef<Attribute::AttrKind> AKs, 504 SmallVectorImpl<Attribute> &Attrs, 505 bool IgnoreSubsumingPositions = false, 506 Attributor *A = nullptr) const; 507 508 /// Remove the attribute of kind \p AKs existing in the IR at this position. 
removeAttrsIRPosition509 void removeAttrs(ArrayRef<Attribute::AttrKind> AKs) const { 510 if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT) 511 return; 512 513 AttributeList AttrList; 514 auto *CB = dyn_cast<CallBase>(&getAnchorValue()); 515 if (CB) 516 AttrList = CB->getAttributes(); 517 else 518 AttrList = getAssociatedFunction()->getAttributes(); 519 520 LLVMContext &Ctx = getAnchorValue().getContext(); 521 for (Attribute::AttrKind AK : AKs) 522 AttrList = AttrList.removeAttribute(Ctx, getAttrIdx(), AK); 523 524 if (CB) 525 CB->setAttributes(AttrList); 526 else 527 getAssociatedFunction()->setAttributes(AttrList); 528 } 529 isAnyCallSitePositionIRPosition530 bool isAnyCallSitePosition() const { 531 switch (getPositionKind()) { 532 case IRPosition::IRP_CALL_SITE: 533 case IRPosition::IRP_CALL_SITE_RETURNED: 534 case IRPosition::IRP_CALL_SITE_ARGUMENT: 535 return true; 536 default: 537 return false; 538 } 539 } 540 541 /// Return true if the position is an argument or call site argument. isArgumentPositionIRPosition542 bool isArgumentPosition() const { 543 switch (getPositionKind()) { 544 case IRPosition::IRP_ARGUMENT: 545 case IRPosition::IRP_CALL_SITE_ARGUMENT: 546 return true; 547 default: 548 return false; 549 } 550 } 551 552 /// Return the same position without the call base context. stripCallBaseContextIRPosition553 IRPosition stripCallBaseContext() const { 554 IRPosition Result = *this; 555 Result.CBContext = nullptr; 556 return Result; 557 } 558 559 /// Get the call base context from the position. getCallBaseContextIRPosition560 const CallBaseContext *getCallBaseContext() const { return CBContext; } 561 562 /// Check if the position has any call base context. hasCallBaseContextIRPosition563 bool hasCallBaseContext() const { return CBContext != nullptr; } 564 565 /// Special DenseMap key values. 
566 /// 567 ///{ 568 static const IRPosition EmptyKey; 569 static const IRPosition TombstoneKey; 570 ///} 571 572 /// Conversion into a void * to allow reuse of pointer hashing. 573 operator void *() const { return Enc.getOpaqueValue(); } 574 575 private: 576 /// Private constructor for special values only! 577 explicit IRPosition(void *Ptr, const CallBaseContext *CBContext = nullptr) CBContextIRPosition578 : CBContext(CBContext) { 579 Enc.setFromOpaqueValue(Ptr); 580 } 581 582 /// IRPosition anchored at \p AnchorVal with kind/argument numbet \p PK. 583 explicit IRPosition(Value &AnchorVal, Kind PK, 584 const CallBaseContext *CBContext = nullptr) CBContextIRPosition585 : CBContext(CBContext) { 586 switch (PK) { 587 case IRPosition::IRP_INVALID: 588 llvm_unreachable("Cannot create invalid IRP with an anchor value!"); 589 break; 590 case IRPosition::IRP_FLOAT: 591 // Special case for floating functions. 592 if (isa<Function>(AnchorVal)) 593 Enc = {&AnchorVal, ENC_FLOATING_FUNCTION}; 594 else 595 Enc = {&AnchorVal, ENC_VALUE}; 596 break; 597 case IRPosition::IRP_FUNCTION: 598 case IRPosition::IRP_CALL_SITE: 599 Enc = {&AnchorVal, ENC_VALUE}; 600 break; 601 case IRPosition::IRP_RETURNED: 602 case IRPosition::IRP_CALL_SITE_RETURNED: 603 Enc = {&AnchorVal, ENC_RETURNED_VALUE}; 604 break; 605 case IRPosition::IRP_ARGUMENT: 606 Enc = {&AnchorVal, ENC_VALUE}; 607 break; 608 case IRPosition::IRP_CALL_SITE_ARGUMENT: 609 llvm_unreachable( 610 "Cannot create call site argument IRP with an anchor value!"); 611 break; 612 } 613 verify(); 614 } 615 616 /// Return the callee argument number of the associated value if it is an 617 /// argument or call site argument. See also `getCalleeArgNo` and 618 /// `getCallSiteArgNo`. 
getArgNoIRPosition619 int getArgNo(bool CallbackCalleeArgIfApplicable) const { 620 if (CallbackCalleeArgIfApplicable) 621 if (Argument *Arg = getAssociatedArgument()) 622 return Arg->getArgNo(); 623 switch (getPositionKind()) { 624 case IRPosition::IRP_ARGUMENT: 625 return cast<Argument>(getAsValuePtr())->getArgNo(); 626 case IRPosition::IRP_CALL_SITE_ARGUMENT: { 627 Use &U = *getAsUsePtr(); 628 return cast<CallBase>(U.getUser())->getArgOperandNo(&U); 629 } 630 default: 631 return -1; 632 } 633 } 634 635 /// IRPosition for the use \p U. The position kind \p PK needs to be 636 /// IRP_CALL_SITE_ARGUMENT, the anchor value is the user, the associated value 637 /// the used value. IRPositionIRPosition638 explicit IRPosition(Use &U, Kind PK) { 639 assert(PK == IRP_CALL_SITE_ARGUMENT && 640 "Use constructor is for call site arguments only!"); 641 Enc = {&U, ENC_CALL_SITE_ARGUMENT_USE}; 642 verify(); 643 } 644 645 /// Verify internal invariants. 646 void verify(); 647 648 /// Return the attributes of kind \p AK existing in the IR as attribute. 649 bool getAttrsFromIRAttr(Attribute::AttrKind AK, 650 SmallVectorImpl<Attribute> &Attrs) const; 651 652 /// Return the attributes of kind \p AK existing in the IR as operand bundles 653 /// of an llvm.assume. 654 bool getAttrsFromAssumes(Attribute::AttrKind AK, 655 SmallVectorImpl<Attribute> &Attrs, 656 Attributor &A) const; 657 658 /// Return the underlying pointer as Value *, valid for all positions but 659 /// IRP_CALL_SITE_ARGUMENT. getAsValuePtrIRPosition660 Value *getAsValuePtr() const { 661 assert(getEncodingBits() != ENC_CALL_SITE_ARGUMENT_USE && 662 "Not a value pointer!"); 663 return reinterpret_cast<Value *>(Enc.getPointer()); 664 } 665 666 /// Return the underlying pointer as Use *, valid only for 667 /// IRP_CALL_SITE_ARGUMENT positions. 
getAsUsePtrIRPosition668 Use *getAsUsePtr() const { 669 assert(getEncodingBits() == ENC_CALL_SITE_ARGUMENT_USE && 670 "Not a value pointer!"); 671 return reinterpret_cast<Use *>(Enc.getPointer()); 672 } 673 674 /// Return true if \p EncodingBits describe a returned or call site returned 675 /// position. isReturnPositionIRPosition676 static bool isReturnPosition(char EncodingBits) { 677 return EncodingBits == ENC_RETURNED_VALUE; 678 } 679 680 /// Return true if the encoding bits describe a returned or call site returned 681 /// position. isReturnPositionIRPosition682 bool isReturnPosition() const { return isReturnPosition(getEncodingBits()); } 683 684 /// The encoding of the IRPosition is a combination of a pointer and two 685 /// encoding bits. The values of the encoding bits are defined in the enum 686 /// below. The pointer is either a Value* (for the first three encoding bit 687 /// combinations) or Use* (for ENC_CALL_SITE_ARGUMENT_USE). 688 /// 689 ///{ 690 enum { 691 ENC_VALUE = 0b00, 692 ENC_RETURNED_VALUE = 0b01, 693 ENC_FLOATING_FUNCTION = 0b10, 694 ENC_CALL_SITE_ARGUMENT_USE = 0b11, 695 }; 696 697 // Reserve the maximal amount of bits so there is no need to mask out the 698 // remaining ones. We will not encode anything else in the pointer anyway. 699 static constexpr int NumEncodingBits = 700 PointerLikeTypeTraits<void *>::NumLowBitsAvailable; 701 static_assert(NumEncodingBits >= 2, "At least two bits are required!"); 702 703 /// The pointer with the encoding bits. 704 PointerIntPair<void *, NumEncodingBits, char> Enc; 705 ///} 706 707 /// Call base context. Used for callsite specific analysis. 708 const CallBaseContext *CBContext = nullptr; 709 710 /// Return the encoding bits. getEncodingBitsIRPosition711 char getEncodingBits() const { return Enc.getInt(); } 712 }; 713 714 /// Helper that allows IRPosition as a key in a DenseMap. 
715 template <> struct DenseMapInfo<IRPosition> { 716 static inline IRPosition getEmptyKey() { return IRPosition::EmptyKey; } 717 static inline IRPosition getTombstoneKey() { 718 return IRPosition::TombstoneKey; 719 } 720 static unsigned getHashValue(const IRPosition &IRP) { 721 return (DenseMapInfo<void *>::getHashValue(IRP) << 4) ^ 722 (DenseMapInfo<Value *>::getHashValue(IRP.getCallBaseContext())); 723 } 724 725 static bool isEqual(const IRPosition &a, const IRPosition &b) { 726 return a == b; 727 } 728 }; 729 730 /// A visitor class for IR positions. 731 /// 732 /// Given a position P, the SubsumingPositionIterator allows to visit "subsuming 733 /// positions" wrt. attributes/information. Thus, if a piece of information 734 /// holds for a subsuming position, it also holds for the position P. 735 /// 736 /// The subsuming positions always include the initial position and then, 737 /// depending on the position kind, additionally the following ones: 738 /// - for IRP_RETURNED: 739 /// - the function (IRP_FUNCTION) 740 /// - for IRP_ARGUMENT: 741 /// - the function (IRP_FUNCTION) 742 /// - for IRP_CALL_SITE: 743 /// - the callee (IRP_FUNCTION), if known 744 /// - for IRP_CALL_SITE_RETURNED: 745 /// - the callee (IRP_RETURNED), if known 746 /// - the call site (IRP_FUNCTION) 747 /// - the callee (IRP_FUNCTION), if known 748 /// - for IRP_CALL_SITE_ARGUMENT: 749 /// - the argument of the callee (IRP_ARGUMENT), if known 750 /// - the callee (IRP_FUNCTION), if known 751 /// - the position the call site argument is associated with if it is not 752 /// anchored to the call site, e.g., if it is an argument then the argument 753 /// (IRP_ARGUMENT) 754 class SubsumingPositionIterator { 755 SmallVector<IRPosition, 4> IRPositions; 756 using iterator = decltype(IRPositions)::iterator; 757 758 public: 759 SubsumingPositionIterator(const IRPosition &IRP); 760 iterator begin() { return IRPositions.begin(); } 761 iterator end() { return IRPositions.end(); } 762 }; 763 764 /// 
Wrapper for FunctoinAnalysisManager. 765 struct AnalysisGetter { 766 template <typename Analysis> 767 typename Analysis::Result *getAnalysis(const Function &F) { 768 if (!FAM || !F.getParent()) 769 return nullptr; 770 return &FAM->getResult<Analysis>(const_cast<Function &>(F)); 771 } 772 773 AnalysisGetter(FunctionAnalysisManager &FAM) : FAM(&FAM) {} 774 AnalysisGetter() {} 775 776 private: 777 FunctionAnalysisManager *FAM = nullptr; 778 }; 779 780 /// Data structure to hold cached (LLVM-IR) information. 781 /// 782 /// All attributes are given an InformationCache object at creation time to 783 /// avoid inspection of the IR by all of them individually. This default 784 /// InformationCache will hold information required by 'default' attributes, 785 /// thus the ones deduced when Attributor::identifyDefaultAbstractAttributes(..) 786 /// is called. 787 /// 788 /// If custom abstract attributes, registered manually through 789 /// Attributor::registerAA(...), need more information, especially if it is not 790 /// reusable, it is advised to inherit from the InformationCache and cast the 791 /// instance down in the abstract attributes. 
struct InformationCache {
  InformationCache(const Module &M, AnalysisGetter &AG,
                   BumpPtrAllocator &Allocator, SetVector<Function *> *CGSCC)
      : DL(M.getDataLayout()), Allocator(Allocator),
        Explorer(
            /* ExploreInterBlock */ true, /* ExploreCFGForward */ true,
            /* ExploreCFGBackward */ true,
            /* LIGetter */
            [&](const Function &F) { return AG.getAnalysis<LoopAnalysis>(F); },
            /* DTGetter */
            [&](const Function &F) {
              return AG.getAnalysis<DominatorTreeAnalysis>(F);
            },
            /* PDTGetter */
            [&](const Function &F) {
              return AG.getAnalysis<PostDominatorTreeAnalysis>(F);
            }),
        AG(AG), CGSCC(CGSCC) {
    // NOTE(review): the getter lambdas above capture `AG` by reference and are
    // stored in `Explorer`; this presumably relies on the AnalysisGetter
    // outliving this cache -- confirm at construction sites.
    if (CGSCC)
      initializeModuleSlice(*CGSCC);
  }

  ~InformationCache() {
    // The FunctionInfo objects are allocated via a BumpPtrAllocator, we call
    // the destructor manually.
    for (auto &It : FuncInfoMap)
      It.getSecond()->~FunctionInfo();
  }

  /// Apply \p CB to all uses of \p F. If \p LookThroughConstantExprUses is
  /// true, constant expression users are not given to \p CB but their uses are
  /// traversed transitively.
  template <typename CBTy>
  static void foreachUse(Function &F, CBTy CB,
                         bool LookThroughConstantExprUses = true) {
    SmallVector<Use *, 8> Worklist(make_pointer_range(F.uses()));

    // Index-based iteration because the worklist grows while we visit it.
    for (unsigned Idx = 0; Idx < Worklist.size(); ++Idx) {
      Use &U = *Worklist[Idx];

      // Allow use in constant bitcasts and simply look through them.
      if (LookThroughConstantExprUses && isa<ConstantExpr>(U.getUser())) {
        for (Use &CEU : cast<ConstantExpr>(U.getUser())->uses())
          Worklist.push_back(&CEU);
        continue;
      }

      CB(U);
    }
  }

  /// Initialize the ModuleSlice member based on \p SCC. ModuleSlice contains
  /// (a subset of) all functions that we can look at during this SCC traversal.
  /// This includes functions (transitively) called from the SCC and the
  /// (transitive) callers of SCC functions. We also can look at a function if
  /// there is a "reference edge", i.e., if the function somehow uses (!=calls)
  /// a function in the SCC or a caller of a function in the SCC.
  void initializeModuleSlice(SetVector<Function *> &SCC) {
    ModuleSlice.insert(SCC.begin(), SCC.end());

    // Pass 1: add everything (transitively) called from the SCC.
    SmallPtrSet<Function *, 16> Seen;
    SmallVector<Function *, 16> Worklist(SCC.begin(), SCC.end());
    while (!Worklist.empty()) {
      Function *F = Worklist.pop_back_val();
      ModuleSlice.insert(F);

      for (Instruction &I : instructions(*F))
        if (auto *CB = dyn_cast<CallBase>(&I))
          if (Function *Callee = CB->getCalledFunction())
            if (Seen.insert(Callee).second)
              Worklist.push_back(Callee);
    }

    // Pass 2: add every function that (transitively) uses an SCC function,
    // including non-call reference edges.
    Seen.clear();
    Worklist.append(SCC.begin(), SCC.end());
    while (!Worklist.empty()) {
      Function *F = Worklist.pop_back_val();
      ModuleSlice.insert(F);

      // Traverse all transitive uses.
      foreachUse(*F, [&](Use &U) {
        if (auto *UsrI = dyn_cast<Instruction>(U.getUser()))
          if (Seen.insert(UsrI->getFunction()).second)
            Worklist.push_back(UsrI->getFunction());
      });
    }
  }

  /// The slice of the module we are allowed to look at.
  SmallPtrSet<Function *, 8> ModuleSlice;

  /// A vector type to hold instructions.
  using InstructionVectorTy = SmallVector<Instruction *, 8>;

  /// A map type from opcodes to instructions with this opcode.
  using OpcodeInstMapTy = DenseMap<unsigned, InstructionVectorTy *>;

  /// Return the map that relates "interesting" opcodes with all instructions
  /// with that opcode in \p F.
  OpcodeInstMapTy &getOpcodeInstMapForFunction(const Function &F) {
    return getFunctionInfo(F).OpcodeInstMap;
  }

  /// Return the instructions in \p F that may read or write memory.
  InstructionVectorTy &getReadOrWriteInstsForFunction(const Function &F) {
    return getFunctionInfo(F).RWInsts;
  }

  /// Return MustBeExecutedContextExplorer.
  MustBeExecutedContextExplorer &getMustBeExecutedContextExplorer() {
    return Explorer;
  }

  /// Return TargetLibraryInfo for function \p F.
  TargetLibraryInfo *getTargetLibraryInfoForFunction(const Function &F) {
    return AG.getAnalysis<TargetLibraryAnalysis>(F);
  }

  /// Return AliasAnalysis Result for function \p F.
  AAResults *getAAResultsForFunction(const Function &F);

  /// Return true if \p Arg is involved in a must-tail call, thus the argument
  /// of the caller or callee.
  bool isInvolvedInMustTailCall(const Argument &Arg) {
    FunctionInfo &FI = getFunctionInfo(*Arg.getParent());
    return FI.CalledViaMustTail || FI.ContainsMustTailCall;
  }

  /// Return the analysis result from a pass \p AP for function \p F.
  template <typename AP>
  typename AP::Result *getAnalysisResultForFunction(const Function &F) {
    return AG.getAnalysis<AP>(F);
  }

  /// Return SCC size on call graph for function \p F or 0 if unknown.
  unsigned getSccSize(const Function &F) {
    if (CGSCC && CGSCC->count(const_cast<Function *>(&F)))
      return CGSCC->size();
    return 0;
  }

  /// Return datalayout used in the module.
  const DataLayout &getDL() { return DL; }

  /// Return the map containing all the knowledge we have from `llvm.assume`s.
  const RetainedKnowledgeMap &getKnowledgeMap() const { return KnowledgeMap; }

  /// Return if \p To is potentially reachable from \p From or not.
  /// If the same query was answered before, return the cached result.
  bool getPotentiallyReachable(const Instruction &From, const Instruction &To) {
    auto KeyPair = std::make_pair(&From, &To);
    auto Iter = PotentiallyReachableMap.find(KeyPair);
    if (Iter != PotentiallyReachableMap.end())
      return Iter->second;
    const Function &F = *From.getFunction();
    bool Result = isPotentiallyReachable(
        &From, &To, nullptr, AG.getAnalysis<DominatorTreeAnalysis>(F),
        AG.getAnalysis<LoopAnalysis>(F));
    PotentiallyReachableMap.insert(std::make_pair(KeyPair, Result));
    return Result;
  }

  /// Check whether \p F is part of module slice.
  bool isInModuleSlice(const Function &F) {
    return ModuleSlice.count(const_cast<Function *>(&F));
  }

private:
  struct FunctionInfo {
    ~FunctionInfo();

    /// A nested map that remembers all instructions in a function with a
    /// certain instruction opcode (Instruction::getOpcode()).
    OpcodeInstMapTy OpcodeInstMap;

    /// A map from functions to their instructions that may read or write
    /// memory.
    InstructionVectorTy RWInsts;

    /// Function is called by a `musttail` call.
    bool CalledViaMustTail;

    /// Function contains a `musttail` call.
    bool ContainsMustTailCall;
  };

  /// A map type from functions to information about them.
  DenseMap<const Function *, FunctionInfo *> FuncInfoMap;

  /// Return information about the function \p F, potentially by creating it.
  FunctionInfo &getFunctionInfo(const Function &F) {
    FunctionInfo *&FI = FuncInfoMap[&F];
    if (!FI) {
      // Placement-new into the bump allocator; destroyed manually in ~InformationCache.
      FI = new (Allocator) FunctionInfo();
      initializeInformationCache(F, *FI);
    }
    return *FI;
  }

  /// Initialize the function information cache \p FI for the function \p F.
  ///
  /// This method needs to be called for all function that might be looked at
  /// through the information cache interface *prior* to looking at them.
  void initializeInformationCache(const Function &F, FunctionInfo &FI);

  /// The datalayout used in the module.
  const DataLayout &DL;

  /// The allocator used to allocate memory, e.g. for `FunctionInfo`s.
  BumpPtrAllocator &Allocator;

  /// MustBeExecutedContextExplorer
  MustBeExecutedContextExplorer Explorer;

  /// A map with knowledge retained in `llvm.assume` instructions.
  RetainedKnowledgeMap KnowledgeMap;

  /// Getters for analysis.
  AnalysisGetter &AG;

  /// The underlying CGSCC, or null if not available.
  SetVector<Function *> *CGSCC;

  /// Set of inlineable functions.
  SmallPtrSet<const Function *, 8> InlineableFunctions;

  /// A map for caching results of queries for isPotentiallyReachable.
  DenseMap<std::pair<const Instruction *, const Instruction *>, bool>
      PotentiallyReachableMap;

  /// Give the Attributor access to the members so
  /// Attributor::identifyDefaultAbstractAttributes(...) can initialize them.
  friend struct Attributor;
};

/// The fixpoint analysis framework that orchestrates the attribute deduction.
///
/// The Attributor provides a general abstract analysis framework (guided
/// fixpoint iteration) as well as helper functions for the deduction of
/// (LLVM-IR) attributes. However, also other code properties can be deduced,
/// propagated, and ultimately manifested through the Attributor framework. This
/// is particularly useful if these properties interact with attributes and a
/// co-scheduled deduction allows to improve the solution.
/// Even if not, thus if
/// attributes/properties are completely isolated, they should use the
/// Attributor framework to reduce the number of fixpoint iteration frameworks
/// in the code base. Note that the Attributor design makes sure that isolated
/// attributes are not impacted, in any way, by others derived at the same time
/// if there is no cross-reasoning performed.
///
/// The public facing interface of the Attributor is kept simple and basically
/// allows abstract attributes to do one thing, query abstract attributes
/// in-flight. There are two reasons to do this:
///    a) The optimistic state of one abstract attribute can justify an
///       optimistic state of another, allowing the framework to end up with an
///       optimistic (=best possible) fixpoint instead of one based solely on
///       information in the IR.
///    b) This avoids reimplementing various kinds of lookups, e.g., to check
///       for existing IR attributes, in favor of a single lookup interface
///       provided by an abstract attribute subclass.
///
/// NOTE: The mechanics of adding a new "concrete" abstract attribute are
///       described in the file comment.
struct Attributor {
  /// Constructor
  ///
  /// \param Functions The set of functions we are deriving attributes for.
  /// \param InfoCache Cache to hold various information accessible for
  ///                  the abstract attributes.
  /// \param CGUpdater Helper to update an underlying call graph.
  /// \param Allowed If not null, a set limiting the attribute opportunities.
  /// \param DeleteFns Whether to delete functions.
  Attributor(SetVector<Function *> &Functions, InformationCache &InfoCache,
             CallGraphUpdater &CGUpdater,
             DenseSet<const char *> *Allowed = nullptr, bool DeleteFns = true)
      : Allocator(InfoCache.Allocator), Functions(Functions),
        InfoCache(InfoCache), CGUpdater(CGUpdater), Allowed(Allowed),
        DeleteFns(DeleteFns) {}

  ~Attributor();

  /// Run the analyses until a fixpoint is reached or enforced (timeout).
  ///
  /// The attributes registered with this Attributor can be used after as long
  /// as the Attributor is not destroyed (it owns the attributes now).
  ///
  /// \Returns CHANGED if the IR was changed, otherwise UNCHANGED.
  ChangeStatus run();

  /// Lookup an abstract attribute of type \p AAType at position \p IRP. While
  /// no abstract attribute is found equivalent positions are checked, see
  /// SubsumingPositionIterator. Thus, the returned abstract attribute
  /// might be anchored at a different position, e.g., the callee if \p IRP is a
  /// call base.
  ///
  /// This method is the only (supported) way an abstract attribute can retrieve
  /// information from another abstract attribute. As an example, take an
  /// abstract attribute that determines the memory access behavior for a
  /// argument (readnone, readonly, ...). It should use `getAAFor` to get the
  /// most optimistic information for other abstract attributes in-flight, e.g.
  /// the one reasoning about the "captured" state for the argument or the one
  /// reasoning on the memory access behavior of the function as a whole.
  ///
  /// If the DepClass enum is set to `DepClassTy::None` the dependence from
  /// \p QueryingAA to the return abstract attribute is not automatically
  /// recorded.
  /// This should only be used if the caller will record the
  /// dependence explicitly if necessary, thus if the returned abstract
  /// attribute is used for reasoning. To record the dependences explicitly use
  /// the `Attributor::recordDependence` method.
  template <typename AAType>
  const AAType &getAAFor(const AbstractAttribute &QueryingAA,
                         const IRPosition &IRP, DepClassTy DepClass) {
    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, DepClass,
                                    /* ForceUpdate */ false);
  }

  /// Similar to getAAFor but the return abstract attribute will be updated (via
  /// `AbstractAttribute::update`) even if it is found in the cache. This is
  /// especially useful for AAIsDead as changes in liveness can make updates
  /// possible/useful that were not happening before as the abstract attribute
  /// was assumed dead.
  template <typename AAType>
  const AAType &getAndUpdateAAFor(const AbstractAttribute &QueryingAA,
                                  const IRPosition &IRP, DepClassTy DepClass) {
    return getOrCreateAAFor<AAType>(IRP, &QueryingAA, DepClass,
                                    /* ForceUpdate */ true);
  }

  /// The version of getAAFor that allows to omit a querying abstract
  /// attribute. Using this after Attributor started running is restricted to
  /// only the Attributor itself. Initial seeding of AAs can be done via this
  /// function.
  /// NOTE: ForceUpdate is ignored in any stage other than the update stage.
  template <typename AAType>
  const AAType &
  getOrCreateAAFor(IRPosition IRP, const AbstractAttribute *QueryingAA,
                   DepClassTy DepClass, bool ForceUpdate = false) {
    if (!shouldPropagateCallBaseContext(IRP))
      IRP = IRP.stripCallBaseContext();

    // Fast path: reuse an existing attribute for this (type, position) key.
    if (AAType *AAPtr = lookupAAFor<AAType>(IRP, QueryingAA, DepClass)) {
      if (ForceUpdate && Phase == AttributorPhase::UPDATE)
        updateAA(*AAPtr);
      return *AAPtr;
    }

    // No matching attribute found, create one.
    // Use the static create method.
    auto &AA = AAType::createForPosition(IRP, *this);

    // If we are currently seeding attributes, enforce seeding rules.
    if (Phase == AttributorPhase::SEEDING && !shouldSeedAttribute(AA)) {
      AA.getState().indicatePessimisticFixpoint();
      return AA;
    }

    registerAA(AA);

    // For now we ignore naked and optnone functions.
    bool Invalidate = Allowed && !Allowed->count(&AAType::ID);
    const Function *FnScope = IRP.getAnchorScope();
    if (FnScope)
      Invalidate |= FnScope->hasFnAttribute(Attribute::Naked) ||
                    FnScope->hasFnAttribute(Attribute::OptimizeNone);

    // Avoid too many nested initializations to prevent a stack overflow.
    Invalidate |= InitializationChainLength > MaxInitializationChainLength;

    // Bootstrap the new attribute with an initial update to propagate
    // information, e.g., function -> call site. If it is not in the Allowed
    // set we will not perform updates at all.
    if (Invalidate) {
      AA.getState().indicatePessimisticFixpoint();
      return AA;
    }

    {
      TimeTraceScope TimeScope(AA.getName() + "::initialize");
      ++InitializationChainLength;
      AA.initialize(*this);
      --InitializationChainLength;
    }

    // Initialize and update is allowed for code outside of the current function
    // set, but only if it is part of module slice we are allowed to look at.
    // Only exception is AAIsDeadFunction whose initialization is prevented
    // directly, since we don't want to compute it twice.
    if (FnScope && !Functions.count(const_cast<Function *>(FnScope))) {
      if (!getInfoCache().isInModuleSlice(*FnScope)) {
        AA.getState().indicatePessimisticFixpoint();
        return AA;
      }
    }

    // If this is queried in the manifest stage, we force the AA to indicate
    // pessimistic fixpoint immediately.
    if (Phase == AttributorPhase::MANIFEST) {
      AA.getState().indicatePessimisticFixpoint();
      return AA;
    }

    // Allow seeded attributes to declare dependencies.
    // Remember the seeding state.
    AttributorPhase OldPhase = Phase;
    Phase = AttributorPhase::UPDATE;

    updateAA(AA);

    Phase = OldPhase;

    if (QueryingAA && AA.getState().isValidState())
      recordDependence(AA, const_cast<AbstractAttribute &>(*QueryingAA),
                       DepClass);
    return AA;
  }

  /// Convenience overload without a querying attribute; no dependence is
  /// recorded.
  template <typename AAType>
  const AAType &getOrCreateAAFor(const IRPosition &IRP) {
    return getOrCreateAAFor<AAType>(IRP, /* QueryingAA */ nullptr,
                                    DepClassTy::NONE);
  }

  /// Return the attribute of \p AAType for \p IRP if existing. This also allows
  /// non-AA users lookup.
  template <typename AAType>
  AAType *lookupAAFor(const IRPosition &IRP,
                      const AbstractAttribute *QueryingAA = nullptr,
                      DepClassTy DepClass = DepClassTy::OPTIONAL) {
    static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
                  "Cannot query an attribute with a type not derived from "
                  "'AbstractAttribute'!");
    // Lookup the abstract attribute of type AAType. If found, return it after
    // registering a dependence of QueryingAA on the one returned attribute.
    AbstractAttribute *AAPtr = AAMap.lookup({&AAType::ID, IRP});
    if (!AAPtr)
      return nullptr;

    AAType *AA = static_cast<AAType *>(AAPtr);

    // Do not register a dependence on an attribute with an invalid state.
    if (DepClass != DepClassTy::NONE && QueryingAA &&
        AA->getState().isValidState())
      recordDependence(*AA, const_cast<AbstractAttribute &>(*QueryingAA),
                       DepClass);
    return AA;
  }

  /// Explicitly record a dependence from \p FromAA to \p ToAA, that is if
  /// \p FromAA changes \p ToAA should be updated as well.
  ///
  /// This method should be used in conjunction with the `getAAFor` method and
  /// with the DepClass enum passed to the method set to None. This can
  /// be beneficial to avoid false dependences but it requires the users of
  /// `getAAFor` to explicitly record true dependences through this method.
  /// The \p DepClass flag indicates if the dependence is strictly necessary.
  /// That means for required dependences, if \p FromAA changes to an invalid
  /// state, \p ToAA can be moved to a pessimistic fixpoint because it required
  /// information from \p FromAA but none are available anymore.
  void recordDependence(const AbstractAttribute &FromAA,
                        const AbstractAttribute &ToAA, DepClassTy DepClass);

  /// Introduce a new abstract attribute into the fixpoint analysis.
  ///
  /// Note that ownership of the attribute is given to the Attributor. It will
  /// invoke delete for the Attributor on destruction of the Attributor.
  ///
  /// Attributes are identified by their IR position (AAType::getIRPosition())
  /// and the address of their static member (see AAType::ID).
  template <typename AAType> AAType &registerAA(AAType &AA) {
    static_assert(std::is_base_of<AbstractAttribute, AAType>::value,
                  "Cannot register an attribute with a type not derived from "
                  "'AbstractAttribute'!");
    // Put the attribute in the lookup map structure and the container we use to
    // keep track of all attributes.
    const IRPosition &IRP = AA.getIRPosition();
    AbstractAttribute *&AAPtr = AAMap[{&AAType::ID, IRP}];

    assert(!AAPtr && "Attribute already in map!");
    AAPtr = &AA;

    // Register AA with the synthetic root only before the manifest stage.
    if (Phase == AttributorPhase::SEEDING || Phase == AttributorPhase::UPDATE)
      DG.SyntheticRoot.Deps.push_back(
          AADepGraphNode::DepTy(&AA, unsigned(DepClassTy::REQUIRED)));

    return AA;
  }

  /// Return the internal information cache.
  InformationCache &getInfoCache() { return InfoCache; }

  /// Return true if this is a module pass, false otherwise.
  bool isModulePass() const {
    return !Functions.empty() &&
           Functions.size() == Functions.front()->getParent()->size();
  }

  /// Return true if we derive attributes for \p Fn.
  bool isRunOn(Function &Fn) const {
    return Functions.empty() || Functions.count(&Fn);
  }

  /// Determine opportunities to derive 'default' attributes in \p F and create
  /// abstract attribute objects for them.
  ///
  /// \param F The function that is checked for attribute opportunities.
  ///
  /// Note that abstract attribute instances are generally created even if the
  /// IR already contains the information they would deduce. The most important
  /// reason for this is the single interface, the one of the abstract attribute
  /// instance, which can be queried without the need to look at the IR in
  /// various places.
  void identifyDefaultAbstractAttributes(Function &F);

  /// Determine whether the function \p F is IPO amendable.
  ///
  /// If a function is exactly defined or it has alwaysinline attribute
  /// and is viable to be inlined, we say it is IPO amendable.
  bool isFunctionIPOAmendable(const Function &F) {
    return F.hasExactDefinition() || InfoCache.InlineableFunctions.count(&F);
  }

  /// Mark the internal function \p F as live.
  ///
  /// This will trigger the identification and initialization of attributes for
  /// \p F.
  void markLiveInternalFunction(const Function &F) {
    assert(F.hasLocalLinkage() &&
           "Only local linkage is assumed dead initially.");

    identifyDefaultAbstractAttributes(const_cast<Function &>(F));
  }

  /// Helper function to remove a callsite.
  void removeCallSite(CallInst *CI) {
    if (!CI)
      return;

    CGUpdater.removeCallSite(*CI);
  }

  /// Record that \p U is to be replaced with \p NV after information was
  /// manifested. This also triggers deletion of trivially dead instructions.
  bool changeUseAfterManifest(Use &U, Value &NV) {
    Value *&V = ToBeChangedUses[&U];
    // Keep the first registered replacement unless the new one is equivalent
    // (modulo pointer casts) or the old one was undef.
    if (V && (V->stripPointerCasts() == NV.stripPointerCasts() ||
              isa_and_nonnull<UndefValue>(V)))
      return false;
    assert((!V || V == &NV || isa<UndefValue>(NV)) &&
           "Use was registered twice for replacement with different values!");
    V = &NV;
    return true;
  }

  /// Helper function to replace all uses of \p V with \p NV. Return true if
  /// there is any change. The flag \p ChangeDroppable indicates if droppable
  /// uses should be changed too.
  bool changeValueAfterManifest(Value &V, Value &NV,
                                bool ChangeDroppable = true) {
    bool Changed = false;
    for (auto &U : V.uses())
      if (ChangeDroppable || !U.getUser()->isDroppable())
        Changed |= changeUseAfterManifest(U, NV);

    return Changed;
  }

  /// Record that \p I is to be replaced with `unreachable` after information
  /// was manifested.
  void changeToUnreachableAfterManifest(Instruction *I) {
    ToBeChangedToUnreachableInsts.insert(I);
  }

  /// Record that \p II has at least one dead successor block. This information
  /// is used, e.g., to replace \p II with a call, after information was
  /// manifested.
  void registerInvokeWithDeadSuccessor(InvokeInst &II) {
    InvokeWithDeadSuccessor.push_back(&II);
  }

  /// Record that \p I is deleted after information was manifested. This also
  /// triggers deletion of trivially dead instructions.
  void deleteAfterManifest(Instruction &I) { ToBeDeletedInsts.insert(&I); }

  /// Record that \p BB is deleted after information was manifested. This also
  /// triggers deletion of trivially dead instructions.
  void deleteAfterManifest(BasicBlock &BB) { ToBeDeletedBlocks.insert(&BB); }

  /// Record that \p F is deleted after information was manifested.
  void deleteAfterManifest(Function &F) {
    if (DeleteFns)
      ToBeDeletedFunctions.insert(&F);
  }

  /// If \p V is assumed to be a constant, return it, if it is unclear yet,
  /// return None, otherwise return `nullptr`.
  Optional<Constant *> getAssumedConstant(const Value &V,
                                          const AbstractAttribute &AA,
                                          bool &UsedAssumedInformation);

  /// Return true if \p AA (or its context instruction) is assumed dead.
  ///
  /// If \p LivenessAA is not provided it is queried.
  bool isAssumedDead(const AbstractAttribute &AA, const AAIsDead *LivenessAA,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Return true if \p I is assumed dead.
  ///
  /// If \p LivenessAA is not provided it is queried.
  bool isAssumedDead(const Instruction &I, const AbstractAttribute *QueryingAA,
                     const AAIsDead *LivenessAA,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Return true if \p U is assumed dead.
  ///
  /// If \p FnLivenessAA is not provided it is queried.
  bool isAssumedDead(const Use &U, const AbstractAttribute *QueryingAA,
                     const AAIsDead *FnLivenessAA,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Return true if \p IRP is assumed dead.
  ///
  /// If \p FnLivenessAA is not provided it is queried.
  bool isAssumedDead(const IRPosition &IRP, const AbstractAttribute *QueryingAA,
                     const AAIsDead *FnLivenessAA,
                     bool CheckBBLivenessOnly = false,
                     DepClassTy DepClass = DepClassTy::OPTIONAL);

  /// Check \p Pred on all (transitive) uses of \p V.
  ///
  /// This method will evaluate \p Pred on all (transitive) uses of the
  /// associated value and return true if \p Pred holds every time.
  bool checkForAllUses(function_ref<bool(const Use &, bool &)> Pred,
                       const AbstractAttribute &QueryingAA, const Value &V,
                       DepClassTy LivenessDepClass = DepClassTy::OPTIONAL);

  /// Helper struct used in the communication between an abstract attribute (AA)
  /// that wants to change the signature of a function and the Attributor which
  /// applies the changes. The struct is partially initialized with the
  /// information from the AA (see the constructor). All other members are
  /// provided by the Attributor prior to invoking any callbacks.
  struct ArgumentReplacementInfo {
    /// Callee repair callback type
    ///
    /// The function repair callback is invoked once to rewire the replacement
    /// arguments in the body of the new function. The argument replacement info
    /// is passed, as built from the registerFunctionSignatureRewrite call, as
    /// well as the replacement function and an iterator to the first
    /// replacement argument.
    using CalleeRepairCBTy = std::function<void(
        const ArgumentReplacementInfo &, Function &, Function::arg_iterator)>;

    /// Abstract call site (ACS) repair callback type
    ///
    /// The abstract call site repair callback is invoked once on every abstract
    /// call site of the replaced function (\see ReplacedFn). The callback needs
    /// to provide the operands for the call to the new replacement function.
    /// The number and type of the operands appended to the provided vector
    /// (second argument) is defined by the number and types determined through
    /// the replacement type vector (\see ReplacementTypes). The first argument
    /// is the ArgumentReplacementInfo object registered with the Attributor
    /// through the registerFunctionSignatureRewrite call.
    using ACSRepairCBTy =
        std::function<void(const ArgumentReplacementInfo &, AbstractCallSite,
                           SmallVectorImpl<Value *> &)>;

    /// Simple getters, see the corresponding members for details.
    ///{

    Attributor &getAttributor() const { return A; }
    const Function &getReplacedFn() const { return ReplacedFn; }
    const Argument &getReplacedArg() const { return ReplacedArg; }
    unsigned getNumReplacementArgs() const { return ReplacementTypes.size(); }
    const SmallVectorImpl<Type *> &getReplacementTypes() const {
      return ReplacementTypes;
    }

    ///}

  private:
    /// Constructor that takes the argument to be replaced, the types of
    /// the replacement arguments, as well as callbacks to repair the call sites
    /// and new function after the replacement happened.
    ArgumentReplacementInfo(Attributor &A, Argument &Arg,
                            ArrayRef<Type *> ReplacementTypes,
                            CalleeRepairCBTy &&CalleeRepairCB,
                            ACSRepairCBTy &&ACSRepairCB)
        : A(A), ReplacedFn(*Arg.getParent()), ReplacedArg(Arg),
          ReplacementTypes(ReplacementTypes.begin(), ReplacementTypes.end()),
          CalleeRepairCB(std::move(CalleeRepairCB)),
          ACSRepairCB(std::move(ACSRepairCB)) {}

    /// Reference to the attributor to allow access from the callbacks.
    Attributor &A;

    /// The "old" function replaced by ReplacementFn.
    const Function &ReplacedFn;

    /// The "old" argument replaced by new ones defined via ReplacementTypes.
    const Argument &ReplacedArg;

    /// The types of the arguments replacing ReplacedArg.
    const SmallVector<Type *, 8> ReplacementTypes;

    /// Callee repair callback, see CalleeRepairCBTy.
    const CalleeRepairCBTy CalleeRepairCB;

    /// Abstract call site (ACS) repair callback, see ACSRepairCBTy.
    const ACSRepairCBTy ACSRepairCB;

    /// Allow access to the private members from the Attributor.
    friend struct Attributor;
  };

  /// Check if we can rewrite a function signature.
  ///
  /// The argument \p Arg is replaced with new ones defined by the number,
  /// order, and types in \p ReplacementTypes.
  ///
  /// \returns True, if the replacement can be registered, via
  /// registerFunctionSignatureRewrite, false otherwise.
  bool isValidFunctionSignatureRewrite(Argument &Arg,
                                       ArrayRef<Type *> ReplacementTypes);

  /// Register a rewrite for a function signature.
  ///
  /// The argument \p Arg is replaced with new ones defined by the number,
  /// order, and types in \p ReplacementTypes. The rewiring at the call sites is
  /// done through \p ACSRepairCB and at the callee site through
  /// \p CalleeRepairCB.
  ///
  /// \returns True, if the replacement was registered, false otherwise.
1525 bool registerFunctionSignatureRewrite( 1526 Argument &Arg, ArrayRef<Type *> ReplacementTypes, 1527 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB, 1528 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB); 1529 1530 /// Check \p Pred on all function call sites. 1531 /// 1532 /// This method will evaluate \p Pred on call sites and return 1533 /// true if \p Pred holds in every call sites. However, this is only possible 1534 /// all call sites are known, hence the function has internal linkage. 1535 /// If true is returned, \p AllCallSitesKnown is set if all possible call 1536 /// sites of the function have been visited. 1537 bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred, 1538 const AbstractAttribute &QueryingAA, 1539 bool RequireAllCallSites, bool &AllCallSitesKnown); 1540 1541 /// Check \p Pred on all values potentially returned by \p F. 1542 /// 1543 /// This method will evaluate \p Pred on all values potentially returned by 1544 /// the function associated with \p QueryingAA. The returned values are 1545 /// matched with their respective return instructions. Returns true if \p Pred 1546 /// holds on all of them. 1547 bool checkForAllReturnedValuesAndReturnInsts( 1548 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred, 1549 const AbstractAttribute &QueryingAA); 1550 1551 /// Check \p Pred on all values potentially returned by the function 1552 /// associated with \p QueryingAA. 1553 /// 1554 /// This is the context insensitive version of the method above. 1555 bool checkForAllReturnedValues(function_ref<bool(Value &)> Pred, 1556 const AbstractAttribute &QueryingAA); 1557 1558 /// Check \p Pred on all instructions with an opcode present in \p Opcodes. 1559 /// 1560 /// This method will evaluate \p Pred on all instructions with an opcode 1561 /// present in \p Opcode and return true if \p Pred holds on all of them. 
1562 bool checkForAllInstructions(function_ref<bool(Instruction &)> Pred, 1563 const AbstractAttribute &QueryingAA, 1564 const ArrayRef<unsigned> &Opcodes, 1565 bool CheckBBLivenessOnly = false); 1566 1567 /// Check \p Pred on all call-like instructions (=CallBased derived). 1568 /// 1569 /// See checkForAllCallLikeInstructions(...) for more information. 1570 bool checkForAllCallLikeInstructions(function_ref<bool(Instruction &)> Pred, 1571 const AbstractAttribute &QueryingAA) { 1572 return checkForAllInstructions(Pred, QueryingAA, 1573 {(unsigned)Instruction::Invoke, 1574 (unsigned)Instruction::CallBr, 1575 (unsigned)Instruction::Call}); 1576 } 1577 1578 /// Check \p Pred on all Read/Write instructions. 1579 /// 1580 /// This method will evaluate \p Pred on all instructions that read or write 1581 /// to memory present in the information cache and return true if \p Pred 1582 /// holds on all of them. 1583 bool checkForAllReadWriteInstructions(function_ref<bool(Instruction &)> Pred, 1584 AbstractAttribute &QueryingAA); 1585 1586 /// Create a shallow wrapper for \p F such that \p F has internal linkage 1587 /// afterwards. It also sets the original \p F 's name to anonymous 1588 /// 1589 /// A wrapper is a function with the same type (and attributes) as \p F 1590 /// that will only call \p F and return the result, if any. 1591 /// 1592 /// Assuming the declaration of looks like: 1593 /// rty F(aty0 arg0, ..., atyN argN); 1594 /// 1595 /// The wrapper will then look as follows: 1596 /// rty wrapper(aty0 arg0, ..., atyN argN) { 1597 /// return F(arg0, ..., argN); 1598 /// } 1599 /// 1600 static void createShallowWrapper(Function &F); 1601 1602 /// Return the data layout associated with the anchor scope. 1603 const DataLayout &getDataLayout() const { return InfoCache.DL; } 1604 1605 /// The allocator used to allocate memory, e.g. for `AbstractAttribute`s. 
1606 BumpPtrAllocator &Allocator; 1607 1608 private: 1609 /// This method will do fixpoint iteration until fixpoint or the 1610 /// maximum iteration count is reached. 1611 /// 1612 /// If the maximum iteration count is reached, This method will 1613 /// indicate pessimistic fixpoint on attributes that transitively depend 1614 /// on attributes that were scheduled for an update. 1615 void runTillFixpoint(); 1616 1617 /// Gets called after scheduling, manifests attributes to the LLVM IR. 1618 ChangeStatus manifestAttributes(); 1619 1620 /// Gets called after attributes have been manifested, cleans up the IR. 1621 /// Deletes dead functions, blocks and instructions. 1622 /// Rewrites function signitures and updates the call graph. 1623 ChangeStatus cleanupIR(); 1624 1625 /// Identify internal functions that are effectively dead, thus not reachable 1626 /// from a live entry point. The functions are added to ToBeDeletedFunctions. 1627 void identifyDeadInternalFunctions(); 1628 1629 /// Run `::update` on \p AA and track the dependences queried while doing so. 1630 /// Also adjust the state if we know further updates are not necessary. 1631 ChangeStatus updateAA(AbstractAttribute &AA); 1632 1633 /// Remember the dependences on the top of the dependence stack such that they 1634 /// may trigger further updates. (\see DependenceStack) 1635 void rememberDependences(); 1636 1637 /// Check \p Pred on all call sites of \p Fn. 1638 /// 1639 /// This method will evaluate \p Pred on call sites and return 1640 /// true if \p Pred holds in every call sites. However, this is only possible 1641 /// all call sites are known, hence the function has internal linkage. 1642 /// If true is returned, \p AllCallSitesKnown is set if all possible call 1643 /// sites of the function have been visited. 
1644 bool checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred, 1645 const Function &Fn, bool RequireAllCallSites, 1646 const AbstractAttribute *QueryingAA, 1647 bool &AllCallSitesKnown); 1648 1649 /// Determine if CallBase context in \p IRP should be propagated. 1650 bool shouldPropagateCallBaseContext(const IRPosition &IRP); 1651 1652 /// Apply all requested function signature rewrites 1653 /// (\see registerFunctionSignatureRewrite) and return Changed if the module 1654 /// was altered. 1655 ChangeStatus 1656 rewriteFunctionSignatures(SmallPtrSetImpl<Function *> &ModifiedFns); 1657 1658 /// Check if the Attribute \p AA should be seeded. 1659 /// See getOrCreateAAFor. 1660 bool shouldSeedAttribute(AbstractAttribute &AA); 1661 1662 /// A nested map to lookup abstract attributes based on the argument position 1663 /// on the outer level, and the addresses of the static member (AAType::ID) on 1664 /// the inner level. 1665 ///{ 1666 using AAMapKeyTy = std::pair<const char *, IRPosition>; 1667 DenseMap<AAMapKeyTy, AbstractAttribute *> AAMap; 1668 ///} 1669 1670 /// Map to remember all requested signature changes (= argument replacements). 1671 DenseMap<Function *, SmallVector<std::unique_ptr<ArgumentReplacementInfo>, 8>> 1672 ArgumentReplacementMap; 1673 1674 /// The set of functions we are deriving attributes for. 1675 SetVector<Function *> &Functions; 1676 1677 /// The information cache that holds pre-processed (LLVM-IR) information. 1678 InformationCache &InfoCache; 1679 1680 /// Helper to update an underlying call graph. 1681 CallGraphUpdater &CGUpdater; 1682 1683 /// Abstract Attribute dependency graph 1684 AADepGraph DG; 1685 1686 /// Set of functions for which we modified the content such that it might 1687 /// impact the call graph. 1688 SmallPtrSet<Function *, 8> CGModifiedFunctions; 1689 1690 /// Information about a dependence. If FromAA is changed ToAA needs to be 1691 /// updated as well. 
1692 struct DepInfo { 1693 const AbstractAttribute *FromAA; 1694 const AbstractAttribute *ToAA; 1695 DepClassTy DepClass; 1696 }; 1697 1698 /// The dependence stack is used to track dependences during an 1699 /// `AbstractAttribute::update` call. As `AbstractAttribute::update` can be 1700 /// recursive we might have multiple vectors of dependences in here. The stack 1701 /// size, should be adjusted according to the expected recursion depth and the 1702 /// inner dependence vector size to the expected number of dependences per 1703 /// abstract attribute. Since the inner vectors are actually allocated on the 1704 /// stack we can be generous with their size. 1705 using DependenceVector = SmallVector<DepInfo, 8>; 1706 SmallVector<DependenceVector *, 16> DependenceStack; 1707 1708 /// If not null, a set limiting the attribute opportunities. 1709 const DenseSet<const char *> *Allowed; 1710 1711 /// Whether to delete functions. 1712 const bool DeleteFns; 1713 1714 /// A set to remember the functions we already assume to be live and visited. 1715 DenseSet<const Function *> VisitedFunctions; 1716 1717 /// Uses we replace with a new value after manifest is done. We will remove 1718 /// then trivially dead instructions as well. 1719 DenseMap<Use *, Value *> ToBeChangedUses; 1720 1721 /// Instructions we replace with `unreachable` insts after manifest is done. 1722 SmallDenseSet<WeakVH, 16> ToBeChangedToUnreachableInsts; 1723 1724 /// Invoke instructions with at least a single dead successor block. 1725 SmallVector<WeakVH, 16> InvokeWithDeadSuccessor; 1726 1727 /// A flag that indicates which stage of the process we are in. Initially, the 1728 /// phase is SEEDING. Phase is changed in `Attributor::run()` 1729 enum class AttributorPhase { 1730 SEEDING, 1731 UPDATE, 1732 MANIFEST, 1733 CLEANUP, 1734 } Phase = AttributorPhase::SEEDING; 1735 1736 /// The current initialization chain length. Tracked to avoid stack overflows. 
1737 unsigned InitializationChainLength = 0; 1738 1739 /// Functions, blocks, and instructions we delete after manifest is done. 1740 /// 1741 ///{ 1742 SmallPtrSet<Function *, 8> ToBeDeletedFunctions; 1743 SmallPtrSet<BasicBlock *, 8> ToBeDeletedBlocks; 1744 SmallDenseSet<WeakVH, 8> ToBeDeletedInsts; 1745 ///} 1746 1747 friend AADepGraph; 1748 }; 1749 1750 /// An interface to query the internal state of an abstract attribute. 1751 /// 1752 /// The abstract state is a minimal interface that allows the Attributor to 1753 /// communicate with the abstract attributes about their internal state without 1754 /// enforcing or exposing implementation details, e.g., the (existence of an) 1755 /// underlying lattice. 1756 /// 1757 /// It is sufficient to be able to query if a state is (1) valid or invalid, (2) 1758 /// at a fixpoint, and to indicate to the state that (3) an optimistic fixpoint 1759 /// was reached or (4) a pessimistic fixpoint was enforced. 1760 /// 1761 /// All methods need to be implemented by the subclass. For the common use case, 1762 /// a single boolean state or a bit-encoded state, the BooleanState and 1763 /// {Inc,Dec,Bit}IntegerState classes are already provided. An abstract 1764 /// attribute can inherit from them to get the abstract state interface and 1765 /// additional methods to directly modify the state based if needed. See the 1766 /// class comments for help. 1767 struct AbstractState { 1768 virtual ~AbstractState() {} 1769 1770 /// Return if this abstract state is in a valid state. If false, no 1771 /// information provided should be used. 1772 virtual bool isValidState() const = 0; 1773 1774 /// Return if this abstract state is fixed, thus does not need to be updated 1775 /// if information changes as it cannot change itself. 1776 virtual bool isAtFixpoint() const = 0; 1777 1778 /// Indicate that the abstract state should converge to the optimistic state. 
1779 /// 1780 /// This will usually make the optimistically assumed state the known to be 1781 /// true state. 1782 /// 1783 /// \returns ChangeStatus::UNCHANGED as the assumed value should not change. 1784 virtual ChangeStatus indicateOptimisticFixpoint() = 0; 1785 1786 /// Indicate that the abstract state should converge to the pessimistic state. 1787 /// 1788 /// This will usually revert the optimistically assumed state to the known to 1789 /// be true state. 1790 /// 1791 /// \returns ChangeStatus::CHANGED as the assumed value may change. 1792 virtual ChangeStatus indicatePessimisticFixpoint() = 0; 1793 }; 1794 1795 /// Simple state with integers encoding. 1796 /// 1797 /// The interface ensures that the assumed bits are always a subset of the known 1798 /// bits. Users can only add known bits and, except through adding known bits, 1799 /// they can only remove assumed bits. This should guarantee monotoniticy and 1800 /// thereby the existence of a fixpoint (if used corretly). The fixpoint is 1801 /// reached when the assumed and known state/bits are equal. Users can 1802 /// force/inidicate a fixpoint. If an optimistic one is indicated, the known 1803 /// state will catch up with the assumed one, for a pessimistic fixpoint it is 1804 /// the other way around. 1805 template <typename base_ty, base_ty BestState, base_ty WorstState> 1806 struct IntegerStateBase : public AbstractState { 1807 using base_t = base_ty; 1808 1809 IntegerStateBase() {} 1810 IntegerStateBase(base_t Assumed) : Assumed(Assumed) {} 1811 1812 /// Return the best possible representable state. 1813 static constexpr base_t getBestState() { return BestState; } 1814 static constexpr base_t getBestState(const IntegerStateBase &) { 1815 return getBestState(); 1816 } 1817 1818 /// Return the worst possible representable state. 
1819 static constexpr base_t getWorstState() { return WorstState; } 1820 static constexpr base_t getWorstState(const IntegerStateBase &) { 1821 return getWorstState(); 1822 } 1823 1824 /// See AbstractState::isValidState() 1825 /// NOTE: For now we simply pretend that the worst possible state is invalid. 1826 bool isValidState() const override { return Assumed != getWorstState(); } 1827 1828 /// See AbstractState::isAtFixpoint() 1829 bool isAtFixpoint() const override { return Assumed == Known; } 1830 1831 /// See AbstractState::indicateOptimisticFixpoint(...) 1832 ChangeStatus indicateOptimisticFixpoint() override { 1833 Known = Assumed; 1834 return ChangeStatus::UNCHANGED; 1835 } 1836 1837 /// See AbstractState::indicatePessimisticFixpoint(...) 1838 ChangeStatus indicatePessimisticFixpoint() override { 1839 Assumed = Known; 1840 return ChangeStatus::CHANGED; 1841 } 1842 1843 /// Return the known state encoding 1844 base_t getKnown() const { return Known; } 1845 1846 /// Return the assumed state encoding. 1847 base_t getAssumed() const { return Assumed; } 1848 1849 /// Equality for IntegerStateBase. 1850 bool 1851 operator==(const IntegerStateBase<base_t, BestState, WorstState> &R) const { 1852 return this->getAssumed() == R.getAssumed() && 1853 this->getKnown() == R.getKnown(); 1854 } 1855 1856 /// Inequality for IntegerStateBase. 1857 bool 1858 operator!=(const IntegerStateBase<base_t, BestState, WorstState> &R) const { 1859 return !(*this == R); 1860 } 1861 1862 /// "Clamp" this state with \p R. The result is subtype dependent but it is 1863 /// intended that only information assumed in both states will be assumed in 1864 /// this one afterwards. 1865 void operator^=(const IntegerStateBase<base_t, BestState, WorstState> &R) { 1866 handleNewAssumedValue(R.getAssumed()); 1867 } 1868 1869 /// "Clamp" this state with \p R. 
The result is subtype dependent but it is 1870 /// intended that information known in either state will be known in 1871 /// this one afterwards. 1872 void operator+=(const IntegerStateBase<base_t, BestState, WorstState> &R) { 1873 handleNewKnownValue(R.getKnown()); 1874 } 1875 1876 void operator|=(const IntegerStateBase<base_t, BestState, WorstState> &R) { 1877 joinOR(R.getAssumed(), R.getKnown()); 1878 } 1879 1880 void operator&=(const IntegerStateBase<base_t, BestState, WorstState> &R) { 1881 joinAND(R.getAssumed(), R.getKnown()); 1882 } 1883 1884 protected: 1885 /// Handle a new assumed value \p Value. Subtype dependent. 1886 virtual void handleNewAssumedValue(base_t Value) = 0; 1887 1888 /// Handle a new known value \p Value. Subtype dependent. 1889 virtual void handleNewKnownValue(base_t Value) = 0; 1890 1891 /// Handle a value \p Value. Subtype dependent. 1892 virtual void joinOR(base_t AssumedValue, base_t KnownValue) = 0; 1893 1894 /// Handle a new assumed value \p Value. Subtype dependent. 1895 virtual void joinAND(base_t AssumedValue, base_t KnownValue) = 0; 1896 1897 /// The known state encoding in an integer of type base_t. 1898 base_t Known = getWorstState(); 1899 1900 /// The assumed state encoding in an integer of type base_t. 1901 base_t Assumed = getBestState(); 1902 }; 1903 1904 /// Specialization of the integer state for a bit-wise encoding. 1905 template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0), 1906 base_ty WorstState = 0> 1907 struct BitIntegerState 1908 : public IntegerStateBase<base_ty, BestState, WorstState> { 1909 using base_t = base_ty; 1910 1911 /// Return true if the bits set in \p BitsEncoding are "known bits". 1912 bool isKnown(base_t BitsEncoding) const { 1913 return (this->Known & BitsEncoding) == BitsEncoding; 1914 } 1915 1916 /// Return true if the bits set in \p BitsEncoding are "assumed bits". 
1917 bool isAssumed(base_t BitsEncoding) const { 1918 return (this->Assumed & BitsEncoding) == BitsEncoding; 1919 } 1920 1921 /// Add the bits in \p BitsEncoding to the "known bits". 1922 BitIntegerState &addKnownBits(base_t Bits) { 1923 // Make sure we never miss any "known bits". 1924 this->Assumed |= Bits; 1925 this->Known |= Bits; 1926 return *this; 1927 } 1928 1929 /// Remove the bits in \p BitsEncoding from the "assumed bits" if not known. 1930 BitIntegerState &removeAssumedBits(base_t BitsEncoding) { 1931 return intersectAssumedBits(~BitsEncoding); 1932 } 1933 1934 /// Remove the bits in \p BitsEncoding from the "known bits". 1935 BitIntegerState &removeKnownBits(base_t BitsEncoding) { 1936 this->Known = (this->Known & ~BitsEncoding); 1937 return *this; 1938 } 1939 1940 /// Keep only "assumed bits" also set in \p BitsEncoding but all known ones. 1941 BitIntegerState &intersectAssumedBits(base_t BitsEncoding) { 1942 // Make sure we never loose any "known bits". 1943 this->Assumed = (this->Assumed & BitsEncoding) | this->Known; 1944 return *this; 1945 } 1946 1947 private: 1948 void handleNewAssumedValue(base_t Value) override { 1949 intersectAssumedBits(Value); 1950 } 1951 void handleNewKnownValue(base_t Value) override { addKnownBits(Value); } 1952 void joinOR(base_t AssumedValue, base_t KnownValue) override { 1953 this->Known |= KnownValue; 1954 this->Assumed |= AssumedValue; 1955 } 1956 void joinAND(base_t AssumedValue, base_t KnownValue) override { 1957 this->Known &= KnownValue; 1958 this->Assumed &= AssumedValue; 1959 } 1960 }; 1961 1962 /// Specialization of the integer state for an increasing value, hence ~0u is 1963 /// the best state and 0 the worst. 
1964 template <typename base_ty = uint32_t, base_ty BestState = ~base_ty(0), 1965 base_ty WorstState = 0> 1966 struct IncIntegerState 1967 : public IntegerStateBase<base_ty, BestState, WorstState> { 1968 using super = IntegerStateBase<base_ty, BestState, WorstState>; 1969 using base_t = base_ty; 1970 1971 IncIntegerState() : super() {} 1972 IncIntegerState(base_t Assumed) : super(Assumed) {} 1973 1974 /// Return the best possible representable state. 1975 static constexpr base_t getBestState() { return BestState; } 1976 static constexpr base_t 1977 getBestState(const IncIntegerState<base_ty, BestState, WorstState> &) { 1978 return getBestState(); 1979 } 1980 1981 /// Take minimum of assumed and \p Value. 1982 IncIntegerState &takeAssumedMinimum(base_t Value) { 1983 // Make sure we never loose "known value". 1984 this->Assumed = std::max(std::min(this->Assumed, Value), this->Known); 1985 return *this; 1986 } 1987 1988 /// Take maximum of known and \p Value. 1989 IncIntegerState &takeKnownMaximum(base_t Value) { 1990 // Make sure we never loose "known value". 1991 this->Assumed = std::max(Value, this->Assumed); 1992 this->Known = std::max(Value, this->Known); 1993 return *this; 1994 } 1995 1996 private: 1997 void handleNewAssumedValue(base_t Value) override { 1998 takeAssumedMinimum(Value); 1999 } 2000 void handleNewKnownValue(base_t Value) override { takeKnownMaximum(Value); } 2001 void joinOR(base_t AssumedValue, base_t KnownValue) override { 2002 this->Known = std::max(this->Known, KnownValue); 2003 this->Assumed = std::max(this->Assumed, AssumedValue); 2004 } 2005 void joinAND(base_t AssumedValue, base_t KnownValue) override { 2006 this->Known = std::min(this->Known, KnownValue); 2007 this->Assumed = std::min(this->Assumed, AssumedValue); 2008 } 2009 }; 2010 2011 /// Specialization of the integer state for a decreasing value, hence 0 is the 2012 /// best state and ~0u the worst. 
2013 template <typename base_ty = uint32_t> 2014 struct DecIntegerState : public IntegerStateBase<base_ty, 0, ~base_ty(0)> { 2015 using base_t = base_ty; 2016 2017 /// Take maximum of assumed and \p Value. 2018 DecIntegerState &takeAssumedMaximum(base_t Value) { 2019 // Make sure we never loose "known value". 2020 this->Assumed = std::min(std::max(this->Assumed, Value), this->Known); 2021 return *this; 2022 } 2023 2024 /// Take minimum of known and \p Value. 2025 DecIntegerState &takeKnownMinimum(base_t Value) { 2026 // Make sure we never loose "known value". 2027 this->Assumed = std::min(Value, this->Assumed); 2028 this->Known = std::min(Value, this->Known); 2029 return *this; 2030 } 2031 2032 private: 2033 void handleNewAssumedValue(base_t Value) override { 2034 takeAssumedMaximum(Value); 2035 } 2036 void handleNewKnownValue(base_t Value) override { takeKnownMinimum(Value); } 2037 void joinOR(base_t AssumedValue, base_t KnownValue) override { 2038 this->Assumed = std::min(this->Assumed, KnownValue); 2039 this->Assumed = std::min(this->Assumed, AssumedValue); 2040 } 2041 void joinAND(base_t AssumedValue, base_t KnownValue) override { 2042 this->Assumed = std::max(this->Assumed, KnownValue); 2043 this->Assumed = std::max(this->Assumed, AssumedValue); 2044 } 2045 }; 2046 2047 /// Simple wrapper for a single bit (boolean) state. 2048 struct BooleanState : public IntegerStateBase<bool, 1, 0> { 2049 using super = IntegerStateBase<bool, 1, 0>; 2050 using base_t = IntegerStateBase::base_t; 2051 2052 BooleanState() : super() {} 2053 BooleanState(base_t Assumed) : super(Assumed) {} 2054 2055 /// Set the assumed value to \p Value but never below the known one. 2056 void setAssumed(bool Value) { Assumed &= (Known | Value); } 2057 2058 /// Set the known and asssumed value to \p Value. 2059 void setKnown(bool Value) { 2060 Known |= Value; 2061 Assumed |= Value; 2062 } 2063 2064 /// Return true if the state is assumed to hold. 
2065 bool isAssumed() const { return getAssumed(); } 2066 2067 /// Return true if the state is known to hold. 2068 bool isKnown() const { return getKnown(); } 2069 2070 private: 2071 void handleNewAssumedValue(base_t Value) override { 2072 if (!Value) 2073 Assumed = Known; 2074 } 2075 void handleNewKnownValue(base_t Value) override { 2076 if (Value) 2077 Known = (Assumed = Value); 2078 } 2079 void joinOR(base_t AssumedValue, base_t KnownValue) override { 2080 Known |= KnownValue; 2081 Assumed |= AssumedValue; 2082 } 2083 void joinAND(base_t AssumedValue, base_t KnownValue) override { 2084 Known &= KnownValue; 2085 Assumed &= AssumedValue; 2086 } 2087 }; 2088 2089 /// State for an integer range. 2090 struct IntegerRangeState : public AbstractState { 2091 2092 /// Bitwidth of the associated value. 2093 uint32_t BitWidth; 2094 2095 /// State representing assumed range, initially set to empty. 2096 ConstantRange Assumed; 2097 2098 /// State representing known range, initially set to [-inf, inf]. 2099 ConstantRange Known; 2100 2101 IntegerRangeState(uint32_t BitWidth) 2102 : BitWidth(BitWidth), Assumed(ConstantRange::getEmpty(BitWidth)), 2103 Known(ConstantRange::getFull(BitWidth)) {} 2104 2105 IntegerRangeState(const ConstantRange &CR) 2106 : BitWidth(CR.getBitWidth()), Assumed(CR), 2107 Known(getWorstState(CR.getBitWidth())) {} 2108 2109 /// Return the worst possible representable state. 2110 static ConstantRange getWorstState(uint32_t BitWidth) { 2111 return ConstantRange::getFull(BitWidth); 2112 } 2113 2114 /// Return the best possible representable state. 2115 static ConstantRange getBestState(uint32_t BitWidth) { 2116 return ConstantRange::getEmpty(BitWidth); 2117 } 2118 static ConstantRange getBestState(const IntegerRangeState &IRS) { 2119 return getBestState(IRS.getBitWidth()); 2120 } 2121 2122 /// Return associated values' bit width. 
2123 uint32_t getBitWidth() const { return BitWidth; } 2124 2125 /// See AbstractState::isValidState() 2126 bool isValidState() const override { 2127 return BitWidth > 0 && !Assumed.isFullSet(); 2128 } 2129 2130 /// See AbstractState::isAtFixpoint() 2131 bool isAtFixpoint() const override { return Assumed == Known; } 2132 2133 /// See AbstractState::indicateOptimisticFixpoint(...) 2134 ChangeStatus indicateOptimisticFixpoint() override { 2135 Known = Assumed; 2136 return ChangeStatus::CHANGED; 2137 } 2138 2139 /// See AbstractState::indicatePessimisticFixpoint(...) 2140 ChangeStatus indicatePessimisticFixpoint() override { 2141 Assumed = Known; 2142 return ChangeStatus::CHANGED; 2143 } 2144 2145 /// Return the known state encoding 2146 ConstantRange getKnown() const { return Known; } 2147 2148 /// Return the assumed state encoding. 2149 ConstantRange getAssumed() const { return Assumed; } 2150 2151 /// Unite assumed range with the passed state. 2152 void unionAssumed(const ConstantRange &R) { 2153 // Don't loose a known range. 2154 Assumed = Assumed.unionWith(R).intersectWith(Known); 2155 } 2156 2157 /// See IntegerRangeState::unionAssumed(..). 2158 void unionAssumed(const IntegerRangeState &R) { 2159 unionAssumed(R.getAssumed()); 2160 } 2161 2162 /// Unite known range with the passed state. 2163 void unionKnown(const ConstantRange &R) { 2164 // Don't loose a known range. 2165 Known = Known.unionWith(R); 2166 Assumed = Assumed.unionWith(Known); 2167 } 2168 2169 /// See IntegerRangeState::unionKnown(..). 2170 void unionKnown(const IntegerRangeState &R) { unionKnown(R.getKnown()); } 2171 2172 /// Intersect known range with the passed state. 2173 void intersectKnown(const ConstantRange &R) { 2174 Assumed = Assumed.intersectWith(R); 2175 Known = Known.intersectWith(R); 2176 } 2177 2178 /// See IntegerRangeState::intersectKnown(..). 
2179 void intersectKnown(const IntegerRangeState &R) { 2180 intersectKnown(R.getKnown()); 2181 } 2182 2183 /// Equality for IntegerRangeState. 2184 bool operator==(const IntegerRangeState &R) const { 2185 return getAssumed() == R.getAssumed() && getKnown() == R.getKnown(); 2186 } 2187 2188 /// "Clamp" this state with \p R. The result is subtype dependent but it is 2189 /// intended that only information assumed in both states will be assumed in 2190 /// this one afterwards. 2191 IntegerRangeState operator^=(const IntegerRangeState &R) { 2192 // NOTE: `^=` operator seems like `intersect` but in this case, we need to 2193 // take `union`. 2194 unionAssumed(R); 2195 return *this; 2196 } 2197 2198 IntegerRangeState operator&=(const IntegerRangeState &R) { 2199 // NOTE: `&=` operator seems like `intersect` but in this case, we need to 2200 // take `union`. 2201 unionKnown(R); 2202 unionAssumed(R); 2203 return *this; 2204 } 2205 }; 2206 /// Helper struct necessary as the modular build fails if the virtual method 2207 /// IRAttribute::manifest is defined in the Attributor.cpp. 2208 struct IRAttributeManifest { 2209 static ChangeStatus manifestAttrs(Attributor &A, const IRPosition &IRP, 2210 const ArrayRef<Attribute> &DeducedAttrs); 2211 }; 2212 2213 /// Helper to tie a abstract state implementation to an abstract attribute. 2214 template <typename StateTy, typename BaseType, class... Ts> 2215 struct StateWrapper : public BaseType, public StateTy { 2216 /// Provide static access to the type of the state. 2217 using StateType = StateTy; 2218 2219 StateWrapper(const IRPosition &IRP, Ts... Args) 2220 : BaseType(IRP), StateTy(Args...) {} 2221 2222 /// See AbstractAttribute::getState(...). 2223 StateType &getState() override { return *this; } 2224 2225 /// See AbstractAttribute::getState(...). 2226 const StateType &getState() const override { return *this; } 2227 }; 2228 2229 /// Helper class that provides common functionality to manifest IR attributes. 
2230 template <Attribute::AttrKind AK, typename BaseType> 2231 struct IRAttribute : public BaseType { 2232 IRAttribute(const IRPosition &IRP) : BaseType(IRP) {} 2233 2234 /// See AbstractAttribute::initialize(...). 2235 virtual void initialize(Attributor &A) override { 2236 const IRPosition &IRP = this->getIRPosition(); 2237 if (isa<UndefValue>(IRP.getAssociatedValue()) || 2238 this->hasAttr(getAttrKind(), /* IgnoreSubsumingPositions */ false, 2239 &A)) { 2240 this->getState().indicateOptimisticFixpoint(); 2241 return; 2242 } 2243 2244 bool IsFnInterface = IRP.isFnInterfaceKind(); 2245 const Function *FnScope = IRP.getAnchorScope(); 2246 // TODO: Not all attributes require an exact definition. Find a way to 2247 // enable deduction for some but not all attributes in case the 2248 // definition might be changed at runtime, see also 2249 // http://lists.llvm.org/pipermail/llvm-dev/2018-February/121275.html. 2250 // TODO: We could always determine abstract attributes and if sufficient 2251 // information was found we could duplicate the functions that do not 2252 // have an exact definition. 2253 if (IsFnInterface && (!FnScope || !A.isFunctionIPOAmendable(*FnScope))) 2254 this->getState().indicatePessimisticFixpoint(); 2255 } 2256 2257 /// See AbstractAttribute::manifest(...). 2258 ChangeStatus manifest(Attributor &A) override { 2259 if (isa<UndefValue>(this->getIRPosition().getAssociatedValue())) 2260 return ChangeStatus::UNCHANGED; 2261 SmallVector<Attribute, 4> DeducedAttrs; 2262 getDeducedAttributes(this->getAnchorValue().getContext(), DeducedAttrs); 2263 return IRAttributeManifest::manifestAttrs(A, this->getIRPosition(), 2264 DeducedAttrs); 2265 } 2266 2267 /// Return the kind that identifies the abstract attribute implementation. 2268 Attribute::AttrKind getAttrKind() const { return AK; } 2269 2270 /// Return the deduced attributes in \p Attrs. 
  /// Append the attribute(s) deduced by this AA to \p Attrs. The default adds
  /// a single enum attribute of kind getAttrKind() to be manifested.
  virtual void getDeducedAttributes(LLVMContext &Ctx,
                                    SmallVectorImpl<Attribute> &Attrs) const {
    Attrs.emplace_back(Attribute::get(Ctx, getAttrKind()));
  }
};

/// Base struct for all "concrete attribute" deductions.
///
/// The abstract attribute is a minimal interface that allows the Attributor to
/// orchestrate the abstract/fixpoint analysis. The design allows to hide away
/// implementation choices made for the subclasses but also to structure their
/// implementation and simplify the use of other abstract attributes in-flight.
///
/// To allow easy creation of new attributes, most methods have default
/// implementations. The ones that do not are generally straight forward, except
/// `AbstractAttribute::updateImpl` which is the location of most reasoning
/// associated with the abstract attribute. The update is invoked by the
/// Attributor in case the situation used to justify the current optimistic
/// state might have changed. The Attributor determines this automatically
/// by monitoring the `Attributor::getAAFor` calls made by abstract attributes.
///
/// The `updateImpl` method should inspect the IR and other abstract attributes
/// in-flight to justify the best possible (=optimistic) state. The actual
/// implementation is, similar to the underlying abstract state encoding, not
/// exposed. In the most common case, the `updateImpl` will go through a list of
/// reasons why its optimistic state is valid given the current information. If
/// any combination of them holds and is sufficient to justify the current
/// optimistic state, the method shall return UNCHANGED. If not, the optimistic
/// state is adjusted to the situation and the method shall return CHANGED.
///
/// If the manifestation of the "concrete attribute" deduced by the subclass
/// differs from the "default" behavior, which is a (set of) LLVM-IR
/// attribute(s) for an argument, call site argument, function return value, or
/// function, the `AbstractAttribute::manifest` method should be overloaded.
///
/// NOTE: If the state obtained via getState() is INVALID, thus if
///       AbstractAttribute::getState().isValidState() returns false, no
///       information provided by the methods of this class should be used.
/// NOTE: The Attributor currently has certain limitations to what we can do.
///       As a general rule of thumb, "concrete" abstract attributes should *for
///       now* only perform "backward" information propagation. That means
///       optimistic information obtained through abstract attributes should
///       only be used at positions that precede the origin of the information
///       with regards to the program flow. More practically, information can
///       *now* be propagated from instructions to their enclosing function, but
///       *not* from call sites to the called function. The mechanisms to allow
///       both directions will be added in the future.
/// NOTE: The mechanics of adding a new "concrete" abstract attribute are
///       described in the file comment.
struct AbstractAttribute : public IRPosition, public AADepGraphNode {
  using StateType = AbstractState;

  AbstractAttribute(const IRPosition &IRP) : IRPosition(IRP) {}

  /// Virtual destructor.
  virtual ~AbstractAttribute() {}

  /// This function is used to identify if an \p DGN is of type
  /// AbstractAttribute so that the dyn_cast and cast can use such information
  /// to cast an AADepGraphNode to an AbstractAttribute.
  ///
  /// We eagerly return true here because all AADepGraphNodes except for the
  /// Synthesis Node are of type AbstractAttribute.
  static bool classof(const AADepGraphNode *DGN) { return true; }

  /// Initialize the state with the information in the Attributor \p A.
  ///
  /// This function is called by the Attributor once all abstract attributes
  /// have been identified. It can and shall be used for tasks like:
  ///  - identify existing knowledge in the IR and use it for the "known state"
  ///  - perform any work that is not going to change over time, e.g., determine
  ///    a subset of the IR, or attributes in-flight, that have to be looked at
  ///    in the `updateImpl` method.
  virtual void initialize(Attributor &A) {}

  /// Return the internal abstract state for inspection.
  virtual StateType &getState() = 0;
  virtual const StateType &getState() const = 0;

  /// Return an IR position, see struct IRPosition.
  const IRPosition &getIRPosition() const { return *this; };
  IRPosition &getIRPosition() { return *this; };

  /// Helper functions, for debug purposes only.
  ///{
  void print(raw_ostream &OS) const override;
  virtual void printWithDeps(raw_ostream &OS) const;
  void dump() const { print(dbgs()); }

  /// This function should return the "summarized" assumed state as string.
  virtual const std::string getAsStr() const = 0;

  /// This function should return the name of the AbstractAttribute
  virtual const std::string getName() const = 0;

  /// This function should return the address of the ID of the AbstractAttribute
  virtual const char *getIdAddr() const = 0;
  ///}

  /// Allow the Attributor access to the protected methods.
  friend struct Attributor;

protected:
  /// Hook for the Attributor to trigger an update of the internal state.
  ///
  /// If this attribute is already fixed, this method will return UNCHANGED,
  /// otherwise it delegates to `AbstractAttribute::updateImpl`.
  ///
  /// \Return CHANGED if the internal state changed, otherwise UNCHANGED.
  ChangeStatus update(Attributor &A);

  /// Hook for the Attributor to trigger the manifestation of the information
  /// represented by the abstract attribute in the LLVM-IR.
  ///
  /// \Return CHANGED if the IR was altered, otherwise UNCHANGED.
  virtual ChangeStatus manifest(Attributor &A) {
    return ChangeStatus::UNCHANGED;
  }

  /// Hook to enable custom statistic tracking, called after manifest that
  /// resulted in a change if statistics are enabled.
  ///
  /// We require subclasses to provide an implementation so we remember to
  /// add statistics for them.
  virtual void trackStatistics() const = 0;

  /// The actual update/transfer function which has to be implemented by the
  /// derived classes.
  ///
  /// If it is called, the environment has changed and we have to determine if
  /// the current information is still valid or adjust it otherwise.
  ///
  /// \Return CHANGED if the internal state changed, otherwise UNCHANGED.
  virtual ChangeStatus updateImpl(Attributor &A) = 0;
};

/// Forward declarations of output streams for debug purposes.
///
///{
raw_ostream &operator<<(raw_ostream &OS, const AbstractAttribute &AA);
raw_ostream &operator<<(raw_ostream &OS, ChangeStatus S);
raw_ostream &operator<<(raw_ostream &OS, IRPosition::Kind);
raw_ostream &operator<<(raw_ostream &OS, const IRPosition &);
raw_ostream &operator<<(raw_ostream &OS, const AbstractState &State);
template <typename base_ty, base_ty BestState, base_ty WorstState>
raw_ostream &
operator<<(raw_ostream &OS,
           const IntegerStateBase<base_ty, BestState, WorstState> &S) {
  return OS << "(" << S.getKnown() << "-" << S.getAssumed() << ")"
            << static_cast<const AbstractState &>(S);
}
raw_ostream &operator<<(raw_ostream &OS, const IntegerRangeState &State);
///}

/// New pass manager pass that runs the Attributor on a whole module.
struct AttributorPass : public PassInfoMixin<AttributorPass> {
  PreservedAnalyses run(Module &M, ModuleAnalysisManager &AM);
};
/// New pass manager pass that runs the Attributor on a call graph SCC.
struct AttributorCGSCCPass : public PassInfoMixin<AttributorCGSCCPass> {
  PreservedAnalyses run(LazyCallGraph::SCC &C, CGSCCAnalysisManager &AM,
                        LazyCallGraph &CG, CGSCCUpdateResult &UR);
};

/// Legacy pass manager factory functions for the two Attributor passes.
Pass *createAttributorLegacyPass();
Pass *createAttributorCGSCCLegacyPass();

/// ----------------------------------------------------------------------------
///                       Abstract Attribute Classes
/// ----------------------------------------------------------------------------

/// An abstract attribute for the returned values of a function.
struct AAReturnedValues
    : public IRAttribute<Attribute::Returned, AbstractAttribute> {
  AAReturnedValues(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return an assumed unique return value if a single candidate is found. If
  /// there cannot be one, return a nullptr. If it is not clear yet, return the
  /// Optional::NoneType.
  Optional<Value *> getAssumedUniqueReturnValue(Attributor &A) const;

  /// Check \p Pred on all returned values.
  ///
  /// This method will evaluate \p Pred on returned values and return
  /// true if (1) all returned values are known, and (2) \p Pred returned true
  /// for all returned values.
  ///
  /// Note: Unlike the Attributor::checkForAllReturnedValuesAndReturnInsts
  /// method, this one will not filter dead return instructions.
  virtual bool checkForAllReturnedValuesAndReturnInsts(
      function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred)
      const = 0;

  /// Iterators over the (returned value -> returning instructions) mapping.
  using iterator =
      MapVector<Value *, SmallSetVector<ReturnInst *, 4>>::iterator;
  using const_iterator =
      MapVector<Value *, SmallSetVector<ReturnInst *, 4>>::const_iterator;
  virtual llvm::iterator_range<iterator> returned_values() = 0;
  virtual llvm::iterator_range<const_iterator> returned_values() const = 0;

  /// Return the number of distinct potential return values.
  virtual size_t getNumReturnValues() const = 0;

  /// Return the calls whose returned values could not be resolved yet.
  virtual const SmallSetVector<CallBase *, 4> &getUnresolvedCalls() const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAReturnedValues &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAReturnedValues"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAReturnedValues
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all nounwind attributes.
struct AANoUnwind
    : public IRAttribute<Attribute::NoUnwind,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AANoUnwind(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Returns true if nounwind is assumed.
  bool isAssumedNoUnwind() const { return getAssumed(); }

  /// Returns true if nounwind is known.
  bool isKnownNoUnwind() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoUnwind &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoUnwind"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoUnwind
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all nosync attributes.
struct AANoSync
    : public IRAttribute<Attribute::NoSync,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AANoSync(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Returns true if "nosync" is assumed.
  bool isAssumedNoSync() const { return getAssumed(); }

  /// Returns true if "nosync" is known.
  bool isKnownNoSync() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoSync &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoSync"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoSync
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all nonnull attributes.
struct AANonNull
    : public IRAttribute<Attribute::NonNull,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AANonNull(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if we assume that the underlying value is nonnull.
  bool isAssumedNonNull() const { return getAssumed(); }

  /// Return true if we know that underlying value is nonnull.
  bool isKnownNonNull() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANonNull &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANonNull"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANonNull
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for norecurse.
struct AANoRecurse
    : public IRAttribute<Attribute::NoRecurse,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AANoRecurse(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if "norecurse" is assumed.
  bool isAssumedNoRecurse() const { return getAssumed(); }

  /// Return true if "norecurse" is known.
  bool isKnownNoRecurse() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoRecurse &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoRecurse"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoRecurse
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for willreturn.
struct AAWillReturn
    : public IRAttribute<Attribute::WillReturn,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AAWillReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if "willreturn" is assumed.
  bool isAssumedWillReturn() const { return getAssumed(); }

  /// Return true if "willreturn" is known.
  bool isKnownWillReturn() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAWillReturn &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAWillReturn"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAWillReturn
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract attribute for undefined behavior.
struct AAUndefinedBehavior
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAUndefinedBehavior(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Return true if "undefined behavior" is assumed.
  bool isAssumedToCauseUB() const { return getAssumed(); }

  /// Return true if "undefined behavior" is assumed for a specific instruction.
  virtual bool isAssumedToCauseUB(Instruction *I) const = 0;

  /// Return true if "undefined behavior" is known.
  bool isKnownToCauseUB() const { return getKnown(); }

  /// Return true if "undefined behavior" is known for a specific instruction.
  virtual bool isKnownToCauseUB(Instruction *I) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAUndefinedBehavior &createForPosition(const IRPosition &IRP,
                                                Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAUndefinedBehavior"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAUndefinedBehavior
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface to determine reachability of point A to B.
struct AAReachability : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAReachability(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if 'From' instruction is assumed to reach, 'To' instruction.
  /// Users should provide two positions they are interested in, and the class
  /// determines (and caches) reachability.
  bool isAssumedReachable(Attributor &A, const Instruction &From,
                          const Instruction &To) const {
    return A.getInfoCache().getPotentiallyReachable(From, To);
  }

  /// Returns true if 'From' instruction is known to reach, 'To' instruction.
  /// Users should provide two positions they are interested in, and the class
  /// determines (and caches) reachability.
  bool isKnownReachable(Attributor &A, const Instruction &From,
                        const Instruction &To) const {
    return A.getInfoCache().getPotentiallyReachable(From, To);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAReachability &createForPosition(const IRPosition &IRP,
                                           Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAReachability"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAReachability
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all noalias attributes.
struct AANoAlias
    : public IRAttribute<Attribute::NoAlias,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AANoAlias(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if we assume that the underlying value is noalias.
  bool isAssumedNoAlias() const { return getAssumed(); }

  /// Return true if we know that underlying value is noalias.
  bool isKnownNoAlias() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoAlias &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoAlias"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoAlias
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An AbstractAttribute for nofree.
2750 struct AANoFree 2751 : public IRAttribute<Attribute::NoFree, 2752 StateWrapper<BooleanState, AbstractAttribute>> { 2753 AANoFree(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} 2754 2755 /// Return true if "nofree" is assumed. 2756 bool isAssumedNoFree() const { return getAssumed(); } 2757 2758 /// Return true if "nofree" is known. 2759 bool isKnownNoFree() const { return getKnown(); } 2760 2761 /// Create an abstract attribute view for the position \p IRP. 2762 static AANoFree &createForPosition(const IRPosition &IRP, Attributor &A); 2763 2764 /// See AbstractAttribute::getName() 2765 const std::string getName() const override { return "AANoFree"; } 2766 2767 /// See AbstractAttribute::getIdAddr() 2768 const char *getIdAddr() const override { return &ID; } 2769 2770 /// This function should return true if the type of the \p AA is AANoFree 2771 static bool classof(const AbstractAttribute *AA) { 2772 return (AA->getIdAddr() == &ID); 2773 } 2774 2775 /// Unique ID (due to the unique address) 2776 static const char ID; 2777 }; 2778 2779 /// An AbstractAttribute for noreturn. 2780 struct AANoReturn 2781 : public IRAttribute<Attribute::NoReturn, 2782 StateWrapper<BooleanState, AbstractAttribute>> { 2783 AANoReturn(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} 2784 2785 /// Return true if the underlying object is assumed to never return. 2786 bool isAssumedNoReturn() const { return getAssumed(); } 2787 2788 /// Return true if the underlying object is known to never return. 2789 bool isKnownNoReturn() const { return getKnown(); } 2790 2791 /// Create an abstract attribute view for the position \p IRP. 
2792 static AANoReturn &createForPosition(const IRPosition &IRP, Attributor &A); 2793 2794 /// See AbstractAttribute::getName() 2795 const std::string getName() const override { return "AANoReturn"; } 2796 2797 /// See AbstractAttribute::getIdAddr() 2798 const char *getIdAddr() const override { return &ID; } 2799 2800 /// This function should return true if the type of the \p AA is AANoReturn 2801 static bool classof(const AbstractAttribute *AA) { 2802 return (AA->getIdAddr() == &ID); 2803 } 2804 2805 /// Unique ID (due to the unique address) 2806 static const char ID; 2807 }; 2808 2809 /// An abstract interface for liveness abstract attribute. 2810 struct AAIsDead : public StateWrapper<BooleanState, AbstractAttribute> { 2811 using Base = StateWrapper<BooleanState, AbstractAttribute>; 2812 AAIsDead(const IRPosition &IRP, Attributor &A) : Base(IRP) {} 2813 2814 protected: 2815 /// The query functions are protected such that other attributes need to go 2816 /// through the Attributor interfaces: `Attributor::isAssumedDead(...)` 2817 2818 /// Returns true if the underlying value is assumed dead. 2819 virtual bool isAssumedDead() const = 0; 2820 2821 /// Returns true if the underlying value is known dead. 2822 virtual bool isKnownDead() const = 0; 2823 2824 /// Returns true if \p BB is assumed dead. 2825 virtual bool isAssumedDead(const BasicBlock *BB) const = 0; 2826 2827 /// Returns true if \p BB is known dead. 2828 virtual bool isKnownDead(const BasicBlock *BB) const = 0; 2829 2830 /// Returns true if \p I is assumed dead. 2831 virtual bool isAssumedDead(const Instruction *I) const = 0; 2832 2833 /// Returns true if \p I is known dead. 2834 virtual bool isKnownDead(const Instruction *I) const = 0; 2835 2836 /// This method is used to check if at least one instruction in a collection 2837 /// of instructions is live. 
2838 template <typename T> bool isLiveInstSet(T begin, T end) const { 2839 for (const auto &I : llvm::make_range(begin, end)) { 2840 assert(I->getFunction() == getIRPosition().getAssociatedFunction() && 2841 "Instruction must be in the same anchor scope function."); 2842 2843 if (!isAssumedDead(I)) 2844 return true; 2845 } 2846 2847 return false; 2848 } 2849 2850 public: 2851 /// Create an abstract attribute view for the position \p IRP. 2852 static AAIsDead &createForPosition(const IRPosition &IRP, Attributor &A); 2853 2854 /// Determine if \p F might catch asynchronous exceptions. 2855 static bool mayCatchAsynchronousExceptions(const Function &F) { 2856 return F.hasPersonalityFn() && !canSimplifyInvokeNoUnwind(&F); 2857 } 2858 2859 /// Return if the edge from \p From BB to \p To BB is assumed dead. 2860 /// This is specifically useful in AAReachability. 2861 virtual bool isEdgeDead(const BasicBlock *From, const BasicBlock *To) const { 2862 return false; 2863 } 2864 2865 /// See AbstractAttribute::getName() 2866 const std::string getName() const override { return "AAIsDead"; } 2867 2868 /// See AbstractAttribute::getIdAddr() 2869 const char *getIdAddr() const override { return &ID; } 2870 2871 /// This function should return true if the type of the \p AA is AAIsDead 2872 static bool classof(const AbstractAttribute *AA) { 2873 return (AA->getIdAddr() == &ID); 2874 } 2875 2876 /// Unique ID (due to the unique address) 2877 static const char ID; 2878 2879 friend struct Attributor; 2880 }; 2881 2882 /// State for dereferenceable attribute 2883 struct DerefState : AbstractState { 2884 2885 static DerefState getBestState() { return DerefState(); } 2886 static DerefState getBestState(const DerefState &) { return getBestState(); } 2887 2888 /// Return the worst possible representable state. 
2889 static DerefState getWorstState() { 2890 DerefState DS; 2891 DS.indicatePessimisticFixpoint(); 2892 return DS; 2893 } 2894 static DerefState getWorstState(const DerefState &) { 2895 return getWorstState(); 2896 } 2897 2898 /// State representing for dereferenceable bytes. 2899 IncIntegerState<> DerefBytesState; 2900 2901 /// Map representing for accessed memory offsets and sizes. 2902 /// A key is Offset and a value is size. 2903 /// If there is a load/store instruction something like, 2904 /// p[offset] = v; 2905 /// (offset, sizeof(v)) will be inserted to this map. 2906 /// std::map is used because we want to iterate keys in ascending order. 2907 std::map<int64_t, uint64_t> AccessedBytesMap; 2908 2909 /// Helper function to calculate dereferenceable bytes from current known 2910 /// bytes and accessed bytes. 2911 /// 2912 /// int f(int *A){ 2913 /// *A = 0; 2914 /// *(A+2) = 2; 2915 /// *(A+1) = 1; 2916 /// *(A+10) = 10; 2917 /// } 2918 /// ``` 2919 /// In that case, AccessedBytesMap is `{0:4, 4:4, 8:4, 40:4}`. 2920 /// AccessedBytesMap is std::map so it is iterated in accending order on 2921 /// key(Offset). So KnownBytes will be updated like this: 2922 /// 2923 /// |Access | KnownBytes 2924 /// |(0, 4)| 0 -> 4 2925 /// |(4, 4)| 4 -> 8 2926 /// |(8, 4)| 8 -> 12 2927 /// |(40, 4) | 12 (break) 2928 void computeKnownDerefBytesFromAccessedMap() { 2929 int64_t KnownBytes = DerefBytesState.getKnown(); 2930 for (auto &Access : AccessedBytesMap) { 2931 if (KnownBytes < Access.first) 2932 break; 2933 KnownBytes = std::max(KnownBytes, Access.first + (int64_t)Access.second); 2934 } 2935 2936 DerefBytesState.takeKnownMaximum(KnownBytes); 2937 } 2938 2939 /// State representing that whether the value is globaly dereferenceable. 
2940 BooleanState GlobalState; 2941 2942 /// See AbstractState::isValidState() 2943 bool isValidState() const override { return DerefBytesState.isValidState(); } 2944 2945 /// See AbstractState::isAtFixpoint() 2946 bool isAtFixpoint() const override { 2947 return !isValidState() || 2948 (DerefBytesState.isAtFixpoint() && GlobalState.isAtFixpoint()); 2949 } 2950 2951 /// See AbstractState::indicateOptimisticFixpoint(...) 2952 ChangeStatus indicateOptimisticFixpoint() override { 2953 DerefBytesState.indicateOptimisticFixpoint(); 2954 GlobalState.indicateOptimisticFixpoint(); 2955 return ChangeStatus::UNCHANGED; 2956 } 2957 2958 /// See AbstractState::indicatePessimisticFixpoint(...) 2959 ChangeStatus indicatePessimisticFixpoint() override { 2960 DerefBytesState.indicatePessimisticFixpoint(); 2961 GlobalState.indicatePessimisticFixpoint(); 2962 return ChangeStatus::CHANGED; 2963 } 2964 2965 /// Update known dereferenceable bytes. 2966 void takeKnownDerefBytesMaximum(uint64_t Bytes) { 2967 DerefBytesState.takeKnownMaximum(Bytes); 2968 2969 // Known bytes might increase. 2970 computeKnownDerefBytesFromAccessedMap(); 2971 } 2972 2973 /// Update assumed dereferenceable bytes. 2974 void takeAssumedDerefBytesMinimum(uint64_t Bytes) { 2975 DerefBytesState.takeAssumedMinimum(Bytes); 2976 } 2977 2978 /// Add accessed bytes to the map. 2979 void addAccessedBytes(int64_t Offset, uint64_t Size) { 2980 uint64_t &AccessedBytes = AccessedBytesMap[Offset]; 2981 AccessedBytes = std::max(AccessedBytes, Size); 2982 2983 // Known bytes might increase. 2984 computeKnownDerefBytesFromAccessedMap(); 2985 } 2986 2987 /// Equality for DerefState. 2988 bool operator==(const DerefState &R) const { 2989 return this->DerefBytesState == R.DerefBytesState && 2990 this->GlobalState == R.GlobalState; 2991 } 2992 2993 /// Inequality for DerefState. 
2994 bool operator!=(const DerefState &R) const { return !(*this == R); } 2995 2996 /// See IntegerStateBase::operator^= 2997 DerefState operator^=(const DerefState &R) { 2998 DerefBytesState ^= R.DerefBytesState; 2999 GlobalState ^= R.GlobalState; 3000 return *this; 3001 } 3002 3003 /// See IntegerStateBase::operator+= 3004 DerefState operator+=(const DerefState &R) { 3005 DerefBytesState += R.DerefBytesState; 3006 GlobalState += R.GlobalState; 3007 return *this; 3008 } 3009 3010 /// See IntegerStateBase::operator&= 3011 DerefState operator&=(const DerefState &R) { 3012 DerefBytesState &= R.DerefBytesState; 3013 GlobalState &= R.GlobalState; 3014 return *this; 3015 } 3016 3017 /// See IntegerStateBase::operator|= 3018 DerefState operator|=(const DerefState &R) { 3019 DerefBytesState |= R.DerefBytesState; 3020 GlobalState |= R.GlobalState; 3021 return *this; 3022 } 3023 3024 protected: 3025 const AANonNull *NonNullAA = nullptr; 3026 }; 3027 3028 /// An abstract interface for all dereferenceable attribute. 3029 struct AADereferenceable 3030 : public IRAttribute<Attribute::Dereferenceable, 3031 StateWrapper<DerefState, AbstractAttribute>> { 3032 AADereferenceable(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {} 3033 3034 /// Return true if we assume that the underlying value is nonnull. 3035 bool isAssumedNonNull() const { 3036 return NonNullAA && NonNullAA->isAssumedNonNull(); 3037 } 3038 3039 /// Return true if we know that the underlying value is nonnull. 3040 bool isKnownNonNull() const { 3041 return NonNullAA && NonNullAA->isKnownNonNull(); 3042 } 3043 3044 /// Return true if we assume that underlying value is 3045 /// dereferenceable(_or_null) globally. 3046 bool isAssumedGlobal() const { return GlobalState.getAssumed(); } 3047 3048 /// Return true if we know that underlying value is 3049 /// dereferenceable(_or_null) globally. 3050 bool isKnownGlobal() const { return GlobalState.getKnown(); } 3051 3052 /// Return assumed dereferenceable bytes. 
  uint32_t getAssumedDereferenceableBytes() const {
    return DerefBytesState.getAssumed();
  }

  /// Return known dereferenceable bytes.
  uint32_t getKnownDereferenceableBytes() const {
    return DerefBytesState.getKnown();
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AADereferenceable &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AADereferenceable"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AADereferenceable
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// State type used by AAAlign: an increasing integer state whose value is
/// capped at Value::MaximumAlignment and starts at 1 (= no alignment known).
using AAAlignmentStateType =
    IncIntegerState<uint32_t, Value::MaximumAlignment, 1>;
/// An abstract interface for all align attributes.
struct AAAlign : public IRAttribute<
                     Attribute::Alignment,
                     StateWrapper<AAAlignmentStateType, AbstractAttribute>> {
  AAAlign(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return assumed alignment.
  unsigned getAssumedAlign() const { return getAssumed(); }

  /// Return known alignment.
  unsigned getKnownAlign() const { return getKnown(); }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAAlign"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAAlign
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAAlign &createForPosition(const IRPosition &IRP, Attributor &A);

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all nocapture attributes.
struct AANoCapture
    : public IRAttribute<
          Attribute::NoCapture,
          StateWrapper<BitIntegerState<uint16_t, 7, 0>, AbstractAttribute>> {
  AANoCapture(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// State encoding bits. A set bit in the state means the property holds.
  /// NO_CAPTURE is the best possible state, 0 the worst possible state.
  enum {
    NOT_CAPTURED_IN_MEM = 1 << 0, // Not captured through memory.
    NOT_CAPTURED_IN_INT = 1 << 1, // Not captured through integers.
    NOT_CAPTURED_IN_RET = 1 << 2, // Not captured as a derived return value.

    /// If we do not capture the value in memory or through integers we can only
    /// communicate it back as a derived pointer.
    NO_CAPTURE_MAYBE_RETURNED = NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT,

    /// If we do not capture the value in memory, through integers, or as a
    /// derived pointer we know it is not captured.
    NO_CAPTURE =
        NOT_CAPTURED_IN_MEM | NOT_CAPTURED_IN_INT | NOT_CAPTURED_IN_RET,
  };

  /// Return true if we know that the underlying value is not captured in its
  /// respective scope.
  bool isKnownNoCapture() const { return isKnown(NO_CAPTURE); }

  /// Return true if we assume that the underlying value is not captured in its
  /// respective scope.
  bool isAssumedNoCapture() const { return isAssumed(NO_CAPTURE); }

  /// Return true if we know that the underlying value is not captured in its
  /// respective scope but we allow it to escape through a "return".
  bool isKnownNoCaptureMaybeReturned() const {
    return isKnown(NO_CAPTURE_MAYBE_RETURNED);
  }

  /// Return true if we assume that the underlying value is not captured in its
  /// respective scope but we allow it to escape through a "return".
  bool isAssumedNoCaptureMaybeReturned() const {
    return isAssumed(NO_CAPTURE_MAYBE_RETURNED);
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoCapture &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoCapture"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoCapture
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for value simplify abstract attribute.
struct AAValueSimplify : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAValueSimplify(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Return an assumed simplified value if a single candidate is found. If
  /// there cannot be one, return the original value. If it is not clear yet,
  /// return the Optional::NoneType.
  virtual Optional<Value *> getAssumedSimplifiedValue(Attributor &A) const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAValueSimplify &createForPosition(const IRPosition &IRP,
                                            Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAValueSimplify"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAValueSimplify
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface to determine if a heap allocation can be converted
/// to a stack allocation ("HeapToStack" conversion).
struct AAHeapToStack : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAHeapToStack(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if HeapToStack conversion is assumed to be possible.
  bool isAssumedHeapToStack() const { return getAssumed(); }

  /// Returns true if HeapToStack conversion is known to be possible.
  bool isKnownHeapToStack() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAHeapToStack &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAHeapToStack"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AAHeapToStack
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for privatizability.
///
/// A pointer is privatizable if it can be replaced by a new, private one.
/// Privatizing a pointer reduces the use count, hence the interaction between
/// unrelated code parts.
///
/// In order for a pointer to be privatizable its value cannot be observed
/// (=nocapture), it is (for now) not written (=readonly & noalias), we know
/// what values are necessary to make the private copy look like the original
/// one, and the values we need can be loaded (=dereferenceable).
struct AAPrivatizablePtr
    : public StateWrapper<BooleanState, AbstractAttribute> {
  using Base = StateWrapper<BooleanState, AbstractAttribute>;
  AAPrivatizablePtr(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// Returns true if pointer privatization is assumed to be possible.
  bool isAssumedPrivatizablePtr() const { return getAssumed(); }

  /// Returns true if pointer privatization is known to be possible.
  bool isKnownPrivatizablePtr() const { return getKnown(); }

  /// Return the type we can choose for a private copy of the underlying
  /// value. None means it is not clear yet, nullptr means there is none.
  virtual Optional<Type *> getPrivatizableType() const = 0;

  /// Create an abstract attribute view for the position \p IRP.
  static AAPrivatizablePtr &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPrivatizablePtr"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPrivatizablePtr
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for memory access kind related attributes
/// (readnone/readonly/writeonly).
struct AAMemoryBehavior
    : public IRAttribute<
          Attribute::ReadNone,
          StateWrapper<BitIntegerState<uint8_t, 3>, AbstractAttribute>> {
  AAMemoryBehavior(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// State encoding bits. A set bit in the state means the property holds.
  /// BEST_STATE is the best possible state, 0 the worst possible state.
  enum {
    NO_READS = 1 << 0,
    NO_WRITES = 1 << 1,
    NO_ACCESSES = NO_READS | NO_WRITES,

    BEST_STATE = NO_ACCESSES,
  };
  static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value");

  /// Return true if we know that the underlying value is not read or accessed
  /// in its respective scope.
  bool isKnownReadNone() const { return isKnown(NO_ACCESSES); }

  /// Return true if we assume that the underlying value is not read or accessed
  /// in its respective scope.
  bool isAssumedReadNone() const { return isAssumed(NO_ACCESSES); }

  /// Return true if we know that the underlying value is not accessed
  /// (=written) in its respective scope.
  bool isKnownReadOnly() const { return isKnown(NO_WRITES); }

  /// Return true if we assume that the underlying value is not accessed
  /// (=written) in its respective scope.
  bool isAssumedReadOnly() const { return isAssumed(NO_WRITES); }

  /// Return true if we know that the underlying value is not read in its
  /// respective scope.
  bool isKnownWriteOnly() const { return isKnown(NO_READS); }

  /// Return true if we assume that the underlying value is not read in its
  /// respective scope.
  bool isAssumedWriteOnly() const { return isAssumed(NO_READS); }

  /// Create an abstract attribute view for the position \p IRP.
  static AAMemoryBehavior &createForPosition(const IRPosition &IRP,
                                             Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAMemoryBehavior"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAMemoryBehavior
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all memory location attributes
/// (readnone/argmemonly/inaccessiblememonly/inaccessibleorargmemonly).
struct AAMemoryLocation
    : public IRAttribute<
          Attribute::ReadNone,
          StateWrapper<BitIntegerState<uint32_t, 511>, AbstractAttribute>> {
  using MemoryLocationsKind = StateType::base_t;

  AAMemoryLocation(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Encoding of different locations that could be accessed by a memory
  /// access.
  enum {
    ALL_LOCATIONS = 0,
    NO_LOCAL_MEM = 1 << 0,
    NO_CONST_MEM = 1 << 1,
    NO_GLOBAL_INTERNAL_MEM = 1 << 2,
    NO_GLOBAL_EXTERNAL_MEM = 1 << 3,
    NO_GLOBAL_MEM = NO_GLOBAL_INTERNAL_MEM | NO_GLOBAL_EXTERNAL_MEM,
    NO_ARGUMENT_MEM = 1 << 4,
    NO_INACCESSIBLE_MEM = 1 << 5,
    NO_MALLOCED_MEM = 1 << 6,
    // NOTE: spelling ("UNKOWN") is kept as-is; the name is part of the API.
    NO_UNKOWN_MEM = 1 << 7,
    NO_LOCATIONS = NO_LOCAL_MEM | NO_CONST_MEM | NO_GLOBAL_INTERNAL_MEM |
                   NO_GLOBAL_EXTERNAL_MEM | NO_ARGUMENT_MEM |
                   NO_INACCESSIBLE_MEM | NO_MALLOCED_MEM | NO_UNKOWN_MEM,

    // Helper bit to track if we gave up or not.
3370 VALID_STATE = NO_LOCATIONS + 1, 3371 3372 BEST_STATE = NO_LOCATIONS | VALID_STATE, 3373 }; 3374 static_assert(BEST_STATE == getBestState(), "Unexpected BEST_STATE value"); 3375 3376 /// Return true if we know that the associated functions has no observable 3377 /// accesses. 3378 bool isKnownReadNone() const { return isKnown(NO_LOCATIONS); } 3379 3380 /// Return true if we assume that the associated functions has no observable 3381 /// accesses. 3382 bool isAssumedReadNone() const { 3383 return isAssumed(NO_LOCATIONS) | isAssumedStackOnly(); 3384 } 3385 3386 /// Return true if we know that the associated functions has at most 3387 /// local/stack accesses. 3388 bool isKnowStackOnly() const { 3389 return isKnown(inverseLocation(NO_LOCAL_MEM, true, true)); 3390 } 3391 3392 /// Return true if we assume that the associated functions has at most 3393 /// local/stack accesses. 3394 bool isAssumedStackOnly() const { 3395 return isAssumed(inverseLocation(NO_LOCAL_MEM, true, true)); 3396 } 3397 3398 /// Return true if we know that the underlying value will only access 3399 /// inaccesible memory only (see Attribute::InaccessibleMemOnly). 3400 bool isKnownInaccessibleMemOnly() const { 3401 return isKnown(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 3402 } 3403 3404 /// Return true if we assume that the underlying value will only access 3405 /// inaccesible memory only (see Attribute::InaccessibleMemOnly). 3406 bool isAssumedInaccessibleMemOnly() const { 3407 return isAssumed(inverseLocation(NO_INACCESSIBLE_MEM, true, true)); 3408 } 3409 3410 /// Return true if we know that the underlying value will only access 3411 /// argument pointees (see Attribute::ArgMemOnly). 3412 bool isKnownArgMemOnly() const { 3413 return isKnown(inverseLocation(NO_ARGUMENT_MEM, true, true)); 3414 } 3415 3416 /// Return true if we assume that the underlying value will only access 3417 /// argument pointees (see Attribute::ArgMemOnly). 
3418 bool isAssumedArgMemOnly() const { 3419 return isAssumed(inverseLocation(NO_ARGUMENT_MEM, true, true)); 3420 } 3421 3422 /// Return true if we know that the underlying value will only access 3423 /// inaccesible memory or argument pointees (see 3424 /// Attribute::InaccessibleOrArgMemOnly). 3425 bool isKnownInaccessibleOrArgMemOnly() const { 3426 return isKnown( 3427 inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 3428 } 3429 3430 /// Return true if we assume that the underlying value will only access 3431 /// inaccesible memory or argument pointees (see 3432 /// Attribute::InaccessibleOrArgMemOnly). 3433 bool isAssumedInaccessibleOrArgMemOnly() const { 3434 return isAssumed( 3435 inverseLocation(NO_INACCESSIBLE_MEM | NO_ARGUMENT_MEM, true, true)); 3436 } 3437 3438 /// Return true if the underlying value may access memory through arguement 3439 /// pointers of the associated function, if any. 3440 bool mayAccessArgMem() const { return !isAssumed(NO_ARGUMENT_MEM); } 3441 3442 /// Return true if only the memory locations specififed by \p MLK are assumed 3443 /// to be accessed by the associated function. 3444 bool isAssumedSpecifiedMemOnly(MemoryLocationsKind MLK) const { 3445 return isAssumed(MLK); 3446 } 3447 3448 /// Return the locations that are assumed to be not accessed by the associated 3449 /// function, if any. 3450 MemoryLocationsKind getAssumedNotAccessedLocation() const { 3451 return getAssumed(); 3452 } 3453 3454 /// Return the inverse of location \p Loc, thus for NO_XXX the return 3455 /// describes ONLY_XXX. The flags \p AndLocalMem and \p AndConstMem determine 3456 /// if local (=stack) and constant memory are allowed as well. Most of the 3457 /// time we do want them to be included, e.g., argmemonly allows accesses via 3458 /// argument pointers or local or constant memory accesses. 
3459 static MemoryLocationsKind 3460 inverseLocation(MemoryLocationsKind Loc, bool AndLocalMem, bool AndConstMem) { 3461 return NO_LOCATIONS & ~(Loc | (AndLocalMem ? NO_LOCAL_MEM : 0) | 3462 (AndConstMem ? NO_CONST_MEM : 0)); 3463 }; 3464 3465 /// Return the locations encoded by \p MLK as a readable string. 3466 static std::string getMemoryLocationsAsStr(MemoryLocationsKind MLK); 3467 3468 /// Simple enum to distinguish read/write/read-write accesses. 3469 enum AccessKind { 3470 NONE = 0, 3471 READ = 1 << 0, 3472 WRITE = 1 << 1, 3473 READ_WRITE = READ | WRITE, 3474 }; 3475 3476 /// Check \p Pred on all accesses to the memory kinds specified by \p MLK. 3477 /// 3478 /// This method will evaluate \p Pred on all accesses (access instruction + 3479 /// underlying accessed memory pointer) and it will return true if \p Pred 3480 /// holds every time. 3481 virtual bool checkForAllAccessesToMemoryKind( 3482 function_ref<bool(const Instruction *, const Value *, AccessKind, 3483 MemoryLocationsKind)> 3484 Pred, 3485 MemoryLocationsKind MLK) const = 0; 3486 3487 /// Create an abstract attribute view for the position \p IRP. 3488 static AAMemoryLocation &createForPosition(const IRPosition &IRP, 3489 Attributor &A); 3490 3491 /// See AbstractState::getAsStr(). 3492 const std::string getAsStr() const override { 3493 return getMemoryLocationsAsStr(getAssumedNotAccessedLocation()); 3494 } 3495 3496 /// See AbstractAttribute::getName() 3497 const std::string getName() const override { return "AAMemoryLocation"; } 3498 3499 /// See AbstractAttribute::getIdAddr() 3500 const char *getIdAddr() const override { return &ID; } 3501 3502 /// This function should return true if the type of the \p AA is 3503 /// AAMemoryLocation 3504 static bool classof(const AbstractAttribute *AA) { 3505 return (AA->getIdAddr() == &ID); 3506 } 3507 3508 /// Unique ID (due to the unique address) 3509 static const char ID; 3510 }; 3511 3512 /// An abstract interface for range value analysis. 
struct AAValueConstantRange
    : public StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t> {
  using Base = StateWrapper<IntegerRangeState, AbstractAttribute, uint32_t>;
  AAValueConstantRange(const IRPosition &IRP, Attributor &A)
      : Base(IRP, IRP.getAssociatedType()->getIntegerBitWidth()) {}

  /// See AbstractAttribute::getState(...).
  IntegerRangeState &getState() override { return *this; }
  const IntegerRangeState &getState() const override { return *this; }

  /// Create an abstract attribute view for the position \p IRP.
  static AAValueConstantRange &createForPosition(const IRPosition &IRP,
                                                 Attributor &A);

  /// Return an assumed range for the associated value at a program point
  /// \p CtxI. If \p CtxI is nullptr, simply return an assumed range.
  virtual ConstantRange
  getAssumedConstantRange(Attributor &A,
                          const Instruction *CtxI = nullptr) const = 0;

  /// Return a known range for the associated value at a program point \p CtxI.
  /// If \p CtxI is nullptr, simply return a known range.
  virtual ConstantRange
  getKnownConstantRange(Attributor &A,
                        const Instruction *CtxI = nullptr) const = 0;

  /// Return an assumed constant for the associated value at a program point
  /// \p CtxI. Returns the constant if the assumed range contains exactly one
  /// element, None if the range is empty, and nullptr otherwise.
  Optional<ConstantInt *>
  getAssumedConstantInt(Attributor &A,
                        const Instruction *CtxI = nullptr) const {
    ConstantRange RangeV = getAssumedConstantRange(A, CtxI);
    if (auto *C = RangeV.getSingleElement())
      return cast<ConstantInt>(
          ConstantInt::get(getAssociatedValue().getType(), *C));
    if (RangeV.isEmptySet())
      return llvm::None;
    return nullptr;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAValueConstantRange"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAValueConstantRange
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// A class for a set state.
/// The assumed boolean state indicates whether the corresponding set is full
/// set or not. If the assumed state is false, this is the worst state. The
/// worst state (invalid state) of set of potential values is when the set
/// contains every possible value (i.e. we cannot in any way limit the value
/// that the target position can take). That never happens naturally, we only
/// force it. As for the conditions under which we force it, see
/// AAPotentialValues.
template <typename MemberTy, typename KeyInfo = DenseMapInfo<MemberTy>>
struct PotentialValuesState : AbstractState {
  using SetTy = DenseSet<MemberTy, KeyInfo>;

  PotentialValuesState() : IsValidState(true), UndefIsContained(false) {}

  PotentialValuesState(bool IsValid)
      : IsValidState(IsValid), UndefIsContained(false) {}

  /// See AbstractState::isValidState(...)
3587 bool isValidState() const override { return IsValidState.isValidState(); } 3588 3589 /// See AbstractState::isAtFixpoint(...) 3590 bool isAtFixpoint() const override { return IsValidState.isAtFixpoint(); } 3591 3592 /// See AbstractState::indicatePessimisticFixpoint(...) 3593 ChangeStatus indicatePessimisticFixpoint() override { 3594 return IsValidState.indicatePessimisticFixpoint(); 3595 } 3596 3597 /// See AbstractState::indicateOptimisticFixpoint(...) 3598 ChangeStatus indicateOptimisticFixpoint() override { 3599 return IsValidState.indicateOptimisticFixpoint(); 3600 } 3601 3602 /// Return the assumed state 3603 PotentialValuesState &getAssumed() { return *this; } 3604 const PotentialValuesState &getAssumed() const { return *this; } 3605 3606 /// Return this set. We should check whether this set is valid or not by 3607 /// isValidState() before calling this function. 3608 const SetTy &getAssumedSet() const { 3609 assert(isValidState() && "This set shoud not be used when it is invalid!"); 3610 return Set; 3611 } 3612 3613 /// Returns whether this state contains an undef value or not. 3614 bool undefIsContained() const { 3615 assert(isValidState() && "This flag shoud not be used when it is invalid!"); 3616 return UndefIsContained; 3617 } 3618 3619 bool operator==(const PotentialValuesState &RHS) const { 3620 if (isValidState() != RHS.isValidState()) 3621 return false; 3622 if (!isValidState() && !RHS.isValidState()) 3623 return true; 3624 if (undefIsContained() != RHS.undefIsContained()) 3625 return false; 3626 return Set == RHS.getAssumedSet(); 3627 } 3628 3629 /// Maximum number of potential values to be tracked. 3630 /// This is set by -attributor-max-potential-values command line option 3631 static unsigned MaxPotentialValues; 3632 3633 /// Return empty set as the best state of potential values. 
3634 static PotentialValuesState getBestState() { 3635 return PotentialValuesState(true); 3636 } 3637 3638 static PotentialValuesState getBestState(PotentialValuesState &PVS) { 3639 return getBestState(); 3640 } 3641 3642 /// Return full set as the worst state of potential values. 3643 static PotentialValuesState getWorstState() { 3644 return PotentialValuesState(false); 3645 } 3646 3647 /// Union assumed set with the passed value. 3648 void unionAssumed(const MemberTy &C) { insert(C); } 3649 3650 /// Union assumed set with assumed set of the passed state \p PVS. 3651 void unionAssumed(const PotentialValuesState &PVS) { unionWith(PVS); } 3652 3653 /// Union assumed set with an undef value. 3654 void unionAssumedWithUndef() { unionWithUndef(); } 3655 3656 /// "Clamp" this state with \p PVS. 3657 PotentialValuesState operator^=(const PotentialValuesState &PVS) { 3658 IsValidState ^= PVS.IsValidState; 3659 unionAssumed(PVS); 3660 return *this; 3661 } 3662 3663 PotentialValuesState operator&=(const PotentialValuesState &PVS) { 3664 IsValidState &= PVS.IsValidState; 3665 unionAssumed(PVS); 3666 return *this; 3667 } 3668 3669 private: 3670 /// Check the size of this set, and invalidate when the size is no 3671 /// less than \p MaxPotentialValues threshold. 3672 void checkAndInvalidate() { 3673 if (Set.size() >= MaxPotentialValues) 3674 indicatePessimisticFixpoint(); 3675 } 3676 3677 /// If this state contains both undef and not undef, we can reduce 3678 /// undef to the not undef value. 3679 void reduceUndefValue() { UndefIsContained = UndefIsContained & Set.empty(); } 3680 3681 /// Insert an element into this set. 3682 void insert(const MemberTy &C) { 3683 if (!isValidState()) 3684 return; 3685 Set.insert(C); 3686 checkAndInvalidate(); 3687 } 3688 3689 /// Take union with R. 3690 void unionWith(const PotentialValuesState &R) { 3691 /// If this is a full set, do nothing.; 3692 if (!isValidState()) 3693 return; 3694 /// If R is full set, change L to a full set. 
3695 if (!R.isValidState()) { 3696 indicatePessimisticFixpoint(); 3697 return; 3698 } 3699 for (const MemberTy &C : R.Set) 3700 Set.insert(C); 3701 UndefIsContained |= R.undefIsContained(); 3702 reduceUndefValue(); 3703 checkAndInvalidate(); 3704 } 3705 3706 /// Take union with an undef value. 3707 void unionWithUndef() { 3708 UndefIsContained = true; 3709 reduceUndefValue(); 3710 } 3711 3712 /// Take intersection with R. 3713 void intersectWith(const PotentialValuesState &R) { 3714 /// If R is a full set, do nothing. 3715 if (!R.isValidState()) 3716 return; 3717 /// If this is a full set, change this to R. 3718 if (!isValidState()) { 3719 *this = R; 3720 return; 3721 } 3722 SetTy IntersectSet; 3723 for (const MemberTy &C : Set) { 3724 if (R.Set.count(C)) 3725 IntersectSet.insert(C); 3726 } 3727 Set = IntersectSet; 3728 UndefIsContained &= R.undefIsContained(); 3729 reduceUndefValue(); 3730 } 3731 3732 /// A helper state which indicate whether this state is valid or not. 3733 BooleanState IsValidState; 3734 3735 /// Container for potential values 3736 SetTy Set; 3737 3738 /// Flag for undef value 3739 bool UndefIsContained; 3740 }; 3741 3742 using PotentialConstantIntValuesState = PotentialValuesState<APInt>; 3743 3744 raw_ostream &operator<<(raw_ostream &OS, 3745 const PotentialConstantIntValuesState &R); 3746 3747 /// An abstract interface for potential values analysis. 3748 /// 3749 /// This AA collects potential values for each IR position. 3750 /// An assumed set of potential values is initialized with the empty set (the 3751 /// best state) and it will grow monotonically as we find more potential values 3752 /// for this position. 3753 /// The set might be forced to the worst state, that is, to contain every 3754 /// possible value for this position in 2 cases. 3755 /// 1. We surpassed the \p MaxPotentialValues threshold. This includes the 3756 /// case that this position is affected (e.g. 
/// because of an operation) by a
/// Value that is in the worst state.
/// 2. We tried to initialize on a Value that we cannot handle (e.g. an
///    operator we do not currently handle).
///
/// TODO: Support values other than constant integers.
struct AAPotentialValues
    : public StateWrapper<PotentialConstantIntValuesState, AbstractAttribute> {
  using Base = StateWrapper<PotentialConstantIntValuesState, AbstractAttribute>;
  AAPotentialValues(const IRPosition &IRP, Attributor &A) : Base(IRP) {}

  /// See AbstractAttribute::getState(...).
  PotentialConstantIntValuesState &getState() override { return *this; }
  const PotentialConstantIntValuesState &getState() const override {
    return *this;
  }

  /// Create an abstract attribute view for the position \p IRP.
  static AAPotentialValues &createForPosition(const IRPosition &IRP,
                                              Attributor &A);

  /// Return assumed constant for the associated value. Returns the single
  /// constant if exactly one value is in the assumed set, None if the set is
  /// empty and undef is not contained, and nullptr otherwise.
  Optional<ConstantInt *>
  getAssumedConstantInt(Attributor &A,
                        const Instruction *CtxI = nullptr) const {
    if (!isValidState())
      return nullptr;
    // Exactly one potential value: that is the assumed constant.
    if (getAssumedSet().size() == 1)
      return cast<ConstantInt>(ConstantInt::get(getAssociatedValue().getType(),
                                                *(getAssumedSet().begin())));
    if (getAssumedSet().size() == 0) {
      // Only undef is possible; any constant works, pick 0.
      if (undefIsContained())
        return cast<ConstantInt>(
            ConstantInt::get(getAssociatedValue().getType(), 0));
      return llvm::None;
    }

    return nullptr;
  }

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AAPotentialValues"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is
  /// AAPotentialValues
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// An abstract interface for all noundef attributes.
struct AANoUndef
    : public IRAttribute<Attribute::NoUndef,
                         StateWrapper<BooleanState, AbstractAttribute>> {
  AANoUndef(const IRPosition &IRP, Attributor &A) : IRAttribute(IRP) {}

  /// Return true if we assume that the underlying value is noundef.
  bool isAssumedNoUndef() const { return getAssumed(); }

  /// Return true if we know that underlying value is noundef.
  bool isKnownNoUndef() const { return getKnown(); }

  /// Create an abstract attribute view for the position \p IRP.
  static AANoUndef &createForPosition(const IRPosition &IRP, Attributor &A);

  /// See AbstractAttribute::getName()
  const std::string getName() const override { return "AANoUndef"; }

  /// See AbstractAttribute::getIdAddr()
  const char *getIdAddr() const override { return &ID; }

  /// This function should return true if the type of the \p AA is AANoUndef
  static bool classof(const AbstractAttribute *AA) {
    return (AA->getIdAddr() == &ID);
  }

  /// Unique ID (due to the unique address)
  static const char ID;
};

/// Run options, used by the pass manager.
enum AttributorRunOption {
  NONE = 0,
  MODULE = 1 << 0,
  CGSCC = 1 << 1,
  ALL = MODULE | CGSCC
};

} // end namespace llvm

#endif // LLVM_TRANSFORMS_IPO_ATTRIBUTOR_H