1 //===- Attributor.cpp - Module-wide attribute deduction -------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements an interprocedural pass that deduces and/or propagates
10 // attributes. This is done in an abstract interpretation style fixpoint
11 // iteration. See the Attributor.h file comment and the class descriptions in
12 // that file for more information.
13 //
14 //===----------------------------------------------------------------------===//
15
16 #include "llvm/Transforms/IPO/Attributor.h"
17
18 #include "llvm/ADT/PointerIntPair.h"
19 #include "llvm/ADT/STLExtras.h"
20 #include "llvm/ADT/Statistic.h"
21 #include "llvm/ADT/TinyPtrVector.h"
22 #include "llvm/Analysis/AliasAnalysis.h"
23 #include "llvm/Analysis/CallGraph.h"
24 #include "llvm/Analysis/CallGraphSCCPass.h"
25 #include "llvm/Analysis/InlineCost.h"
26 #include "llvm/Analysis/MemoryBuiltins.h"
27 #include "llvm/Analysis/MustExecute.h"
28 #include "llvm/IR/Attributes.h"
29 #include "llvm/IR/Constant.h"
30 #include "llvm/IR/ConstantFold.h"
31 #include "llvm/IR/Constants.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/GlobalValue.h"
34 #include "llvm/IR/GlobalVariable.h"
35 #include "llvm/IR/Instruction.h"
36 #include "llvm/IR/Instructions.h"
37 #include "llvm/IR/IntrinsicInst.h"
38 #include "llvm/IR/ValueHandle.h"
39 #include "llvm/InitializePasses.h"
40 #include "llvm/Support/Casting.h"
41 #include "llvm/Support/CommandLine.h"
42 #include "llvm/Support/Debug.h"
43 #include "llvm/Support/DebugCounter.h"
44 #include "llvm/Support/FileSystem.h"
45 #include "llvm/Support/GraphWriter.h"
46 #include "llvm/Support/raw_ostream.h"
47 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
48 #include "llvm/Transforms/Utils/Cloning.h"
49 #include "llvm/Transforms/Utils/Local.h"
50 #include <cstdint>
51
52 #ifdef EXPENSIVE_CHECKS
53 #include "llvm/IR/Verifier.h"
54 #endif
55
56 #include <cassert>
57 #include <optional>
58 #include <string>
59
60 using namespace llvm;
61
62 #define DEBUG_TYPE "attributor"
63 #define VERBOSE_DEBUG_TYPE DEBUG_TYPE "-verbose"
64
65 DEBUG_COUNTER(ManifestDBGCounter, "attributor-manifest",
66 "Determine what attributes are manifested in the IR");
67
68 STATISTIC(NumFnDeleted, "Number of function deleted");
69 STATISTIC(NumFnWithExactDefinition,
70 "Number of functions with exact definitions");
71 STATISTIC(NumFnWithoutExactDefinition,
72 "Number of functions without exact definitions");
73 STATISTIC(NumFnShallowWrappersCreated, "Number of shallow wrappers created");
74 STATISTIC(NumAttributesTimedOut,
75 "Number of abstract attributes timed out before fixpoint");
76 STATISTIC(NumAttributesValidFixpoint,
77 "Number of abstract attributes in a valid fixpoint state");
78 STATISTIC(NumAttributesManifested,
79 "Number of abstract attributes manifested in IR");
80
81 // TODO: Determine a good default value.
82 //
83 // In the LLVM-TS and SPEC2006, 32 seems to not induce compile time overheads
84 // (when run with the first 5 abstract attributes). The results also indicate
85 // that we never reach 32 iterations but always find a fixpoint sooner.
86 //
87 // This will become more evolved once we perform two interleaved fixpoint
88 // iterations: bottom-up and top-down.
89 static cl::opt<unsigned>
90 SetFixpointIterations("attributor-max-iterations", cl::Hidden,
91 cl::desc("Maximal number of fixpoint iterations."),
92 cl::init(32));
93
94 static cl::opt<unsigned, true> MaxInitializationChainLengthX(
95 "attributor-max-initialization-chain-length", cl::Hidden,
96 cl::desc(
97 "Maximal number of chained initializations (to avoid stack overflows)"),
98 cl::location(MaxInitializationChainLength), cl::init(1024));
99 unsigned llvm::MaxInitializationChainLength;
100
101 static cl::opt<bool> VerifyMaxFixpointIterations(
102 "attributor-max-iterations-verify", cl::Hidden,
103 cl::desc("Verify that max-iterations is a tight bound for a fixpoint"),
104 cl::init(false));
105
106 static cl::opt<bool> AnnotateDeclarationCallSites(
107 "attributor-annotate-decl-cs", cl::Hidden,
108 cl::desc("Annotate call sites of function declarations."), cl::init(false));
109
110 static cl::opt<bool> EnableHeapToStack("enable-heap-to-stack-conversion",
111 cl::init(true), cl::Hidden);
112
113 static cl::opt<bool>
114 AllowShallowWrappers("attributor-allow-shallow-wrappers", cl::Hidden,
115 cl::desc("Allow the Attributor to create shallow "
116 "wrappers for non-exact definitions."),
117 cl::init(false));
118
119 static cl::opt<bool>
120 AllowDeepWrapper("attributor-allow-deep-wrappers", cl::Hidden,
121 cl::desc("Allow the Attributor to use IP information "
122 "derived from non-exact functions via cloning"),
123 cl::init(false));
124
125 // These options can only used for debug builds.
126 #ifndef NDEBUG
127 static cl::list<std::string>
128 SeedAllowList("attributor-seed-allow-list", cl::Hidden,
129 cl::desc("Comma seperated list of attribute names that are "
130 "allowed to be seeded."),
131 cl::CommaSeparated);
132
133 static cl::list<std::string> FunctionSeedAllowList(
134 "attributor-function-seed-allow-list", cl::Hidden,
135 cl::desc("Comma seperated list of function names that are "
136 "allowed to be seeded."),
137 cl::CommaSeparated);
138 #endif
139
140 static cl::opt<bool>
141 DumpDepGraph("attributor-dump-dep-graph", cl::Hidden,
142 cl::desc("Dump the dependency graph to dot files."),
143 cl::init(false));
144
145 static cl::opt<std::string> DepGraphDotFileNamePrefix(
146 "attributor-depgraph-dot-filename-prefix", cl::Hidden,
147 cl::desc("The prefix used for the CallGraph dot file names."));
148
149 static cl::opt<bool> ViewDepGraph("attributor-view-dep-graph", cl::Hidden,
150 cl::desc("View the dependency graph."),
151 cl::init(false));
152
153 static cl::opt<bool> PrintDependencies("attributor-print-dep", cl::Hidden,
154 cl::desc("Print attribute dependencies"),
155 cl::init(false));
156
157 static cl::opt<bool> EnableCallSiteSpecific(
158 "attributor-enable-call-site-specific-deduction", cl::Hidden,
159 cl::desc("Allow the Attributor to do call site specific analysis"),
160 cl::init(false));
161
162 static cl::opt<bool>
163 PrintCallGraph("attributor-print-call-graph", cl::Hidden,
164 cl::desc("Print Attributor's internal call graph"),
165 cl::init(false));
166
167 static cl::opt<bool> SimplifyAllLoads("attributor-simplify-all-loads",
168 cl::Hidden,
169 cl::desc("Try to simplify all loads."),
170 cl::init(true));
171
172 /// Logic operators for the change status enum class.
173 ///
174 ///{
operator |(ChangeStatus L,ChangeStatus R)175 ChangeStatus llvm::operator|(ChangeStatus L, ChangeStatus R) {
176 return L == ChangeStatus::CHANGED ? L : R;
177 }
operator |=(ChangeStatus & L,ChangeStatus R)178 ChangeStatus &llvm::operator|=(ChangeStatus &L, ChangeStatus R) {
179 L = L | R;
180 return L;
181 }
operator &(ChangeStatus L,ChangeStatus R)182 ChangeStatus llvm::operator&(ChangeStatus L, ChangeStatus R) {
183 return L == ChangeStatus::UNCHANGED ? L : R;
184 }
operator &=(ChangeStatus & L,ChangeStatus R)185 ChangeStatus &llvm::operator&=(ChangeStatus &L, ChangeStatus R) {
186 L = L & R;
187 return L;
188 }
189 ///}
190
isNoSyncInst(Attributor & A,const Instruction & I,const AbstractAttribute & QueryingAA)191 bool AA::isNoSyncInst(Attributor &A, const Instruction &I,
192 const AbstractAttribute &QueryingAA) {
193 // We are looking for volatile instructions or non-relaxed atomics.
194 if (const auto *CB = dyn_cast<CallBase>(&I)) {
195 if (CB->hasFnAttr(Attribute::NoSync))
196 return true;
197
198 // Non-convergent and readnone imply nosync.
199 if (!CB->isConvergent() && !CB->mayReadOrWriteMemory())
200 return true;
201
202 if (AANoSync::isNoSyncIntrinsic(&I))
203 return true;
204
205 const auto &NoSyncAA = A.getAAFor<AANoSync>(
206 QueryingAA, IRPosition::callsite_function(*CB), DepClassTy::OPTIONAL);
207 return NoSyncAA.isAssumedNoSync();
208 }
209
210 if (!I.mayReadOrWriteMemory())
211 return true;
212
213 return !I.isVolatile() && !AANoSync::isNonRelaxedAtomic(&I);
214 }
215
isDynamicallyUnique(Attributor & A,const AbstractAttribute & QueryingAA,const Value & V,bool ForAnalysisOnly)216 bool AA::isDynamicallyUnique(Attributor &A, const AbstractAttribute &QueryingAA,
217 const Value &V, bool ForAnalysisOnly) {
218 // TODO: See the AAInstanceInfo class comment.
219 if (!ForAnalysisOnly)
220 return false;
221 auto &InstanceInfoAA = A.getAAFor<AAInstanceInfo>(
222 QueryingAA, IRPosition::value(V), DepClassTy::OPTIONAL);
223 return InstanceInfoAA.isAssumedUniqueForAnalysis();
224 }
225
getInitialValueForObj(Value & Obj,Type & Ty,const TargetLibraryInfo * TLI,const DataLayout & DL,AA::RangeTy * RangePtr)226 Constant *AA::getInitialValueForObj(Value &Obj, Type &Ty,
227 const TargetLibraryInfo *TLI,
228 const DataLayout &DL,
229 AA::RangeTy *RangePtr) {
230 if (isa<AllocaInst>(Obj))
231 return UndefValue::get(&Ty);
232 if (Constant *Init = getInitialValueOfAllocation(&Obj, TLI, &Ty))
233 return Init;
234 auto *GV = dyn_cast<GlobalVariable>(&Obj);
235 if (!GV)
236 return nullptr;
237 if (!GV->hasLocalLinkage() && !(GV->isConstant() && GV->hasInitializer()))
238 return nullptr;
239 if (!GV->hasInitializer())
240 return UndefValue::get(&Ty);
241
242 if (RangePtr && !RangePtr->offsetOrSizeAreUnknown()) {
243 APInt Offset = APInt(64, RangePtr->Offset);
244 return ConstantFoldLoadFromConst(GV->getInitializer(), &Ty, Offset, DL);
245 }
246
247 return ConstantFoldLoadFromUniformValue(GV->getInitializer(), &Ty);
248 }
249
isValidInScope(const Value & V,const Function * Scope)250 bool AA::isValidInScope(const Value &V, const Function *Scope) {
251 if (isa<Constant>(V))
252 return true;
253 if (auto *I = dyn_cast<Instruction>(&V))
254 return I->getFunction() == Scope;
255 if (auto *A = dyn_cast<Argument>(&V))
256 return A->getParent() == Scope;
257 return false;
258 }
259
isValidAtPosition(const AA::ValueAndContext & VAC,InformationCache & InfoCache)260 bool AA::isValidAtPosition(const AA::ValueAndContext &VAC,
261 InformationCache &InfoCache) {
262 if (isa<Constant>(VAC.getValue()) || VAC.getValue() == VAC.getCtxI())
263 return true;
264 const Function *Scope = nullptr;
265 const Instruction *CtxI = VAC.getCtxI();
266 if (CtxI)
267 Scope = CtxI->getFunction();
268 if (auto *A = dyn_cast<Argument>(VAC.getValue()))
269 return A->getParent() == Scope;
270 if (auto *I = dyn_cast<Instruction>(VAC.getValue())) {
271 if (I->getFunction() == Scope) {
272 if (const DominatorTree *DT =
273 InfoCache.getAnalysisResultForFunction<DominatorTreeAnalysis>(
274 *Scope))
275 return DT->dominates(I, CtxI);
276 // Local dominance check mostly for the old PM passes.
277 if (CtxI && I->getParent() == CtxI->getParent())
278 return llvm::any_of(
279 make_range(I->getIterator(), I->getParent()->end()),
280 [&](const Instruction &AfterI) { return &AfterI == CtxI; });
281 }
282 }
283 return false;
284 }
285
getWithType(Value & V,Type & Ty)286 Value *AA::getWithType(Value &V, Type &Ty) {
287 if (V.getType() == &Ty)
288 return &V;
289 if (isa<PoisonValue>(V))
290 return PoisonValue::get(&Ty);
291 if (isa<UndefValue>(V))
292 return UndefValue::get(&Ty);
293 if (auto *C = dyn_cast<Constant>(&V)) {
294 if (C->isNullValue())
295 return Constant::getNullValue(&Ty);
296 if (C->getType()->isPointerTy() && Ty.isPointerTy())
297 return ConstantExpr::getPointerCast(C, &Ty);
298 if (C->getType()->getPrimitiveSizeInBits() >= Ty.getPrimitiveSizeInBits()) {
299 if (C->getType()->isIntegerTy() && Ty.isIntegerTy())
300 return ConstantExpr::getTrunc(C, &Ty, /* OnlyIfReduced */ true);
301 if (C->getType()->isFloatingPointTy() && Ty.isFloatingPointTy())
302 return ConstantExpr::getFPTrunc(C, &Ty, /* OnlyIfReduced */ true);
303 }
304 }
305 return nullptr;
306 }
307
308 std::optional<Value *>
combineOptionalValuesInAAValueLatice(const std::optional<Value * > & A,const std::optional<Value * > & B,Type * Ty)309 AA::combineOptionalValuesInAAValueLatice(const std::optional<Value *> &A,
310 const std::optional<Value *> &B,
311 Type *Ty) {
312 if (A == B)
313 return A;
314 if (!B)
315 return A;
316 if (*B == nullptr)
317 return nullptr;
318 if (!A)
319 return Ty ? getWithType(**B, *Ty) : nullptr;
320 if (*A == nullptr)
321 return nullptr;
322 if (!Ty)
323 Ty = (*A)->getType();
324 if (isa_and_nonnull<UndefValue>(*A))
325 return getWithType(**B, *Ty);
326 if (isa<UndefValue>(*B))
327 return A;
328 if (*A && *B && *A == getWithType(**B, *Ty))
329 return A;
330 return nullptr;
331 }
332
333 template <bool IsLoad, typename Ty>
getPotentialCopiesOfMemoryValue(Attributor & A,Ty & I,SmallSetVector<Value *,4> & PotentialCopies,SmallSetVector<Instruction *,4> & PotentialValueOrigins,const AbstractAttribute & QueryingAA,bool & UsedAssumedInformation,bool OnlyExact)334 static bool getPotentialCopiesOfMemoryValue(
335 Attributor &A, Ty &I, SmallSetVector<Value *, 4> &PotentialCopies,
336 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
337 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
338 bool OnlyExact) {
339 LLVM_DEBUG(dbgs() << "Trying to determine the potential copies of " << I
340 << " (only exact: " << OnlyExact << ")\n";);
341
342 Value &Ptr = *I.getPointerOperand();
343 // Containers to remember the pointer infos and new copies while we are not
344 // sure that we can find all of them. If we abort we want to avoid spurious
345 // dependences and potential copies in the provided container.
346 SmallVector<const AAPointerInfo *> PIs;
347 SmallVector<Value *> NewCopies;
348 SmallVector<Instruction *> NewCopyOrigins;
349
350 const auto *TLI =
351 A.getInfoCache().getTargetLibraryInfoForFunction(*I.getFunction());
352
353 auto Pred = [&](Value &Obj) {
354 LLVM_DEBUG(dbgs() << "Visit underlying object " << Obj << "\n");
355 if (isa<UndefValue>(&Obj))
356 return true;
357 if (isa<ConstantPointerNull>(&Obj)) {
358 // A null pointer access can be undefined but any offset from null may
359 // be OK. We do not try to optimize the latter.
360 if (!NullPointerIsDefined(I.getFunction(),
361 Ptr.getType()->getPointerAddressSpace()) &&
362 A.getAssumedSimplified(Ptr, QueryingAA, UsedAssumedInformation,
363 AA::Interprocedural) == &Obj)
364 return true;
365 LLVM_DEBUG(
366 dbgs() << "Underlying object is a valid nullptr, giving up.\n";);
367 return false;
368 }
369 // TODO: Use assumed noalias return.
370 if (!isa<AllocaInst>(&Obj) && !isa<GlobalVariable>(&Obj) &&
371 !(IsLoad ? isAllocationFn(&Obj, TLI) : isNoAliasCall(&Obj))) {
372 LLVM_DEBUG(dbgs() << "Underlying object is not supported yet: " << Obj
373 << "\n";);
374 return false;
375 }
376 if (auto *GV = dyn_cast<GlobalVariable>(&Obj))
377 if (!GV->hasLocalLinkage() &&
378 !(GV->isConstant() && GV->hasInitializer())) {
379 LLVM_DEBUG(dbgs() << "Underlying object is global with external "
380 "linkage, not supported yet: "
381 << Obj << "\n";);
382 return false;
383 }
384
385 bool NullOnly = true;
386 bool NullRequired = false;
387 auto CheckForNullOnlyAndUndef = [&](std::optional<Value *> V,
388 bool IsExact) {
389 if (!V || *V == nullptr)
390 NullOnly = false;
391 else if (isa<UndefValue>(*V))
392 /* No op */;
393 else if (isa<Constant>(*V) && cast<Constant>(*V)->isNullValue())
394 NullRequired = !IsExact;
395 else
396 NullOnly = false;
397 };
398
399 auto AdjustWrittenValueType = [&](const AAPointerInfo::Access &Acc,
400 Value &V) {
401 Value *AdjV = AA::getWithType(V, *I.getType());
402 if (!AdjV) {
403 LLVM_DEBUG(dbgs() << "Underlying object written but stored value "
404 "cannot be converted to read type: "
405 << *Acc.getRemoteInst() << " : " << *I.getType()
406 << "\n";);
407 }
408 return AdjV;
409 };
410
411 auto CheckAccess = [&](const AAPointerInfo::Access &Acc, bool IsExact) {
412 if ((IsLoad && !Acc.isWriteOrAssumption()) || (!IsLoad && !Acc.isRead()))
413 return true;
414 if (IsLoad && Acc.isWrittenValueYetUndetermined())
415 return true;
416 CheckForNullOnlyAndUndef(Acc.getContent(), IsExact);
417 if (OnlyExact && !IsExact && !NullOnly &&
418 !isa_and_nonnull<UndefValue>(Acc.getWrittenValue())) {
419 LLVM_DEBUG(dbgs() << "Non exact access " << *Acc.getRemoteInst()
420 << ", abort!\n");
421 return false;
422 }
423 if (NullRequired && !NullOnly) {
424 LLVM_DEBUG(dbgs() << "Required all `null` accesses due to non exact "
425 "one, however found non-null one: "
426 << *Acc.getRemoteInst() << ", abort!\n");
427 return false;
428 }
429 if (IsLoad) {
430 assert(isa<LoadInst>(I) && "Expected load or store instruction only!");
431 if (!Acc.isWrittenValueUnknown()) {
432 Value *V = AdjustWrittenValueType(Acc, *Acc.getWrittenValue());
433 if (!V)
434 return false;
435 NewCopies.push_back(V);
436 NewCopyOrigins.push_back(Acc.getRemoteInst());
437 return true;
438 }
439 auto *SI = dyn_cast<StoreInst>(Acc.getRemoteInst());
440 if (!SI) {
441 LLVM_DEBUG(dbgs() << "Underlying object written through a non-store "
442 "instruction not supported yet: "
443 << *Acc.getRemoteInst() << "\n";);
444 return false;
445 }
446 Value *V = AdjustWrittenValueType(Acc, *SI->getValueOperand());
447 if (!V)
448 return false;
449 NewCopies.push_back(V);
450 NewCopyOrigins.push_back(SI);
451 } else {
452 assert(isa<StoreInst>(I) && "Expected load or store instruction only!");
453 auto *LI = dyn_cast<LoadInst>(Acc.getRemoteInst());
454 if (!LI && OnlyExact) {
455 LLVM_DEBUG(dbgs() << "Underlying object read through a non-load "
456 "instruction not supported yet: "
457 << *Acc.getRemoteInst() << "\n";);
458 return false;
459 }
460 NewCopies.push_back(Acc.getRemoteInst());
461 }
462 return true;
463 };
464
465 // If the value has been written to we don't need the initial value of the
466 // object.
467 bool HasBeenWrittenTo = false;
468
469 AA::RangeTy Range;
470 auto &PI = A.getAAFor<AAPointerInfo>(QueryingAA, IRPosition::value(Obj),
471 DepClassTy::NONE);
472 if (!PI.forallInterferingAccesses(A, QueryingAA, I, CheckAccess,
473 HasBeenWrittenTo, Range)) {
474 LLVM_DEBUG(
475 dbgs()
476 << "Failed to verify all interfering accesses for underlying object: "
477 << Obj << "\n");
478 return false;
479 }
480
481 if (IsLoad && !HasBeenWrittenTo && !Range.isUnassigned()) {
482 const DataLayout &DL = A.getDataLayout();
483 Value *InitialValue =
484 AA::getInitialValueForObj(Obj, *I.getType(), TLI, DL, &Range);
485 if (!InitialValue) {
486 LLVM_DEBUG(dbgs() << "Could not determine required initial value of "
487 "underlying object, abort!\n");
488 return false;
489 }
490 CheckForNullOnlyAndUndef(InitialValue, /* IsExact */ true);
491 if (NullRequired && !NullOnly) {
492 LLVM_DEBUG(dbgs() << "Non exact access but initial value that is not "
493 "null or undef, abort!\n");
494 return false;
495 }
496
497 NewCopies.push_back(InitialValue);
498 NewCopyOrigins.push_back(nullptr);
499 }
500
501 PIs.push_back(&PI);
502
503 return true;
504 };
505
506 const auto &AAUO = A.getAAFor<AAUnderlyingObjects>(
507 QueryingAA, IRPosition::value(Ptr), DepClassTy::OPTIONAL);
508 if (!AAUO.forallUnderlyingObjects(Pred)) {
509 LLVM_DEBUG(
510 dbgs() << "Underlying objects stored into could not be determined\n";);
511 return false;
512 }
513
514 // Only if we were successful collection all potential copies we record
515 // dependences (on non-fix AAPointerInfo AAs). We also only then modify the
516 // given PotentialCopies container.
517 for (const auto *PI : PIs) {
518 if (!PI->getState().isAtFixpoint())
519 UsedAssumedInformation = true;
520 A.recordDependence(*PI, QueryingAA, DepClassTy::OPTIONAL);
521 }
522 PotentialCopies.insert(NewCopies.begin(), NewCopies.end());
523 PotentialValueOrigins.insert(NewCopyOrigins.begin(), NewCopyOrigins.end());
524
525 return true;
526 }
527
getPotentiallyLoadedValues(Attributor & A,LoadInst & LI,SmallSetVector<Value *,4> & PotentialValues,SmallSetVector<Instruction *,4> & PotentialValueOrigins,const AbstractAttribute & QueryingAA,bool & UsedAssumedInformation,bool OnlyExact)528 bool AA::getPotentiallyLoadedValues(
529 Attributor &A, LoadInst &LI, SmallSetVector<Value *, 4> &PotentialValues,
530 SmallSetVector<Instruction *, 4> &PotentialValueOrigins,
531 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
532 bool OnlyExact) {
533 return getPotentialCopiesOfMemoryValue</* IsLoad */ true>(
534 A, LI, PotentialValues, PotentialValueOrigins, QueryingAA,
535 UsedAssumedInformation, OnlyExact);
536 }
537
getPotentialCopiesOfStoredValue(Attributor & A,StoreInst & SI,SmallSetVector<Value *,4> & PotentialCopies,const AbstractAttribute & QueryingAA,bool & UsedAssumedInformation,bool OnlyExact)538 bool AA::getPotentialCopiesOfStoredValue(
539 Attributor &A, StoreInst &SI, SmallSetVector<Value *, 4> &PotentialCopies,
540 const AbstractAttribute &QueryingAA, bool &UsedAssumedInformation,
541 bool OnlyExact) {
542 SmallSetVector<Instruction *, 4> PotentialValueOrigins;
543 return getPotentialCopiesOfMemoryValue</* IsLoad */ false>(
544 A, SI, PotentialCopies, PotentialValueOrigins, QueryingAA,
545 UsedAssumedInformation, OnlyExact);
546 }
547
isAssumedReadOnlyOrReadNone(Attributor & A,const IRPosition & IRP,const AbstractAttribute & QueryingAA,bool RequireReadNone,bool & IsKnown)548 static bool isAssumedReadOnlyOrReadNone(Attributor &A, const IRPosition &IRP,
549 const AbstractAttribute &QueryingAA,
550 bool RequireReadNone, bool &IsKnown) {
551
552 IRPosition::Kind Kind = IRP.getPositionKind();
553 if (Kind == IRPosition::IRP_FUNCTION || Kind == IRPosition::IRP_CALL_SITE) {
554 const auto &MemLocAA =
555 A.getAAFor<AAMemoryLocation>(QueryingAA, IRP, DepClassTy::NONE);
556 if (MemLocAA.isAssumedReadNone()) {
557 IsKnown = MemLocAA.isKnownReadNone();
558 if (!IsKnown)
559 A.recordDependence(MemLocAA, QueryingAA, DepClassTy::OPTIONAL);
560 return true;
561 }
562 }
563
564 const auto &MemBehaviorAA =
565 A.getAAFor<AAMemoryBehavior>(QueryingAA, IRP, DepClassTy::NONE);
566 if (MemBehaviorAA.isAssumedReadNone() ||
567 (!RequireReadNone && MemBehaviorAA.isAssumedReadOnly())) {
568 IsKnown = RequireReadNone ? MemBehaviorAA.isKnownReadNone()
569 : MemBehaviorAA.isKnownReadOnly();
570 if (!IsKnown)
571 A.recordDependence(MemBehaviorAA, QueryingAA, DepClassTy::OPTIONAL);
572 return true;
573 }
574
575 return false;
576 }
577
isAssumedReadOnly(Attributor & A,const IRPosition & IRP,const AbstractAttribute & QueryingAA,bool & IsKnown)578 bool AA::isAssumedReadOnly(Attributor &A, const IRPosition &IRP,
579 const AbstractAttribute &QueryingAA, bool &IsKnown) {
580 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
581 /* RequireReadNone */ false, IsKnown);
582 }
isAssumedReadNone(Attributor & A,const IRPosition & IRP,const AbstractAttribute & QueryingAA,bool & IsKnown)583 bool AA::isAssumedReadNone(Attributor &A, const IRPosition &IRP,
584 const AbstractAttribute &QueryingAA, bool &IsKnown) {
585 return isAssumedReadOnlyOrReadNone(A, IRP, QueryingAA,
586 /* RequireReadNone */ true, IsKnown);
587 }
588
589 static bool
isPotentiallyReachable(Attributor & A,const Instruction & FromI,const Instruction * ToI,const Function & ToFn,const AbstractAttribute & QueryingAA,const AA::InstExclusionSetTy * ExclusionSet,std::function<bool (const Function & F)> GoBackwardsCB)590 isPotentiallyReachable(Attributor &A, const Instruction &FromI,
591 const Instruction *ToI, const Function &ToFn,
592 const AbstractAttribute &QueryingAA,
593 const AA::InstExclusionSetTy *ExclusionSet,
594 std::function<bool(const Function &F)> GoBackwardsCB) {
595 LLVM_DEBUG({
596 dbgs() << "[AA] isPotentiallyReachable @" << ToFn.getName() << " from "
597 << FromI << " [GBCB: " << bool(GoBackwardsCB) << "][#ExS: "
598 << (ExclusionSet ? std::to_string(ExclusionSet->size()) : "none")
599 << "]\n";
600 if (ExclusionSet)
601 for (auto *ES : *ExclusionSet)
602 dbgs() << *ES << "\n";
603 });
604
605 // If we can go arbitrarily backwards we will eventually reach an entry point
606 // that can reach ToI. Only if a set of blocks through which we cannot go is
607 // provided, or once we track internal functions not accessible from the
608 // outside, it makes sense to perform backwards analysis in the absence of a
609 // GoBackwardsCB.
610 if (!GoBackwardsCB && !ExclusionSet) {
611 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
612 << " is not checked backwards and does not have an "
613 "exclusion set, abort\n");
614 return true;
615 }
616
617 SmallPtrSet<const Instruction *, 8> Visited;
618 SmallVector<const Instruction *> Worklist;
619 Worklist.push_back(&FromI);
620
621 while (!Worklist.empty()) {
622 const Instruction *CurFromI = Worklist.pop_back_val();
623 if (!Visited.insert(CurFromI).second)
624 continue;
625
626 const Function *FromFn = CurFromI->getFunction();
627 if (FromFn == &ToFn) {
628 if (!ToI)
629 return true;
630 LLVM_DEBUG(dbgs() << "[AA] check " << *ToI << " from " << *CurFromI
631 << " intraprocedurally\n");
632 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
633 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
634 bool Result =
635 ReachabilityAA.isAssumedReachable(A, *CurFromI, *ToI, ExclusionSet);
636 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " "
637 << (Result ? "can potentially " : "cannot ") << "reach "
638 << *ToI << " [Intra]\n");
639 if (Result)
640 return true;
641 }
642
643 bool Result = true;
644 if (!ToFn.isDeclaration() && ToI) {
645 const auto &ToReachabilityAA = A.getAAFor<AAIntraFnReachability>(
646 QueryingAA, IRPosition::function(ToFn), DepClassTy::OPTIONAL);
647 const Instruction &EntryI = ToFn.getEntryBlock().front();
648 Result =
649 ToReachabilityAA.isAssumedReachable(A, EntryI, *ToI, ExclusionSet);
650 LLVM_DEBUG(dbgs() << "[AA] Entry " << EntryI << " of @" << ToFn.getName()
651 << " " << (Result ? "can potentially " : "cannot ")
652 << "reach @" << *ToI << " [ToFn]\n");
653 }
654
655 if (Result) {
656 // The entry of the ToFn can reach the instruction ToI. If the current
657 // instruction is already known to reach the ToFn.
658 const auto &FnReachabilityAA = A.getAAFor<AAInterFnReachability>(
659 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
660 Result = FnReachabilityAA.instructionCanReach(A, *CurFromI, ToFn,
661 ExclusionSet);
662 LLVM_DEBUG(dbgs() << "[AA] " << *CurFromI << " in @" << FromFn->getName()
663 << " " << (Result ? "can potentially " : "cannot ")
664 << "reach @" << ToFn.getName() << " [FromFn]\n");
665 if (Result)
666 return true;
667 }
668
669 // TODO: Check assumed nounwind.
670 const auto &ReachabilityAA = A.getAAFor<AAIntraFnReachability>(
671 QueryingAA, IRPosition::function(*FromFn), DepClassTy::OPTIONAL);
672 auto ReturnInstCB = [&](Instruction &Ret) {
673 bool Result =
674 ReachabilityAA.isAssumedReachable(A, *CurFromI, Ret, ExclusionSet);
675 LLVM_DEBUG(dbgs() << "[AA][Ret] " << *CurFromI << " "
676 << (Result ? "can potentially " : "cannot ") << "reach "
677 << Ret << " [Intra]\n");
678 return !Result;
679 };
680
681 // Check if we can reach returns.
682 bool UsedAssumedInformation = false;
683 if (A.checkForAllInstructions(ReturnInstCB, FromFn, QueryingAA,
684 {Instruction::Ret}, UsedAssumedInformation)) {
685 LLVM_DEBUG(dbgs() << "[AA] No return is reachable, done\n");
686 continue;
687 }
688
689 if (!GoBackwardsCB) {
690 LLVM_DEBUG(dbgs() << "[AA] check @" << ToFn.getName() << " from " << FromI
691 << " is not checked backwards, abort\n");
692 return true;
693 }
694
695 // If we do not go backwards from the FromFn we are done here and so far we
696 // could not find a way to reach ToFn/ToI.
697 if (!GoBackwardsCB(*FromFn))
698 continue;
699
700 LLVM_DEBUG(dbgs() << "Stepping backwards to the call sites of @"
701 << FromFn->getName() << "\n");
702
703 auto CheckCallSite = [&](AbstractCallSite ACS) {
704 CallBase *CB = ACS.getInstruction();
705 if (!CB)
706 return false;
707
708 if (isa<InvokeInst>(CB))
709 return false;
710
711 Instruction *Inst = CB->getNextNonDebugInstruction();
712 Worklist.push_back(Inst);
713 return true;
714 };
715
716 Result = !A.checkForAllCallSites(CheckCallSite, *FromFn,
717 /* RequireAllCallSites */ true,
718 &QueryingAA, UsedAssumedInformation);
719 if (Result) {
720 LLVM_DEBUG(dbgs() << "[AA] stepping back to call sites from " << *CurFromI
721 << " in @" << FromFn->getName()
722 << " failed, give up\n");
723 return true;
724 }
725
726 LLVM_DEBUG(dbgs() << "[AA] stepped back to call sites from " << *CurFromI
727 << " in @" << FromFn->getName()
728 << " worklist size is: " << Worklist.size() << "\n");
729 }
730 return false;
731 }
732
isPotentiallyReachable(Attributor & A,const Instruction & FromI,const Instruction & ToI,const AbstractAttribute & QueryingAA,const AA::InstExclusionSetTy * ExclusionSet,std::function<bool (const Function & F)> GoBackwardsCB)733 bool AA::isPotentiallyReachable(
734 Attributor &A, const Instruction &FromI, const Instruction &ToI,
735 const AbstractAttribute &QueryingAA,
736 const AA::InstExclusionSetTy *ExclusionSet,
737 std::function<bool(const Function &F)> GoBackwardsCB) {
738 const Function *ToFn = ToI.getFunction();
739 return ::isPotentiallyReachable(A, FromI, &ToI, *ToFn, QueryingAA,
740 ExclusionSet, GoBackwardsCB);
741 }
742
isPotentiallyReachable(Attributor & A,const Instruction & FromI,const Function & ToFn,const AbstractAttribute & QueryingAA,const AA::InstExclusionSetTy * ExclusionSet,std::function<bool (const Function & F)> GoBackwardsCB)743 bool AA::isPotentiallyReachable(
744 Attributor &A, const Instruction &FromI, const Function &ToFn,
745 const AbstractAttribute &QueryingAA,
746 const AA::InstExclusionSetTy *ExclusionSet,
747 std::function<bool(const Function &F)> GoBackwardsCB) {
748 return ::isPotentiallyReachable(A, FromI, /* ToI */ nullptr, ToFn, QueryingAA,
749 ExclusionSet, GoBackwardsCB);
750 }
751
isAssumedThreadLocalObject(Attributor & A,Value & Obj,const AbstractAttribute & QueryingAA)752 bool AA::isAssumedThreadLocalObject(Attributor &A, Value &Obj,
753 const AbstractAttribute &QueryingAA) {
754 if (isa<UndefValue>(Obj))
755 return true;
756 if (isa<AllocaInst>(Obj)) {
757 InformationCache &InfoCache = A.getInfoCache();
758 if (!InfoCache.stackIsAccessibleByOtherThreads()) {
759 LLVM_DEBUG(
760 dbgs() << "[AA] Object '" << Obj
761 << "' is thread local; stack objects are thread local.\n");
762 return true;
763 }
764 const auto &NoCaptureAA = A.getAAFor<AANoCapture>(
765 QueryingAA, IRPosition::value(Obj), DepClassTy::OPTIONAL);
766 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is "
767 << (NoCaptureAA.isAssumedNoCapture() ? "" : "not")
768 << " thread local; "
769 << (NoCaptureAA.isAssumedNoCapture() ? "non-" : "")
770 << "captured stack object.\n");
771 return NoCaptureAA.isAssumedNoCapture();
772 }
773 if (auto *GV = dyn_cast<GlobalVariable>(&Obj)) {
774 if (GV->isConstant()) {
775 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
776 << "' is thread local; constant global\n");
777 return true;
778 }
779 if (GV->isThreadLocal()) {
780 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
781 << "' is thread local; thread local global\n");
782 return true;
783 }
784 }
785
786 if (A.getInfoCache().targetIsGPU()) {
787 if (Obj.getType()->getPointerAddressSpace() ==
788 (int)AA::GPUAddressSpace::Local) {
789 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
790 << "' is thread local; GPU local memory\n");
791 return true;
792 }
793 if (Obj.getType()->getPointerAddressSpace() ==
794 (int)AA::GPUAddressSpace::Constant) {
795 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj
796 << "' is thread local; GPU constant memory\n");
797 return true;
798 }
799 }
800
801 LLVM_DEBUG(dbgs() << "[AA] Object '" << Obj << "' is not thread local\n");
802 return false;
803 }
804
isPotentiallyAffectedByBarrier(Attributor & A,const Instruction & I,const AbstractAttribute & QueryingAA)805 bool AA::isPotentiallyAffectedByBarrier(Attributor &A, const Instruction &I,
806 const AbstractAttribute &QueryingAA) {
807 if (!I.mayHaveSideEffects() && !I.mayReadFromMemory())
808 return false;
809
810 SmallSetVector<const Value *, 8> Ptrs;
811
812 auto AddLocationPtr = [&](std::optional<MemoryLocation> Loc) {
813 if (!Loc || !Loc->Ptr) {
814 LLVM_DEBUG(
815 dbgs() << "[AA] Access to unknown location; -> requires barriers\n");
816 return false;
817 }
818 Ptrs.insert(Loc->Ptr);
819 return true;
820 };
821
822 if (const MemIntrinsic *MI = dyn_cast<MemIntrinsic>(&I)) {
823 if (!AddLocationPtr(MemoryLocation::getForDest(MI)))
824 return true;
825 if (const MemTransferInst *MTI = dyn_cast<MemTransferInst>(&I))
826 if (!AddLocationPtr(MemoryLocation::getForSource(MTI)))
827 return true;
828 } else if (!AddLocationPtr(MemoryLocation::getOrNone(&I)))
829 return true;
830
831 return isPotentiallyAffectedByBarrier(A, Ptrs.getArrayRef(), QueryingAA, &I);
832 }
833
isPotentiallyAffectedByBarrier(Attributor & A,ArrayRef<const Value * > Ptrs,const AbstractAttribute & QueryingAA,const Instruction * CtxI)834 bool AA::isPotentiallyAffectedByBarrier(Attributor &A,
835 ArrayRef<const Value *> Ptrs,
836 const AbstractAttribute &QueryingAA,
837 const Instruction *CtxI) {
838 for (const Value *Ptr : Ptrs) {
839 if (!Ptr) {
840 LLVM_DEBUG(dbgs() << "[AA] nullptr; -> requires barriers\n");
841 return true;
842 }
843
844 auto Pred = [&](Value &Obj) {
845 if (AA::isAssumedThreadLocalObject(A, Obj, QueryingAA))
846 return true;
847 LLVM_DEBUG(dbgs() << "[AA] Access to '" << Obj << "' via '" << *Ptr
848 << "'; -> requires barrier\n");
849 return false;
850 };
851
852 const auto &UnderlyingObjsAA = A.getAAFor<AAUnderlyingObjects>(
853 QueryingAA, IRPosition::value(*Ptr), DepClassTy::OPTIONAL);
854 if (!UnderlyingObjsAA.forallUnderlyingObjects(Pred))
855 return true;
856 }
857 return false;
858 }
859
860 /// Return true if \p New is equal or worse than \p Old.
isEqualOrWorse(const Attribute & New,const Attribute & Old)861 static bool isEqualOrWorse(const Attribute &New, const Attribute &Old) {
862 if (!Old.isIntAttribute())
863 return true;
864
865 return Old.getValueAsInt() >= New.getValueAsInt();
866 }
867
868 /// Return true if the information provided by \p Attr was added to the
869 /// attribute list \p Attrs. This is only the case if it was not already present
870 /// in \p Attrs at the position describe by \p PK and \p AttrIdx.
addIfNotExistent(LLVMContext & Ctx,const Attribute & Attr,AttributeList & Attrs,int AttrIdx,bool ForceReplace=false)871 static bool addIfNotExistent(LLVMContext &Ctx, const Attribute &Attr,
872 AttributeList &Attrs, int AttrIdx,
873 bool ForceReplace = false) {
874
875 if (Attr.isEnumAttribute()) {
876 Attribute::AttrKind Kind = Attr.getKindAsEnum();
877 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
878 if (!ForceReplace &&
879 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
880 return false;
881 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
882 return true;
883 }
884 if (Attr.isStringAttribute()) {
885 StringRef Kind = Attr.getKindAsString();
886 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
887 if (!ForceReplace &&
888 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
889 return false;
890 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
891 return true;
892 }
893 if (Attr.isIntAttribute()) {
894 Attribute::AttrKind Kind = Attr.getKindAsEnum();
895 if (Attrs.hasAttributeAtIndex(AttrIdx, Kind))
896 if (!ForceReplace &&
897 isEqualOrWorse(Attr, Attrs.getAttributeAtIndex(AttrIdx, Kind)))
898 return false;
899 Attrs = Attrs.removeAttributeAtIndex(Ctx, AttrIdx, Kind);
900 Attrs = Attrs.addAttributeAtIndex(Ctx, AttrIdx, Attr);
901 return true;
902 }
903
904 llvm_unreachable("Expected enum or string attribute!");
905 }
906
getAssociatedArgument() const907 Argument *IRPosition::getAssociatedArgument() const {
908 if (getPositionKind() == IRP_ARGUMENT)
909 return cast<Argument>(&getAnchorValue());
910
911 // Not an Argument and no argument number means this is not a call site
912 // argument, thus we cannot find a callback argument to return.
913 int ArgNo = getCallSiteArgNo();
914 if (ArgNo < 0)
915 return nullptr;
916
917 // Use abstract call sites to make the connection between the call site
918 // values and the ones in callbacks. If a callback was found that makes use
919 // of the underlying call site operand, we want the corresponding callback
920 // callee argument and not the direct callee argument.
921 std::optional<Argument *> CBCandidateArg;
922 SmallVector<const Use *, 4> CallbackUses;
923 const auto &CB = cast<CallBase>(getAnchorValue());
924 AbstractCallSite::getCallbackUses(CB, CallbackUses);
925 for (const Use *U : CallbackUses) {
926 AbstractCallSite ACS(U);
927 assert(ACS && ACS.isCallbackCall());
928 if (!ACS.getCalledFunction())
929 continue;
930
931 for (unsigned u = 0, e = ACS.getNumArgOperands(); u < e; u++) {
932
933 // Test if the underlying call site operand is argument number u of the
934 // callback callee.
935 if (ACS.getCallArgOperandNo(u) != ArgNo)
936 continue;
937
938 assert(ACS.getCalledFunction()->arg_size() > u &&
939 "ACS mapped into var-args arguments!");
940 if (CBCandidateArg) {
941 CBCandidateArg = nullptr;
942 break;
943 }
944 CBCandidateArg = ACS.getCalledFunction()->getArg(u);
945 }
946 }
947
948 // If we found a unique callback candidate argument, return it.
949 if (CBCandidateArg && *CBCandidateArg)
950 return *CBCandidateArg;
951
952 // If no callbacks were found, or none used the underlying call site operand
953 // exclusively, use the direct callee argument if available.
954 const Function *Callee = CB.getCalledFunction();
955 if (Callee && Callee->arg_size() > unsigned(ArgNo))
956 return Callee->getArg(ArgNo);
957
958 return nullptr;
959 }
960
update(Attributor & A)961 ChangeStatus AbstractAttribute::update(Attributor &A) {
962 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
963 if (getState().isAtFixpoint())
964 return HasChanged;
965
966 LLVM_DEBUG(dbgs() << "[Attributor] Update: " << *this << "\n");
967
968 HasChanged = updateImpl(A);
969
970 LLVM_DEBUG(dbgs() << "[Attributor] Update " << HasChanged << " " << *this
971 << "\n");
972
973 return HasChanged;
974 }
975
976 ChangeStatus
manifestAttrs(Attributor & A,const IRPosition & IRP,const ArrayRef<Attribute> & DeducedAttrs,bool ForceReplace)977 IRAttributeManifest::manifestAttrs(Attributor &A, const IRPosition &IRP,
978 const ArrayRef<Attribute> &DeducedAttrs,
979 bool ForceReplace) {
980 Function *ScopeFn = IRP.getAnchorScope();
981 IRPosition::Kind PK = IRP.getPositionKind();
982
983 // In the following some generic code that will manifest attributes in
984 // DeducedAttrs if they improve the current IR. Due to the different
985 // annotation positions we use the underlying AttributeList interface.
986
987 AttributeList Attrs;
988 switch (PK) {
989 case IRPosition::IRP_INVALID:
990 case IRPosition::IRP_FLOAT:
991 return ChangeStatus::UNCHANGED;
992 case IRPosition::IRP_ARGUMENT:
993 case IRPosition::IRP_FUNCTION:
994 case IRPosition::IRP_RETURNED:
995 Attrs = ScopeFn->getAttributes();
996 break;
997 case IRPosition::IRP_CALL_SITE:
998 case IRPosition::IRP_CALL_SITE_RETURNED:
999 case IRPosition::IRP_CALL_SITE_ARGUMENT:
1000 Attrs = cast<CallBase>(IRP.getAnchorValue()).getAttributes();
1001 break;
1002 }
1003
1004 ChangeStatus HasChanged = ChangeStatus::UNCHANGED;
1005 LLVMContext &Ctx = IRP.getAnchorValue().getContext();
1006 for (const Attribute &Attr : DeducedAttrs) {
1007 if (!addIfNotExistent(Ctx, Attr, Attrs, IRP.getAttrIdx(), ForceReplace))
1008 continue;
1009
1010 HasChanged = ChangeStatus::CHANGED;
1011 }
1012
1013 if (HasChanged == ChangeStatus::UNCHANGED)
1014 return HasChanged;
1015
1016 switch (PK) {
1017 case IRPosition::IRP_ARGUMENT:
1018 case IRPosition::IRP_FUNCTION:
1019 case IRPosition::IRP_RETURNED:
1020 ScopeFn->setAttributes(Attrs);
1021 break;
1022 case IRPosition::IRP_CALL_SITE:
1023 case IRPosition::IRP_CALL_SITE_RETURNED:
1024 case IRPosition::IRP_CALL_SITE_ARGUMENT:
1025 cast<CallBase>(IRP.getAnchorValue()).setAttributes(Attrs);
1026 break;
1027 case IRPosition::IRP_INVALID:
1028 case IRPosition::IRP_FLOAT:
1029 break;
1030 }
1031
1032 return HasChanged;
1033 }
1034
1035 const IRPosition IRPosition::EmptyKey(DenseMapInfo<void *>::getEmptyKey());
1036 const IRPosition
1037 IRPosition::TombstoneKey(DenseMapInfo<void *>::getTombstoneKey());
1038
SubsumingPositionIterator(const IRPosition & IRP)1039 SubsumingPositionIterator::SubsumingPositionIterator(const IRPosition &IRP) {
1040 IRPositions.emplace_back(IRP);
1041
1042 // Helper to determine if operand bundles on a call site are benin or
1043 // potentially problematic. We handle only llvm.assume for now.
1044 auto CanIgnoreOperandBundles = [](const CallBase &CB) {
1045 return (isa<IntrinsicInst>(CB) &&
1046 cast<IntrinsicInst>(CB).getIntrinsicID() == Intrinsic ::assume);
1047 };
1048
1049 const auto *CB = dyn_cast<CallBase>(&IRP.getAnchorValue());
1050 switch (IRP.getPositionKind()) {
1051 case IRPosition::IRP_INVALID:
1052 case IRPosition::IRP_FLOAT:
1053 case IRPosition::IRP_FUNCTION:
1054 return;
1055 case IRPosition::IRP_ARGUMENT:
1056 case IRPosition::IRP_RETURNED:
1057 IRPositions.emplace_back(IRPosition::function(*IRP.getAnchorScope()));
1058 return;
1059 case IRPosition::IRP_CALL_SITE:
1060 assert(CB && "Expected call site!");
1061 // TODO: We need to look at the operand bundles similar to the redirection
1062 // in CallBase.
1063 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB))
1064 if (const Function *Callee = CB->getCalledFunction())
1065 IRPositions.emplace_back(IRPosition::function(*Callee));
1066 return;
1067 case IRPosition::IRP_CALL_SITE_RETURNED:
1068 assert(CB && "Expected call site!");
1069 // TODO: We need to look at the operand bundles similar to the redirection
1070 // in CallBase.
1071 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1072 if (const Function *Callee = CB->getCalledFunction()) {
1073 IRPositions.emplace_back(IRPosition::returned(*Callee));
1074 IRPositions.emplace_back(IRPosition::function(*Callee));
1075 for (const Argument &Arg : Callee->args())
1076 if (Arg.hasReturnedAttr()) {
1077 IRPositions.emplace_back(
1078 IRPosition::callsite_argument(*CB, Arg.getArgNo()));
1079 IRPositions.emplace_back(
1080 IRPosition::value(*CB->getArgOperand(Arg.getArgNo())));
1081 IRPositions.emplace_back(IRPosition::argument(Arg));
1082 }
1083 }
1084 }
1085 IRPositions.emplace_back(IRPosition::callsite_function(*CB));
1086 return;
1087 case IRPosition::IRP_CALL_SITE_ARGUMENT: {
1088 assert(CB && "Expected call site!");
1089 // TODO: We need to look at the operand bundles similar to the redirection
1090 // in CallBase.
1091 if (!CB->hasOperandBundles() || CanIgnoreOperandBundles(*CB)) {
1092 const Function *Callee = CB->getCalledFunction();
1093 if (Callee) {
1094 if (Argument *Arg = IRP.getAssociatedArgument())
1095 IRPositions.emplace_back(IRPosition::argument(*Arg));
1096 IRPositions.emplace_back(IRPosition::function(*Callee));
1097 }
1098 }
1099 IRPositions.emplace_back(IRPosition::value(IRP.getAssociatedValue()));
1100 return;
1101 }
1102 }
1103 }
1104
hasAttr(ArrayRef<Attribute::AttrKind> AKs,bool IgnoreSubsumingPositions,Attributor * A) const1105 bool IRPosition::hasAttr(ArrayRef<Attribute::AttrKind> AKs,
1106 bool IgnoreSubsumingPositions, Attributor *A) const {
1107 SmallVector<Attribute, 4> Attrs;
1108 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1109 for (Attribute::AttrKind AK : AKs)
1110 if (EquivIRP.getAttrsFromIRAttr(AK, Attrs))
1111 return true;
1112 // The first position returned by the SubsumingPositionIterator is
1113 // always the position itself. If we ignore subsuming positions we
1114 // are done after the first iteration.
1115 if (IgnoreSubsumingPositions)
1116 break;
1117 }
1118 if (A)
1119 for (Attribute::AttrKind AK : AKs)
1120 if (getAttrsFromAssumes(AK, Attrs, *A))
1121 return true;
1122 return false;
1123 }
1124
getAttrs(ArrayRef<Attribute::AttrKind> AKs,SmallVectorImpl<Attribute> & Attrs,bool IgnoreSubsumingPositions,Attributor * A) const1125 void IRPosition::getAttrs(ArrayRef<Attribute::AttrKind> AKs,
1126 SmallVectorImpl<Attribute> &Attrs,
1127 bool IgnoreSubsumingPositions, Attributor *A) const {
1128 for (const IRPosition &EquivIRP : SubsumingPositionIterator(*this)) {
1129 for (Attribute::AttrKind AK : AKs)
1130 EquivIRP.getAttrsFromIRAttr(AK, Attrs);
1131 // The first position returned by the SubsumingPositionIterator is
1132 // always the position itself. If we ignore subsuming positions we
1133 // are done after the first iteration.
1134 if (IgnoreSubsumingPositions)
1135 break;
1136 }
1137 if (A)
1138 for (Attribute::AttrKind AK : AKs)
1139 getAttrsFromAssumes(AK, Attrs, *A);
1140 }
1141
getAttrsFromIRAttr(Attribute::AttrKind AK,SmallVectorImpl<Attribute> & Attrs) const1142 bool IRPosition::getAttrsFromIRAttr(Attribute::AttrKind AK,
1143 SmallVectorImpl<Attribute> &Attrs) const {
1144 if (getPositionKind() == IRP_INVALID || getPositionKind() == IRP_FLOAT)
1145 return false;
1146
1147 AttributeList AttrList;
1148 if (const auto *CB = dyn_cast<CallBase>(&getAnchorValue()))
1149 AttrList = CB->getAttributes();
1150 else
1151 AttrList = getAssociatedFunction()->getAttributes();
1152
1153 bool HasAttr = AttrList.hasAttributeAtIndex(getAttrIdx(), AK);
1154 if (HasAttr)
1155 Attrs.push_back(AttrList.getAttributeAtIndex(getAttrIdx(), AK));
1156 return HasAttr;
1157 }
1158
getAttrsFromAssumes(Attribute::AttrKind AK,SmallVectorImpl<Attribute> & Attrs,Attributor & A) const1159 bool IRPosition::getAttrsFromAssumes(Attribute::AttrKind AK,
1160 SmallVectorImpl<Attribute> &Attrs,
1161 Attributor &A) const {
1162 assert(getPositionKind() != IRP_INVALID && "Did expect a valid position!");
1163 Value &AssociatedValue = getAssociatedValue();
1164
1165 const Assume2KnowledgeMap &A2K =
1166 A.getInfoCache().getKnowledgeMap().lookup({&AssociatedValue, AK});
1167
1168 // Check if we found any potential assume use, if not we don't need to create
1169 // explorer iterators.
1170 if (A2K.empty())
1171 return false;
1172
1173 LLVMContext &Ctx = AssociatedValue.getContext();
1174 unsigned AttrsSize = Attrs.size();
1175 MustBeExecutedContextExplorer &Explorer =
1176 A.getInfoCache().getMustBeExecutedContextExplorer();
1177 auto EIt = Explorer.begin(getCtxI()), EEnd = Explorer.end(getCtxI());
1178 for (const auto &It : A2K)
1179 if (Explorer.findInContextOf(It.first, EIt, EEnd))
1180 Attrs.push_back(Attribute::get(Ctx, AK, It.second.Max));
1181 return AttrsSize != Attrs.size();
1182 }
1183
verify()1184 void IRPosition::verify() {
1185 #ifdef EXPENSIVE_CHECKS
1186 switch (getPositionKind()) {
1187 case IRP_INVALID:
1188 assert((CBContext == nullptr) &&
1189 "Invalid position must not have CallBaseContext!");
1190 assert(!Enc.getOpaqueValue() &&
1191 "Expected a nullptr for an invalid position!");
1192 return;
1193 case IRP_FLOAT:
1194 assert((!isa<Argument>(&getAssociatedValue())) &&
1195 "Expected specialized kind for argument values!");
1196 return;
1197 case IRP_RETURNED:
1198 assert(isa<Function>(getAsValuePtr()) &&
1199 "Expected function for a 'returned' position!");
1200 assert(getAsValuePtr() == &getAssociatedValue() &&
1201 "Associated value mismatch!");
1202 return;
1203 case IRP_CALL_SITE_RETURNED:
1204 assert((CBContext == nullptr) &&
1205 "'call site returned' position must not have CallBaseContext!");
1206 assert((isa<CallBase>(getAsValuePtr())) &&
1207 "Expected call base for 'call site returned' position!");
1208 assert(getAsValuePtr() == &getAssociatedValue() &&
1209 "Associated value mismatch!");
1210 return;
1211 case IRP_CALL_SITE:
1212 assert((CBContext == nullptr) &&
1213 "'call site function' position must not have CallBaseContext!");
1214 assert((isa<CallBase>(getAsValuePtr())) &&
1215 "Expected call base for 'call site function' position!");
1216 assert(getAsValuePtr() == &getAssociatedValue() &&
1217 "Associated value mismatch!");
1218 return;
1219 case IRP_FUNCTION:
1220 assert(isa<Function>(getAsValuePtr()) &&
1221 "Expected function for a 'function' position!");
1222 assert(getAsValuePtr() == &getAssociatedValue() &&
1223 "Associated value mismatch!");
1224 return;
1225 case IRP_ARGUMENT:
1226 assert(isa<Argument>(getAsValuePtr()) &&
1227 "Expected argument for a 'argument' position!");
1228 assert(getAsValuePtr() == &getAssociatedValue() &&
1229 "Associated value mismatch!");
1230 return;
1231 case IRP_CALL_SITE_ARGUMENT: {
1232 assert((CBContext == nullptr) &&
1233 "'call site argument' position must not have CallBaseContext!");
1234 Use *U = getAsUsePtr();
1235 (void)U; // Silence unused variable warning.
1236 assert(U && "Expected use for a 'call site argument' position!");
1237 assert(isa<CallBase>(U->getUser()) &&
1238 "Expected call base user for a 'call site argument' position!");
1239 assert(cast<CallBase>(U->getUser())->isArgOperand(U) &&
1240 "Expected call base argument operand for a 'call site argument' "
1241 "position");
1242 assert(cast<CallBase>(U->getUser())->getArgOperandNo(U) ==
1243 unsigned(getCallSiteArgNo()) &&
1244 "Argument number mismatch!");
1245 assert(U->get() == &getAssociatedValue() && "Associated value mismatch!");
1246 return;
1247 }
1248 }
1249 #endif
1250 }
1251
1252 std::optional<Constant *>
getAssumedConstant(const IRPosition & IRP,const AbstractAttribute & AA,bool & UsedAssumedInformation)1253 Attributor::getAssumedConstant(const IRPosition &IRP,
1254 const AbstractAttribute &AA,
1255 bool &UsedAssumedInformation) {
1256 // First check all callbacks provided by outside AAs. If any of them returns
1257 // a non-null value that is different from the associated value, or
1258 // std::nullopt, we assume it's simplified.
1259 for (auto &CB : SimplificationCallbacks.lookup(IRP)) {
1260 std::optional<Value *> SimplifiedV = CB(IRP, &AA, UsedAssumedInformation);
1261 if (!SimplifiedV)
1262 return std::nullopt;
1263 if (isa_and_nonnull<Constant>(*SimplifiedV))
1264 return cast<Constant>(*SimplifiedV);
1265 return nullptr;
1266 }
1267 if (auto *C = dyn_cast<Constant>(&IRP.getAssociatedValue()))
1268 return C;
1269 SmallVector<AA::ValueAndContext> Values;
1270 if (getAssumedSimplifiedValues(IRP, &AA, Values,
1271 AA::ValueScope::Interprocedural,
1272 UsedAssumedInformation)) {
1273 if (Values.empty())
1274 return std::nullopt;
1275 if (auto *C = dyn_cast_or_null<Constant>(
1276 AAPotentialValues::getSingleValue(*this, AA, IRP, Values)))
1277 return C;
1278 }
1279 return nullptr;
1280 }
1281
getAssumedSimplified(const IRPosition & IRP,const AbstractAttribute * AA,bool & UsedAssumedInformation,AA::ValueScope S)1282 std::optional<Value *> Attributor::getAssumedSimplified(
1283 const IRPosition &IRP, const AbstractAttribute *AA,
1284 bool &UsedAssumedInformation, AA::ValueScope S) {
1285 // First check all callbacks provided by outside AAs. If any of them returns
1286 // a non-null value that is different from the associated value, or
1287 // std::nullopt, we assume it's simplified.
1288 for (auto &CB : SimplificationCallbacks.lookup(IRP))
1289 return CB(IRP, AA, UsedAssumedInformation);
1290
1291 SmallVector<AA::ValueAndContext> Values;
1292 if (!getAssumedSimplifiedValues(IRP, AA, Values, S, UsedAssumedInformation))
1293 return &IRP.getAssociatedValue();
1294 if (Values.empty())
1295 return std::nullopt;
1296 if (AA)
1297 if (Value *V = AAPotentialValues::getSingleValue(*this, *AA, IRP, Values))
1298 return V;
1299 if (IRP.getPositionKind() == IRPosition::IRP_RETURNED ||
1300 IRP.getPositionKind() == IRPosition::IRP_CALL_SITE_RETURNED)
1301 return nullptr;
1302 return &IRP.getAssociatedValue();
1303 }
1304
getAssumedSimplifiedValues(const IRPosition & IRP,const AbstractAttribute * AA,SmallVectorImpl<AA::ValueAndContext> & Values,AA::ValueScope S,bool & UsedAssumedInformation)1305 bool Attributor::getAssumedSimplifiedValues(
1306 const IRPosition &IRP, const AbstractAttribute *AA,
1307 SmallVectorImpl<AA::ValueAndContext> &Values, AA::ValueScope S,
1308 bool &UsedAssumedInformation) {
1309 // First check all callbacks provided by outside AAs. If any of them returns
1310 // a non-null value that is different from the associated value, or
1311 // std::nullopt, we assume it's simplified.
1312 const auto &SimplificationCBs = SimplificationCallbacks.lookup(IRP);
1313 for (const auto &CB : SimplificationCBs) {
1314 std::optional<Value *> CBResult = CB(IRP, AA, UsedAssumedInformation);
1315 if (!CBResult.has_value())
1316 continue;
1317 Value *V = *CBResult;
1318 if (!V)
1319 return false;
1320 if ((S & AA::ValueScope::Interprocedural) ||
1321 AA::isValidInScope(*V, IRP.getAnchorScope()))
1322 Values.push_back(AA::ValueAndContext{*V, nullptr});
1323 else
1324 return false;
1325 }
1326 if (!SimplificationCBs.empty())
1327 return true;
1328
1329 // If no high-level/outside simplification occurred, use AAPotentialValues.
1330 const auto &PotentialValuesAA =
1331 getOrCreateAAFor<AAPotentialValues>(IRP, AA, DepClassTy::OPTIONAL);
1332 if (!PotentialValuesAA.getAssumedSimplifiedValues(*this, Values, S))
1333 return false;
1334 UsedAssumedInformation |= !PotentialValuesAA.isAtFixpoint();
1335 return true;
1336 }
1337
translateArgumentToCallSiteContent(std::optional<Value * > V,CallBase & CB,const AbstractAttribute & AA,bool & UsedAssumedInformation)1338 std::optional<Value *> Attributor::translateArgumentToCallSiteContent(
1339 std::optional<Value *> V, CallBase &CB, const AbstractAttribute &AA,
1340 bool &UsedAssumedInformation) {
1341 if (!V)
1342 return V;
1343 if (*V == nullptr || isa<Constant>(*V))
1344 return V;
1345 if (auto *Arg = dyn_cast<Argument>(*V))
1346 if (CB.getCalledFunction() == Arg->getParent())
1347 if (!Arg->hasPointeeInMemoryValueAttr())
1348 return getAssumedSimplified(
1349 IRPosition::callsite_argument(CB, Arg->getArgNo()), AA,
1350 UsedAssumedInformation, AA::Intraprocedural);
1351 return nullptr;
1352 }
1353
~Attributor()1354 Attributor::~Attributor() {
1355 // The abstract attributes are allocated via the BumpPtrAllocator Allocator,
1356 // thus we cannot delete them. We can, and want to, destruct them though.
1357 for (auto &It : AAMap) {
1358 AbstractAttribute *AA = It.getSecond();
1359 AA->~AbstractAttribute();
1360 }
1361 }
1362
isAssumedDead(const AbstractAttribute & AA,const AAIsDead * FnLivenessAA,bool & UsedAssumedInformation,bool CheckBBLivenessOnly,DepClassTy DepClass)1363 bool Attributor::isAssumedDead(const AbstractAttribute &AA,
1364 const AAIsDead *FnLivenessAA,
1365 bool &UsedAssumedInformation,
1366 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1367 const IRPosition &IRP = AA.getIRPosition();
1368 if (!Functions.count(IRP.getAnchorScope()))
1369 return false;
1370 return isAssumedDead(IRP, &AA, FnLivenessAA, UsedAssumedInformation,
1371 CheckBBLivenessOnly, DepClass);
1372 }
1373
isAssumedDead(const Use & U,const AbstractAttribute * QueryingAA,const AAIsDead * FnLivenessAA,bool & UsedAssumedInformation,bool CheckBBLivenessOnly,DepClassTy DepClass)1374 bool Attributor::isAssumedDead(const Use &U,
1375 const AbstractAttribute *QueryingAA,
1376 const AAIsDead *FnLivenessAA,
1377 bool &UsedAssumedInformation,
1378 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1379 Instruction *UserI = dyn_cast<Instruction>(U.getUser());
1380 if (!UserI)
1381 return isAssumedDead(IRPosition::value(*U.get()), QueryingAA, FnLivenessAA,
1382 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1383
1384 if (auto *CB = dyn_cast<CallBase>(UserI)) {
1385 // For call site argument uses we can check if the argument is
1386 // unused/dead.
1387 if (CB->isArgOperand(&U)) {
1388 const IRPosition &CSArgPos =
1389 IRPosition::callsite_argument(*CB, CB->getArgOperandNo(&U));
1390 return isAssumedDead(CSArgPos, QueryingAA, FnLivenessAA,
1391 UsedAssumedInformation, CheckBBLivenessOnly,
1392 DepClass);
1393 }
1394 } else if (ReturnInst *RI = dyn_cast<ReturnInst>(UserI)) {
1395 const IRPosition &RetPos = IRPosition::returned(*RI->getFunction());
1396 return isAssumedDead(RetPos, QueryingAA, FnLivenessAA,
1397 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1398 } else if (PHINode *PHI = dyn_cast<PHINode>(UserI)) {
1399 BasicBlock *IncomingBB = PHI->getIncomingBlock(U);
1400 return isAssumedDead(*IncomingBB->getTerminator(), QueryingAA, FnLivenessAA,
1401 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1402 } else if (StoreInst *SI = dyn_cast<StoreInst>(UserI)) {
1403 if (!CheckBBLivenessOnly && SI->getPointerOperand() != U.get()) {
1404 const IRPosition IRP = IRPosition::inst(*SI);
1405 const AAIsDead &IsDeadAA =
1406 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1407 if (IsDeadAA.isRemovableStore()) {
1408 if (QueryingAA)
1409 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1410 if (!IsDeadAA.isKnown(AAIsDead::IS_REMOVABLE))
1411 UsedAssumedInformation = true;
1412 return true;
1413 }
1414 }
1415 }
1416
1417 return isAssumedDead(IRPosition::inst(*UserI), QueryingAA, FnLivenessAA,
1418 UsedAssumedInformation, CheckBBLivenessOnly, DepClass);
1419 }
1420
isAssumedDead(const Instruction & I,const AbstractAttribute * QueryingAA,const AAIsDead * FnLivenessAA,bool & UsedAssumedInformation,bool CheckBBLivenessOnly,DepClassTy DepClass,bool CheckForDeadStore)1421 bool Attributor::isAssumedDead(const Instruction &I,
1422 const AbstractAttribute *QueryingAA,
1423 const AAIsDead *FnLivenessAA,
1424 bool &UsedAssumedInformation,
1425 bool CheckBBLivenessOnly, DepClassTy DepClass,
1426 bool CheckForDeadStore) {
1427 const IRPosition::CallBaseContext *CBCtx =
1428 QueryingAA ? QueryingAA->getCallBaseContext() : nullptr;
1429
1430 if (ManifestAddedBlocks.contains(I.getParent()))
1431 return false;
1432
1433 const Function &F = *I.getFunction();
1434 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1435 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F, CBCtx),
1436 QueryingAA, DepClassTy::NONE);
1437
1438 // Don't use recursive reasoning.
1439 if (QueryingAA == FnLivenessAA)
1440 return false;
1441
1442 // If we have a context instruction and a liveness AA we use it.
1443 if (CheckBBLivenessOnly ? FnLivenessAA->isAssumedDead(I.getParent())
1444 : FnLivenessAA->isAssumedDead(&I)) {
1445 if (QueryingAA)
1446 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1447 if (!FnLivenessAA->isKnownDead(&I))
1448 UsedAssumedInformation = true;
1449 return true;
1450 }
1451
1452 if (CheckBBLivenessOnly)
1453 return false;
1454
1455 const IRPosition IRP = IRPosition::inst(I, CBCtx);
1456 const AAIsDead &IsDeadAA =
1457 getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1458
1459 // Don't use recursive reasoning.
1460 if (QueryingAA == &IsDeadAA)
1461 return false;
1462
1463 if (IsDeadAA.isAssumedDead()) {
1464 if (QueryingAA)
1465 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1466 if (!IsDeadAA.isKnownDead())
1467 UsedAssumedInformation = true;
1468 return true;
1469 }
1470
1471 if (CheckForDeadStore && isa<StoreInst>(I) && IsDeadAA.isRemovableStore()) {
1472 if (QueryingAA)
1473 recordDependence(IsDeadAA, *QueryingAA, DepClass);
1474 if (!IsDeadAA.isKnownDead())
1475 UsedAssumedInformation = true;
1476 return true;
1477 }
1478
1479 return false;
1480 }
1481
isAssumedDead(const IRPosition & IRP,const AbstractAttribute * QueryingAA,const AAIsDead * FnLivenessAA,bool & UsedAssumedInformation,bool CheckBBLivenessOnly,DepClassTy DepClass)1482 bool Attributor::isAssumedDead(const IRPosition &IRP,
1483 const AbstractAttribute *QueryingAA,
1484 const AAIsDead *FnLivenessAA,
1485 bool &UsedAssumedInformation,
1486 bool CheckBBLivenessOnly, DepClassTy DepClass) {
1487 // Don't check liveness for constants, e.g. functions, used as (floating)
1488 // values since the context instruction and such is here meaningless.
1489 if (IRP.getPositionKind() == IRPosition::IRP_FLOAT &&
1490 isa<Constant>(IRP.getAssociatedValue())) {
1491 return false;
1492 }
1493
1494 Instruction *CtxI = IRP.getCtxI();
1495 if (CtxI &&
1496 isAssumedDead(*CtxI, QueryingAA, FnLivenessAA, UsedAssumedInformation,
1497 /* CheckBBLivenessOnly */ true,
1498 CheckBBLivenessOnly ? DepClass : DepClassTy::OPTIONAL))
1499 return true;
1500
1501 if (CheckBBLivenessOnly)
1502 return false;
1503
1504 // If we haven't succeeded we query the specific liveness info for the IRP.
1505 const AAIsDead *IsDeadAA;
1506 if (IRP.getPositionKind() == IRPosition::IRP_CALL_SITE)
1507 IsDeadAA = &getOrCreateAAFor<AAIsDead>(
1508 IRPosition::callsite_returned(cast<CallBase>(IRP.getAssociatedValue())),
1509 QueryingAA, DepClassTy::NONE);
1510 else
1511 IsDeadAA = &getOrCreateAAFor<AAIsDead>(IRP, QueryingAA, DepClassTy::NONE);
1512
1513 // Don't use recursive reasoning.
1514 if (QueryingAA == IsDeadAA)
1515 return false;
1516
1517 if (IsDeadAA->isAssumedDead()) {
1518 if (QueryingAA)
1519 recordDependence(*IsDeadAA, *QueryingAA, DepClass);
1520 if (!IsDeadAA->isKnownDead())
1521 UsedAssumedInformation = true;
1522 return true;
1523 }
1524
1525 return false;
1526 }
1527
isAssumedDead(const BasicBlock & BB,const AbstractAttribute * QueryingAA,const AAIsDead * FnLivenessAA,DepClassTy DepClass)1528 bool Attributor::isAssumedDead(const BasicBlock &BB,
1529 const AbstractAttribute *QueryingAA,
1530 const AAIsDead *FnLivenessAA,
1531 DepClassTy DepClass) {
1532 const Function &F = *BB.getParent();
1533 if (!FnLivenessAA || FnLivenessAA->getAnchorScope() != &F)
1534 FnLivenessAA = &getOrCreateAAFor<AAIsDead>(IRPosition::function(F),
1535 QueryingAA, DepClassTy::NONE);
1536
1537 // Don't use recursive reasoning.
1538 if (QueryingAA == FnLivenessAA)
1539 return false;
1540
1541 if (FnLivenessAA->isAssumedDead(&BB)) {
1542 if (QueryingAA)
1543 recordDependence(*FnLivenessAA, *QueryingAA, DepClass);
1544 return true;
1545 }
1546
1547 return false;
1548 }
1549
checkForAllUses(function_ref<bool (const Use &,bool &)> Pred,const AbstractAttribute & QueryingAA,const Value & V,bool CheckBBLivenessOnly,DepClassTy LivenessDepClass,bool IgnoreDroppableUses,function_ref<bool (const Use & OldU,const Use & NewU)> EquivalentUseCB)1550 bool Attributor::checkForAllUses(
1551 function_ref<bool(const Use &, bool &)> Pred,
1552 const AbstractAttribute &QueryingAA, const Value &V,
1553 bool CheckBBLivenessOnly, DepClassTy LivenessDepClass,
1554 bool IgnoreDroppableUses,
1555 function_ref<bool(const Use &OldU, const Use &NewU)> EquivalentUseCB) {
1556
1557 // Check virtual uses first.
1558 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&V))
1559 if (!CB(*this, &QueryingAA))
1560 return false;
1561
1562 // Check the trivial case first as it catches void values.
1563 if (V.use_empty())
1564 return true;
1565
1566 const IRPosition &IRP = QueryingAA.getIRPosition();
1567 SmallVector<const Use *, 16> Worklist;
1568 SmallPtrSet<const Use *, 16> Visited;
1569
1570 auto AddUsers = [&](const Value &V, const Use *OldUse) {
1571 for (const Use &UU : V.uses()) {
1572 if (OldUse && EquivalentUseCB && !EquivalentUseCB(*OldUse, UU)) {
1573 LLVM_DEBUG(dbgs() << "[Attributor] Potential copy was "
1574 "rejected by the equivalence call back: "
1575 << *UU << "!\n");
1576 return false;
1577 }
1578
1579 Worklist.push_back(&UU);
1580 }
1581 return true;
1582 };
1583
1584 AddUsers(V, /* OldUse */ nullptr);
1585
1586 LLVM_DEBUG(dbgs() << "[Attributor] Got " << Worklist.size()
1587 << " initial uses to check\n");
1588
1589 const Function *ScopeFn = IRP.getAnchorScope();
1590 const auto *LivenessAA =
1591 ScopeFn ? &getAAFor<AAIsDead>(QueryingAA, IRPosition::function(*ScopeFn),
1592 DepClassTy::NONE)
1593 : nullptr;
1594
1595 while (!Worklist.empty()) {
1596 const Use *U = Worklist.pop_back_val();
1597 if (isa<PHINode>(U->getUser()) && !Visited.insert(U).second)
1598 continue;
1599 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1600 if (auto *Fn = dyn_cast<Function>(U->getUser()))
1601 dbgs() << "[Attributor] Check use: " << **U << " in " << Fn->getName()
1602 << "\n";
1603 else
1604 dbgs() << "[Attributor] Check use: " << **U << " in " << *U->getUser()
1605 << "\n";
1606 });
1607 bool UsedAssumedInformation = false;
1608 if (isAssumedDead(*U, &QueryingAA, LivenessAA, UsedAssumedInformation,
1609 CheckBBLivenessOnly, LivenessDepClass)) {
1610 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1611 dbgs() << "[Attributor] Dead use, skip!\n");
1612 continue;
1613 }
1614 if (IgnoreDroppableUses && U->getUser()->isDroppable()) {
1615 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1616 dbgs() << "[Attributor] Droppable user, skip!\n");
1617 continue;
1618 }
1619
1620 if (auto *SI = dyn_cast<StoreInst>(U->getUser())) {
1621 if (&SI->getOperandUse(0) == U) {
1622 if (!Visited.insert(U).second)
1623 continue;
1624 SmallSetVector<Value *, 4> PotentialCopies;
1625 if (AA::getPotentialCopiesOfStoredValue(
1626 *this, *SI, PotentialCopies, QueryingAA, UsedAssumedInformation,
1627 /* OnlyExact */ true)) {
1628 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1629 dbgs()
1630 << "[Attributor] Value is stored, continue with "
1631 << PotentialCopies.size()
1632 << " potential copies instead!\n");
1633 for (Value *PotentialCopy : PotentialCopies)
1634 if (!AddUsers(*PotentialCopy, U))
1635 return false;
1636 continue;
1637 }
1638 }
1639 }
1640
1641 bool Follow = false;
1642 if (!Pred(*U, Follow))
1643 return false;
1644 if (!Follow)
1645 continue;
1646
1647 User &Usr = *U->getUser();
1648 AddUsers(Usr, /* OldUse */ nullptr);
1649
1650 auto *RI = dyn_cast<ReturnInst>(&Usr);
1651 if (!RI)
1652 continue;
1653
1654 Function &F = *RI->getFunction();
1655 auto CallSitePred = [&](AbstractCallSite ACS) {
1656 return AddUsers(*ACS.getInstruction(), U);
1657 };
1658 if (!checkForAllCallSites(CallSitePred, F, /* RequireAllCallSites */ true,
1659 &QueryingAA, UsedAssumedInformation)) {
1660 LLVM_DEBUG(dbgs() << "[Attributor] Could not follow return instruction "
1661 "to all call sites: "
1662 << *RI << "\n");
1663 return false;
1664 }
1665 }
1666
1667 return true;
1668 }
1669
checkForAllCallSites(function_ref<bool (AbstractCallSite)> Pred,const AbstractAttribute & QueryingAA,bool RequireAllCallSites,bool & UsedAssumedInformation)1670 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1671 const AbstractAttribute &QueryingAA,
1672 bool RequireAllCallSites,
1673 bool &UsedAssumedInformation) {
1674 // We can try to determine information from
1675 // the call sites. However, this is only possible all call sites are known,
1676 // hence the function has internal linkage.
1677 const IRPosition &IRP = QueryingAA.getIRPosition();
1678 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1679 if (!AssociatedFunction) {
1680 LLVM_DEBUG(dbgs() << "[Attributor] No function associated with " << IRP
1681 << "\n");
1682 return false;
1683 }
1684
1685 return checkForAllCallSites(Pred, *AssociatedFunction, RequireAllCallSites,
1686 &QueryingAA, UsedAssumedInformation);
1687 }
1688
checkForAllCallSites(function_ref<bool (AbstractCallSite)> Pred,const Function & Fn,bool RequireAllCallSites,const AbstractAttribute * QueryingAA,bool & UsedAssumedInformation,bool CheckPotentiallyDead)1689 bool Attributor::checkForAllCallSites(function_ref<bool(AbstractCallSite)> Pred,
1690 const Function &Fn,
1691 bool RequireAllCallSites,
1692 const AbstractAttribute *QueryingAA,
1693 bool &UsedAssumedInformation,
1694 bool CheckPotentiallyDead) {
1695 if (RequireAllCallSites && !Fn.hasLocalLinkage()) {
1696 LLVM_DEBUG(
1697 dbgs()
1698 << "[Attributor] Function " << Fn.getName()
1699 << " has no internal linkage, hence not all call sites are known\n");
1700 return false;
1701 }
1702 // Check virtual uses first.
1703 for (VirtualUseCallbackTy &CB : VirtualUseCallbacks.lookup(&Fn))
1704 if (!CB(*this, QueryingAA))
1705 return false;
1706
1707 SmallVector<const Use *, 8> Uses(make_pointer_range(Fn.uses()));
1708 for (unsigned u = 0; u < Uses.size(); ++u) {
1709 const Use &U = *Uses[u];
1710 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1711 if (auto *Fn = dyn_cast<Function>(U))
1712 dbgs() << "[Attributor] Check use: " << Fn->getName() << " in "
1713 << *U.getUser() << "\n";
1714 else
1715 dbgs() << "[Attributor] Check use: " << *U << " in " << *U.getUser()
1716 << "\n";
1717 });
1718 if (!CheckPotentiallyDead &&
1719 isAssumedDead(U, QueryingAA, nullptr, UsedAssumedInformation,
1720 /* CheckBBLivenessOnly */ true)) {
1721 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1722 dbgs() << "[Attributor] Dead use, skip!\n");
1723 continue;
1724 }
1725 if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
1726 if (CE->isCast() && CE->getType()->isPointerTy()) {
1727 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, {
1728 dbgs() << "[Attributor] Use, is constant cast expression, add "
1729 << CE->getNumUses() << " uses of that expression instead!\n";
1730 });
1731 for (const Use &CEU : CE->uses())
1732 Uses.push_back(&CEU);
1733 continue;
1734 }
1735 }
1736
1737 AbstractCallSite ACS(&U);
1738 if (!ACS) {
1739 LLVM_DEBUG(dbgs() << "[Attributor] Function " << Fn.getName()
1740 << " has non call site use " << *U.get() << " in "
1741 << *U.getUser() << "\n");
1742 // BlockAddress users are allowed.
1743 if (isa<BlockAddress>(U.getUser()))
1744 continue;
1745 return false;
1746 }
1747
1748 const Use *EffectiveUse =
1749 ACS.isCallbackCall() ? &ACS.getCalleeUseForCallback() : &U;
1750 if (!ACS.isCallee(EffectiveUse)) {
1751 if (!RequireAllCallSites) {
1752 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1753 << " is not a call of " << Fn.getName()
1754 << ", skip use\n");
1755 continue;
1756 }
1757 LLVM_DEBUG(dbgs() << "[Attributor] User " << *EffectiveUse->getUser()
1758 << " is an invalid use of " << Fn.getName() << "\n");
1759 return false;
1760 }
1761
1762 // Make sure the arguments that can be matched between the call site and the
1763 // callee argee on their type. It is unlikely they do not and it doesn't
1764 // make sense for all attributes to know/care about this.
1765 assert(&Fn == ACS.getCalledFunction() && "Expected known callee");
1766 unsigned MinArgsParams =
1767 std::min(size_t(ACS.getNumArgOperands()), Fn.arg_size());
1768 for (unsigned u = 0; u < MinArgsParams; ++u) {
1769 Value *CSArgOp = ACS.getCallArgOperand(u);
1770 if (CSArgOp && Fn.getArg(u)->getType() != CSArgOp->getType()) {
1771 LLVM_DEBUG(
1772 dbgs() << "[Attributor] Call site / callee argument type mismatch ["
1773 << u << "@" << Fn.getName() << ": "
1774 << *Fn.getArg(u)->getType() << " vs. "
1775 << *ACS.getCallArgOperand(u)->getType() << "\n");
1776 return false;
1777 }
1778 }
1779
1780 if (Pred(ACS))
1781 continue;
1782
1783 LLVM_DEBUG(dbgs() << "[Attributor] Call site callback failed for "
1784 << *ACS.getInstruction() << "\n");
1785 return false;
1786 }
1787
1788 return true;
1789 }
1790
shouldPropagateCallBaseContext(const IRPosition & IRP)1791 bool Attributor::shouldPropagateCallBaseContext(const IRPosition &IRP) {
1792 // TODO: Maintain a cache of Values that are
1793 // on the pathway from a Argument to a Instruction that would effect the
1794 // liveness/return state etc.
1795 return EnableCallSiteSpecific;
1796 }
1797
checkForAllReturnedValuesAndReturnInsts(function_ref<bool (Value &,const SmallSetVector<ReturnInst *,4> &)> Pred,const AbstractAttribute & QueryingAA)1798 bool Attributor::checkForAllReturnedValuesAndReturnInsts(
1799 function_ref<bool(Value &, const SmallSetVector<ReturnInst *, 4> &)> Pred,
1800 const AbstractAttribute &QueryingAA) {
1801
1802 const IRPosition &IRP = QueryingAA.getIRPosition();
1803 // Since we need to provide return instructions we have to have an exact
1804 // definition.
1805 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1806 if (!AssociatedFunction)
1807 return false;
1808
1809 // If this is a call site query we use the call site specific return values
1810 // and liveness information.
1811 // TODO: use the function scope once we have call site AAReturnedValues.
1812 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1813 const auto &AARetVal =
1814 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1815 if (!AARetVal.getState().isValidState())
1816 return false;
1817
1818 return AARetVal.checkForAllReturnedValuesAndReturnInsts(Pred);
1819 }
1820
checkForAllReturnedValues(function_ref<bool (Value &)> Pred,const AbstractAttribute & QueryingAA)1821 bool Attributor::checkForAllReturnedValues(
1822 function_ref<bool(Value &)> Pred, const AbstractAttribute &QueryingAA) {
1823
1824 const IRPosition &IRP = QueryingAA.getIRPosition();
1825 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1826 if (!AssociatedFunction)
1827 return false;
1828
1829 // TODO: use the function scope once we have call site AAReturnedValues.
1830 const IRPosition &QueryIRP = IRPosition::function(
1831 *AssociatedFunction, QueryingAA.getCallBaseContext());
1832 const auto &AARetVal =
1833 getAAFor<AAReturnedValues>(QueryingAA, QueryIRP, DepClassTy::REQUIRED);
1834 if (!AARetVal.getState().isValidState())
1835 return false;
1836
1837 return AARetVal.checkForAllReturnedValuesAndReturnInsts(
1838 [&](Value &RV, const SmallSetVector<ReturnInst *, 4> &) {
1839 return Pred(RV);
1840 });
1841 }
1842
checkForAllInstructionsImpl(Attributor * A,InformationCache::OpcodeInstMapTy & OpcodeInstMap,function_ref<bool (Instruction &)> Pred,const AbstractAttribute * QueryingAA,const AAIsDead * LivenessAA,const ArrayRef<unsigned> & Opcodes,bool & UsedAssumedInformation,bool CheckBBLivenessOnly=false,bool CheckPotentiallyDead=false)1843 static bool checkForAllInstructionsImpl(
1844 Attributor *A, InformationCache::OpcodeInstMapTy &OpcodeInstMap,
1845 function_ref<bool(Instruction &)> Pred, const AbstractAttribute *QueryingAA,
1846 const AAIsDead *LivenessAA, const ArrayRef<unsigned> &Opcodes,
1847 bool &UsedAssumedInformation, bool CheckBBLivenessOnly = false,
1848 bool CheckPotentiallyDead = false) {
1849 for (unsigned Opcode : Opcodes) {
1850 // Check if we have instructions with this opcode at all first.
1851 auto *Insts = OpcodeInstMap.lookup(Opcode);
1852 if (!Insts)
1853 continue;
1854
1855 for (Instruction *I : *Insts) {
1856 // Skip dead instructions.
1857 if (A && !CheckPotentiallyDead &&
1858 A->isAssumedDead(IRPosition::inst(*I), QueryingAA, LivenessAA,
1859 UsedAssumedInformation, CheckBBLivenessOnly)) {
1860 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1861 dbgs() << "[Attributor] Instruction " << *I
1862 << " is potentially dead, skip!\n";);
1863 continue;
1864 }
1865
1866 if (!Pred(*I))
1867 return false;
1868 }
1869 }
1870 return true;
1871 }
1872
checkForAllInstructions(function_ref<bool (Instruction &)> Pred,const Function * Fn,const AbstractAttribute & QueryingAA,const ArrayRef<unsigned> & Opcodes,bool & UsedAssumedInformation,bool CheckBBLivenessOnly,bool CheckPotentiallyDead)1873 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1874 const Function *Fn,
1875 const AbstractAttribute &QueryingAA,
1876 const ArrayRef<unsigned> &Opcodes,
1877 bool &UsedAssumedInformation,
1878 bool CheckBBLivenessOnly,
1879 bool CheckPotentiallyDead) {
1880 // Since we need to provide instructions we have to have an exact definition.
1881 if (!Fn || Fn->isDeclaration())
1882 return false;
1883
1884 // TODO: use the function scope once we have call site AAReturnedValues.
1885 const IRPosition &QueryIRP = IRPosition::function(*Fn);
1886 const auto *LivenessAA =
1887 (CheckBBLivenessOnly || CheckPotentiallyDead)
1888 ? nullptr
1889 : &(getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE));
1890
1891 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
1892 if (!checkForAllInstructionsImpl(this, OpcodeInstMap, Pred, &QueryingAA,
1893 LivenessAA, Opcodes, UsedAssumedInformation,
1894 CheckBBLivenessOnly, CheckPotentiallyDead))
1895 return false;
1896
1897 return true;
1898 }
1899
checkForAllInstructions(function_ref<bool (Instruction &)> Pred,const AbstractAttribute & QueryingAA,const ArrayRef<unsigned> & Opcodes,bool & UsedAssumedInformation,bool CheckBBLivenessOnly,bool CheckPotentiallyDead)1900 bool Attributor::checkForAllInstructions(function_ref<bool(Instruction &)> Pred,
1901 const AbstractAttribute &QueryingAA,
1902 const ArrayRef<unsigned> &Opcodes,
1903 bool &UsedAssumedInformation,
1904 bool CheckBBLivenessOnly,
1905 bool CheckPotentiallyDead) {
1906 const IRPosition &IRP = QueryingAA.getIRPosition();
1907 const Function *AssociatedFunction = IRP.getAssociatedFunction();
1908 return checkForAllInstructions(Pred, AssociatedFunction, QueryingAA, Opcodes,
1909 UsedAssumedInformation, CheckBBLivenessOnly,
1910 CheckPotentiallyDead);
1911 }
1912
checkForAllReadWriteInstructions(function_ref<bool (Instruction &)> Pred,AbstractAttribute & QueryingAA,bool & UsedAssumedInformation)1913 bool Attributor::checkForAllReadWriteInstructions(
1914 function_ref<bool(Instruction &)> Pred, AbstractAttribute &QueryingAA,
1915 bool &UsedAssumedInformation) {
1916
1917 const Function *AssociatedFunction =
1918 QueryingAA.getIRPosition().getAssociatedFunction();
1919 if (!AssociatedFunction)
1920 return false;
1921
1922 // TODO: use the function scope once we have call site AAReturnedValues.
1923 const IRPosition &QueryIRP = IRPosition::function(*AssociatedFunction);
1924 const auto &LivenessAA =
1925 getAAFor<AAIsDead>(QueryingAA, QueryIRP, DepClassTy::NONE);
1926
1927 for (Instruction *I :
1928 InfoCache.getReadOrWriteInstsForFunction(*AssociatedFunction)) {
1929 // Skip dead instructions.
1930 if (isAssumedDead(IRPosition::inst(*I), &QueryingAA, &LivenessAA,
1931 UsedAssumedInformation))
1932 continue;
1933
1934 if (!Pred(*I))
1935 return false;
1936 }
1937
1938 return true;
1939 }
1940
runTillFixpoint()1941 void Attributor::runTillFixpoint() {
1942 TimeTraceScope TimeScope("Attributor::runTillFixpoint");
1943 LLVM_DEBUG(dbgs() << "[Attributor] Identified and initialized "
1944 << DG.SyntheticRoot.Deps.size()
1945 << " abstract attributes.\n");
1946
1947 // Now that all abstract attributes are collected and initialized we start
1948 // the abstract analysis.
1949
1950 unsigned IterationCounter = 1;
1951 unsigned MaxIterations =
1952 Configuration.MaxFixpointIterations.value_or(SetFixpointIterations);
1953
1954 SmallVector<AbstractAttribute *, 32> ChangedAAs;
1955 SetVector<AbstractAttribute *> Worklist, InvalidAAs;
1956 Worklist.insert(DG.SyntheticRoot.begin(), DG.SyntheticRoot.end());
1957
1958 do {
1959 // Remember the size to determine new attributes.
1960 size_t NumAAs = DG.SyntheticRoot.Deps.size();
1961 LLVM_DEBUG(dbgs() << "\n\n[Attributor] #Iteration: " << IterationCounter
1962 << ", Worklist size: " << Worklist.size() << "\n");
1963
1964 // For invalid AAs we can fix dependent AAs that have a required dependence,
1965 // thereby folding long dependence chains in a single step without the need
1966 // to run updates.
1967 for (unsigned u = 0; u < InvalidAAs.size(); ++u) {
1968 AbstractAttribute *InvalidAA = InvalidAAs[u];
1969
1970 // Check the dependences to fast track invalidation.
1971 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1972 dbgs() << "[Attributor] InvalidAA: " << *InvalidAA
1973 << " has " << InvalidAA->Deps.size()
1974 << " required & optional dependences\n");
1975 while (!InvalidAA->Deps.empty()) {
1976 const auto &Dep = InvalidAA->Deps.back();
1977 InvalidAA->Deps.pop_back();
1978 AbstractAttribute *DepAA = cast<AbstractAttribute>(Dep.getPointer());
1979 if (Dep.getInt() == unsigned(DepClassTy::OPTIONAL)) {
1980 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE,
1981 dbgs() << " - recompute: " << *DepAA);
1982 Worklist.insert(DepAA);
1983 continue;
1984 }
1985 DEBUG_WITH_TYPE(VERBOSE_DEBUG_TYPE, dbgs()
1986 << " - invalidate: " << *DepAA);
1987 DepAA->getState().indicatePessimisticFixpoint();
1988 assert(DepAA->getState().isAtFixpoint() && "Expected fixpoint state!");
1989 if (!DepAA->getState().isValidState())
1990 InvalidAAs.insert(DepAA);
1991 else
1992 ChangedAAs.push_back(DepAA);
1993 }
1994 }
1995
1996 // Add all abstract attributes that are potentially dependent on one that
1997 // changed to the work list.
1998 for (AbstractAttribute *ChangedAA : ChangedAAs)
1999 while (!ChangedAA->Deps.empty()) {
2000 Worklist.insert(
2001 cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
2002 ChangedAA->Deps.pop_back();
2003 }
2004
2005 LLVM_DEBUG(dbgs() << "[Attributor] #Iteration: " << IterationCounter
2006 << ", Worklist+Dependent size: " << Worklist.size()
2007 << "\n");
2008
2009 // Reset the changed and invalid set.
2010 ChangedAAs.clear();
2011 InvalidAAs.clear();
2012
2013 // Update all abstract attribute in the work list and record the ones that
2014 // changed.
2015 for (AbstractAttribute *AA : Worklist) {
2016 const auto &AAState = AA->getState();
2017 if (!AAState.isAtFixpoint())
2018 if (updateAA(*AA) == ChangeStatus::CHANGED)
2019 ChangedAAs.push_back(AA);
2020
2021 // Use the InvalidAAs vector to propagate invalid states fast transitively
2022 // without requiring updates.
2023 if (!AAState.isValidState())
2024 InvalidAAs.insert(AA);
2025 }
2026
2027 // Add attributes to the changed set if they have been created in the last
2028 // iteration.
2029 ChangedAAs.append(DG.SyntheticRoot.begin() + NumAAs,
2030 DG.SyntheticRoot.end());
2031
2032 // Reset the work list and repopulate with the changed abstract attributes.
2033 // Note that dependent ones are added above.
2034 Worklist.clear();
2035 Worklist.insert(ChangedAAs.begin(), ChangedAAs.end());
2036 Worklist.insert(QueryAAsAwaitingUpdate.begin(),
2037 QueryAAsAwaitingUpdate.end());
2038 QueryAAsAwaitingUpdate.clear();
2039
2040 } while (!Worklist.empty() &&
2041 (IterationCounter++ < MaxIterations || VerifyMaxFixpointIterations));
2042
2043 if (IterationCounter > MaxIterations && !Functions.empty()) {
2044 auto Remark = [&](OptimizationRemarkMissed ORM) {
2045 return ORM << "Attributor did not reach a fixpoint after "
2046 << ore::NV("Iterations", MaxIterations) << " iterations.";
2047 };
2048 Function *F = Functions.front();
2049 emitRemark<OptimizationRemarkMissed>(F, "FixedPoint", Remark);
2050 }
2051
2052 LLVM_DEBUG(dbgs() << "\n[Attributor] Fixpoint iteration done after: "
2053 << IterationCounter << "/" << MaxIterations
2054 << " iterations\n");
2055
2056 // Reset abstract arguments not settled in a sound fixpoint by now. This
2057 // happens when we stopped the fixpoint iteration early. Note that only the
2058 // ones marked as "changed" *and* the ones transitively depending on them
2059 // need to be reverted to a pessimistic state. Others might not be in a
2060 // fixpoint state but we can use the optimistic results for them anyway.
2061 SmallPtrSet<AbstractAttribute *, 32> Visited;
2062 for (unsigned u = 0; u < ChangedAAs.size(); u++) {
2063 AbstractAttribute *ChangedAA = ChangedAAs[u];
2064 if (!Visited.insert(ChangedAA).second)
2065 continue;
2066
2067 AbstractState &State = ChangedAA->getState();
2068 if (!State.isAtFixpoint()) {
2069 State.indicatePessimisticFixpoint();
2070
2071 NumAttributesTimedOut++;
2072 }
2073
2074 while (!ChangedAA->Deps.empty()) {
2075 ChangedAAs.push_back(
2076 cast<AbstractAttribute>(ChangedAA->Deps.back().getPointer()));
2077 ChangedAA->Deps.pop_back();
2078 }
2079 }
2080
2081 LLVM_DEBUG({
2082 if (!Visited.empty())
2083 dbgs() << "\n[Attributor] Finalized " << Visited.size()
2084 << " abstract attributes.\n";
2085 });
2086
2087 if (VerifyMaxFixpointIterations && IterationCounter != MaxIterations) {
2088 errs() << "\n[Attributor] Fixpoint iteration done after: "
2089 << IterationCounter << "/" << MaxIterations << " iterations\n";
2090 llvm_unreachable("The fixpoint was not reached with exactly the number of "
2091 "specified iterations!");
2092 }
2093 }
2094
registerForUpdate(AbstractAttribute & AA)2095 void Attributor::registerForUpdate(AbstractAttribute &AA) {
2096 assert(AA.isQueryAA() &&
2097 "Non-query AAs should not be required to register for updates!");
2098 QueryAAsAwaitingUpdate.insert(&AA);
2099 }
2100
manifestAttributes()2101 ChangeStatus Attributor::manifestAttributes() {
2102 TimeTraceScope TimeScope("Attributor::manifestAttributes");
2103 size_t NumFinalAAs = DG.SyntheticRoot.Deps.size();
2104
2105 unsigned NumManifested = 0;
2106 unsigned NumAtFixpoint = 0;
2107 ChangeStatus ManifestChange = ChangeStatus::UNCHANGED;
2108 for (auto &DepAA : DG.SyntheticRoot.Deps) {
2109 AbstractAttribute *AA = cast<AbstractAttribute>(DepAA.getPointer());
2110 AbstractState &State = AA->getState();
2111
2112 // If there is not already a fixpoint reached, we can now take the
2113 // optimistic state. This is correct because we enforced a pessimistic one
2114 // on abstract attributes that were transitively dependent on a changed one
2115 // already above.
2116 if (!State.isAtFixpoint())
2117 State.indicateOptimisticFixpoint();
2118
2119 // We must not manifest Attributes that use Callbase info.
2120 if (AA->hasCallBaseContext())
2121 continue;
2122 // If the state is invalid, we do not try to manifest it.
2123 if (!State.isValidState())
2124 continue;
2125
2126 if (AA->getCtxI() && !isRunOn(*AA->getAnchorScope()))
2127 continue;
2128
2129 // Skip dead code.
2130 bool UsedAssumedInformation = false;
2131 if (isAssumedDead(*AA, nullptr, UsedAssumedInformation,
2132 /* CheckBBLivenessOnly */ true))
2133 continue;
2134 // Check if the manifest debug counter that allows skipping manifestation of
2135 // AAs
2136 if (!DebugCounter::shouldExecute(ManifestDBGCounter))
2137 continue;
2138 // Manifest the state and record if we changed the IR.
2139 ChangeStatus LocalChange = AA->manifest(*this);
2140 if (LocalChange == ChangeStatus::CHANGED && AreStatisticsEnabled())
2141 AA->trackStatistics();
2142 LLVM_DEBUG(dbgs() << "[Attributor] Manifest " << LocalChange << " : " << *AA
2143 << "\n");
2144
2145 ManifestChange = ManifestChange | LocalChange;
2146
2147 NumAtFixpoint++;
2148 NumManifested += (LocalChange == ChangeStatus::CHANGED);
2149 }
2150
2151 (void)NumManifested;
2152 (void)NumAtFixpoint;
2153 LLVM_DEBUG(dbgs() << "\n[Attributor] Manifested " << NumManifested
2154 << " arguments while " << NumAtFixpoint
2155 << " were in a valid fixpoint state\n");
2156
2157 NumAttributesManifested += NumManifested;
2158 NumAttributesValidFixpoint += NumAtFixpoint;
2159
2160 (void)NumFinalAAs;
2161 if (NumFinalAAs != DG.SyntheticRoot.Deps.size()) {
2162 for (unsigned u = NumFinalAAs; u < DG.SyntheticRoot.Deps.size(); ++u)
2163 errs() << "Unexpected abstract attribute: "
2164 << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
2165 << " :: "
2166 << cast<AbstractAttribute>(DG.SyntheticRoot.Deps[u].getPointer())
2167 ->getIRPosition()
2168 .getAssociatedValue()
2169 << "\n";
2170 llvm_unreachable("Expected the final number of abstract attributes to "
2171 "remain unchanged!");
2172 }
2173 return ManifestChange;
2174 }
2175
identifyDeadInternalFunctions()2176 void Attributor::identifyDeadInternalFunctions() {
2177 // Early exit if we don't intend to delete functions.
2178 if (!Configuration.DeleteFns)
2179 return;
2180
2181 // To avoid triggering an assertion in the lazy call graph we will not delete
2182 // any internal library functions. We should modify the assertion though and
2183 // allow internals to be deleted.
2184 const auto *TLI =
2185 isModulePass()
2186 ? nullptr
2187 : getInfoCache().getTargetLibraryInfoForFunction(*Functions.back());
2188 LibFunc LF;
2189
2190 // Identify dead internal functions and delete them. This happens outside
2191 // the other fixpoint analysis as we might treat potentially dead functions
2192 // as live to lower the number of iterations. If they happen to be dead, the
2193 // below fixpoint loop will identify and eliminate them.
2194
2195 SmallVector<Function *, 8> InternalFns;
2196 for (Function *F : Functions)
2197 if (F->hasLocalLinkage() && (isModulePass() || !TLI->getLibFunc(*F, LF)))
2198 InternalFns.push_back(F);
2199
2200 SmallPtrSet<Function *, 8> LiveInternalFns;
2201 bool FoundLiveInternal = true;
2202 while (FoundLiveInternal) {
2203 FoundLiveInternal = false;
2204 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u) {
2205 Function *F = InternalFns[u];
2206 if (!F)
2207 continue;
2208
2209 bool UsedAssumedInformation = false;
2210 if (checkForAllCallSites(
2211 [&](AbstractCallSite ACS) {
2212 Function *Callee = ACS.getInstruction()->getFunction();
2213 return ToBeDeletedFunctions.count(Callee) ||
2214 (Functions.count(Callee) && Callee->hasLocalLinkage() &&
2215 !LiveInternalFns.count(Callee));
2216 },
2217 *F, true, nullptr, UsedAssumedInformation)) {
2218 continue;
2219 }
2220
2221 LiveInternalFns.insert(F);
2222 InternalFns[u] = nullptr;
2223 FoundLiveInternal = true;
2224 }
2225 }
2226
2227 for (unsigned u = 0, e = InternalFns.size(); u < e; ++u)
2228 if (Function *F = InternalFns[u])
2229 ToBeDeletedFunctions.insert(F);
2230 }
2231
cleanupIR()2232 ChangeStatus Attributor::cleanupIR() {
2233 TimeTraceScope TimeScope("Attributor::cleanupIR");
2234 // Delete stuff at the end to avoid invalid references and a nice order.
2235 LLVM_DEBUG(dbgs() << "\n[Attributor] Delete/replace at least "
2236 << ToBeDeletedFunctions.size() << " functions and "
2237 << ToBeDeletedBlocks.size() << " blocks and "
2238 << ToBeDeletedInsts.size() << " instructions and "
2239 << ToBeChangedValues.size() << " values and "
2240 << ToBeChangedUses.size() << " uses. To insert "
2241 << ToBeChangedToUnreachableInsts.size()
2242 << " unreachables.\n"
2243 << "Preserve manifest added " << ManifestAddedBlocks.size()
2244 << " blocks\n");
2245
2246 SmallVector<WeakTrackingVH, 32> DeadInsts;
2247 SmallVector<Instruction *, 32> TerminatorsToFold;
2248
2249 auto ReplaceUse = [&](Use *U, Value *NewV) {
2250 Value *OldV = U->get();
2251
2252 // If we plan to replace NewV we need to update it at this point.
2253 do {
2254 const auto &Entry = ToBeChangedValues.lookup(NewV);
2255 if (!get<0>(Entry))
2256 break;
2257 NewV = get<0>(Entry);
2258 } while (true);
2259
2260 Instruction *I = dyn_cast<Instruction>(U->getUser());
2261 assert((!I || isRunOn(*I->getFunction())) &&
2262 "Cannot replace an instruction outside the current SCC!");
2263
2264 // Do not replace uses in returns if the value is a must-tail call we will
2265 // not delete.
2266 if (auto *RI = dyn_cast_or_null<ReturnInst>(I)) {
2267 if (auto *CI = dyn_cast<CallInst>(OldV->stripPointerCasts()))
2268 if (CI->isMustTailCall() && !ToBeDeletedInsts.count(CI))
2269 return;
2270 // If we rewrite a return and the new value is not an argument, strip the
2271 // `returned` attribute as it is wrong now.
2272 if (!isa<Argument>(NewV))
2273 for (auto &Arg : RI->getFunction()->args())
2274 Arg.removeAttr(Attribute::Returned);
2275 }
2276
2277 LLVM_DEBUG(dbgs() << "Use " << *NewV << " in " << *U->getUser()
2278 << " instead of " << *OldV << "\n");
2279 U->set(NewV);
2280
2281 if (Instruction *I = dyn_cast<Instruction>(OldV)) {
2282 CGModifiedFunctions.insert(I->getFunction());
2283 if (!isa<PHINode>(I) && !ToBeDeletedInsts.count(I) &&
2284 isInstructionTriviallyDead(I))
2285 DeadInsts.push_back(I);
2286 }
2287 if (isa<UndefValue>(NewV) && isa<CallBase>(U->getUser())) {
2288 auto *CB = cast<CallBase>(U->getUser());
2289 if (CB->isArgOperand(U)) {
2290 unsigned Idx = CB->getArgOperandNo(U);
2291 CB->removeParamAttr(Idx, Attribute::NoUndef);
2292 Function *Fn = CB->getCalledFunction();
2293 if (Fn && Fn->arg_size() > Idx)
2294 Fn->removeParamAttr(Idx, Attribute::NoUndef);
2295 }
2296 }
2297 if (isa<Constant>(NewV) && isa<BranchInst>(U->getUser())) {
2298 Instruction *UserI = cast<Instruction>(U->getUser());
2299 if (isa<UndefValue>(NewV)) {
2300 ToBeChangedToUnreachableInsts.insert(UserI);
2301 } else {
2302 TerminatorsToFold.push_back(UserI);
2303 }
2304 }
2305 };
2306
2307 for (auto &It : ToBeChangedUses) {
2308 Use *U = It.first;
2309 Value *NewV = It.second;
2310 ReplaceUse(U, NewV);
2311 }
2312
2313 SmallVector<Use *, 4> Uses;
2314 for (auto &It : ToBeChangedValues) {
2315 Value *OldV = It.first;
2316 auto [NewV, Done] = It.second;
2317 Uses.clear();
2318 for (auto &U : OldV->uses())
2319 if (Done || !U.getUser()->isDroppable())
2320 Uses.push_back(&U);
2321 for (Use *U : Uses) {
2322 if (auto *I = dyn_cast<Instruction>(U->getUser()))
2323 if (!isRunOn(*I->getFunction()))
2324 continue;
2325 ReplaceUse(U, NewV);
2326 }
2327 }
2328
2329 for (const auto &V : InvokeWithDeadSuccessor)
2330 if (InvokeInst *II = dyn_cast_or_null<InvokeInst>(V)) {
2331 assert(isRunOn(*II->getFunction()) &&
2332 "Cannot replace an invoke outside the current SCC!");
2333 bool UnwindBBIsDead = II->hasFnAttr(Attribute::NoUnwind);
2334 bool NormalBBIsDead = II->hasFnAttr(Attribute::NoReturn);
2335 bool Invoke2CallAllowed =
2336 !AAIsDead::mayCatchAsynchronousExceptions(*II->getFunction());
2337 assert((UnwindBBIsDead || NormalBBIsDead) &&
2338 "Invoke does not have dead successors!");
2339 BasicBlock *BB = II->getParent();
2340 BasicBlock *NormalDestBB = II->getNormalDest();
2341 if (UnwindBBIsDead) {
2342 Instruction *NormalNextIP = &NormalDestBB->front();
2343 if (Invoke2CallAllowed) {
2344 changeToCall(II);
2345 NormalNextIP = BB->getTerminator();
2346 }
2347 if (NormalBBIsDead)
2348 ToBeChangedToUnreachableInsts.insert(NormalNextIP);
2349 } else {
2350 assert(NormalBBIsDead && "Broken invariant!");
2351 if (!NormalDestBB->getUniquePredecessor())
2352 NormalDestBB = SplitBlockPredecessors(NormalDestBB, {BB}, ".dead");
2353 ToBeChangedToUnreachableInsts.insert(&NormalDestBB->front());
2354 }
2355 }
2356 for (Instruction *I : TerminatorsToFold) {
2357 assert(isRunOn(*I->getFunction()) &&
2358 "Cannot replace a terminator outside the current SCC!");
2359 CGModifiedFunctions.insert(I->getFunction());
2360 ConstantFoldTerminator(I->getParent());
2361 }
2362 for (const auto &V : ToBeChangedToUnreachableInsts)
2363 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2364 LLVM_DEBUG(dbgs() << "[Attributor] Change to unreachable: " << *I
2365 << "\n");
2366 assert(isRunOn(*I->getFunction()) &&
2367 "Cannot replace an instruction outside the current SCC!");
2368 CGModifiedFunctions.insert(I->getFunction());
2369 changeToUnreachable(I);
2370 }
2371
2372 for (const auto &V : ToBeDeletedInsts) {
2373 if (Instruction *I = dyn_cast_or_null<Instruction>(V)) {
2374 if (auto *CB = dyn_cast<CallBase>(I)) {
2375 assert((isa<IntrinsicInst>(CB) || isRunOn(*I->getFunction())) &&
2376 "Cannot delete an instruction outside the current SCC!");
2377 if (!isa<IntrinsicInst>(CB))
2378 Configuration.CGUpdater.removeCallSite(*CB);
2379 }
2380 I->dropDroppableUses();
2381 CGModifiedFunctions.insert(I->getFunction());
2382 if (!I->getType()->isVoidTy())
2383 I->replaceAllUsesWith(UndefValue::get(I->getType()));
2384 if (!isa<PHINode>(I) && isInstructionTriviallyDead(I))
2385 DeadInsts.push_back(I);
2386 else
2387 I->eraseFromParent();
2388 }
2389 }
2390
2391 llvm::erase_if(DeadInsts, [&](WeakTrackingVH I) { return !I; });
2392
2393 LLVM_DEBUG({
2394 dbgs() << "[Attributor] DeadInsts size: " << DeadInsts.size() << "\n";
2395 for (auto &I : DeadInsts)
2396 if (I)
2397 dbgs() << " - " << *I << "\n";
2398 });
2399
2400 RecursivelyDeleteTriviallyDeadInstructions(DeadInsts);
2401
2402 if (unsigned NumDeadBlocks = ToBeDeletedBlocks.size()) {
2403 SmallVector<BasicBlock *, 8> ToBeDeletedBBs;
2404 ToBeDeletedBBs.reserve(NumDeadBlocks);
2405 for (BasicBlock *BB : ToBeDeletedBlocks) {
2406 assert(isRunOn(*BB->getParent()) &&
2407 "Cannot delete a block outside the current SCC!");
2408 CGModifiedFunctions.insert(BB->getParent());
2409 // Do not delete BBs added during manifests of AAs.
2410 if (ManifestAddedBlocks.contains(BB))
2411 continue;
2412 ToBeDeletedBBs.push_back(BB);
2413 }
2414 // Actually we do not delete the blocks but squash them into a single
2415 // unreachable but untangling branches that jump here is something we need
2416 // to do in a more generic way.
2417 detachDeadBlocks(ToBeDeletedBBs, nullptr);
2418 }
2419
2420 identifyDeadInternalFunctions();
2421
2422 // Rewrite the functions as requested during manifest.
2423 ChangeStatus ManifestChange = rewriteFunctionSignatures(CGModifiedFunctions);
2424
2425 for (Function *Fn : CGModifiedFunctions)
2426 if (!ToBeDeletedFunctions.count(Fn) && Functions.count(Fn))
2427 Configuration.CGUpdater.reanalyzeFunction(*Fn);
2428
2429 for (Function *Fn : ToBeDeletedFunctions) {
2430 if (!Functions.count(Fn))
2431 continue;
2432 Configuration.CGUpdater.removeFunction(*Fn);
2433 }
2434
2435 if (!ToBeChangedUses.empty())
2436 ManifestChange = ChangeStatus::CHANGED;
2437
2438 if (!ToBeChangedToUnreachableInsts.empty())
2439 ManifestChange = ChangeStatus::CHANGED;
2440
2441 if (!ToBeDeletedFunctions.empty())
2442 ManifestChange = ChangeStatus::CHANGED;
2443
2444 if (!ToBeDeletedBlocks.empty())
2445 ManifestChange = ChangeStatus::CHANGED;
2446
2447 if (!ToBeDeletedInsts.empty())
2448 ManifestChange = ChangeStatus::CHANGED;
2449
2450 if (!InvokeWithDeadSuccessor.empty())
2451 ManifestChange = ChangeStatus::CHANGED;
2452
2453 if (!DeadInsts.empty())
2454 ManifestChange = ChangeStatus::CHANGED;
2455
2456 NumFnDeleted += ToBeDeletedFunctions.size();
2457
2458 LLVM_DEBUG(dbgs() << "[Attributor] Deleted " << ToBeDeletedFunctions.size()
2459 << " functions after manifest.\n");
2460
2461 #ifdef EXPENSIVE_CHECKS
2462 for (Function *F : Functions) {
2463 if (ToBeDeletedFunctions.count(F))
2464 continue;
2465 assert(!verifyFunction(*F, &errs()) && "Module verification failed!");
2466 }
2467 #endif
2468
2469 return ManifestChange;
2470 }
2471
run()2472 ChangeStatus Attributor::run() {
2473 TimeTraceScope TimeScope("Attributor::run");
2474 AttributorCallGraph ACallGraph(*this);
2475
2476 if (PrintCallGraph)
2477 ACallGraph.populateAll();
2478
2479 Phase = AttributorPhase::UPDATE;
2480 runTillFixpoint();
2481
2482 // dump graphs on demand
2483 if (DumpDepGraph)
2484 DG.dumpGraph();
2485
2486 if (ViewDepGraph)
2487 DG.viewGraph();
2488
2489 if (PrintDependencies)
2490 DG.print();
2491
2492 Phase = AttributorPhase::MANIFEST;
2493 ChangeStatus ManifestChange = manifestAttributes();
2494
2495 Phase = AttributorPhase::CLEANUP;
2496 ChangeStatus CleanupChange = cleanupIR();
2497
2498 if (PrintCallGraph)
2499 ACallGraph.print();
2500
2501 return ManifestChange | CleanupChange;
2502 }
2503
updateAA(AbstractAttribute & AA)2504 ChangeStatus Attributor::updateAA(AbstractAttribute &AA) {
2505 TimeTraceScope TimeScope(
2506 AA.getName() + std::to_string(AA.getIRPosition().getPositionKind()) +
2507 "::updateAA");
2508 assert(Phase == AttributorPhase::UPDATE &&
2509 "We can update AA only in the update stage!");
2510
2511 // Use a new dependence vector for this update.
2512 DependenceVector DV;
2513 DependenceStack.push_back(&DV);
2514
2515 auto &AAState = AA.getState();
2516 ChangeStatus CS = ChangeStatus::UNCHANGED;
2517 bool UsedAssumedInformation = false;
2518 if (!isAssumedDead(AA, nullptr, UsedAssumedInformation,
2519 /* CheckBBLivenessOnly */ true))
2520 CS = AA.update(*this);
2521
2522 if (!AA.isQueryAA() && DV.empty() && !AA.getState().isAtFixpoint()) {
2523 // If the AA did not rely on outside information but changed, we run it
2524 // again to see if it found a fixpoint. Most AAs do but we don't require
2525 // them to. Hence, it might take the AA multiple iterations to get to a
2526 // fixpoint even if it does not rely on outside information, which is fine.
2527 ChangeStatus RerunCS = ChangeStatus::UNCHANGED;
2528 if (CS == ChangeStatus::CHANGED)
2529 RerunCS = AA.update(*this);
2530
2531 // If the attribute did not change during the run or rerun, and it still did
2532 // not query any non-fix information, the state will not change and we can
2533 // indicate that right at this point.
2534 if (RerunCS == ChangeStatus::UNCHANGED && !AA.isQueryAA() && DV.empty())
2535 AAState.indicateOptimisticFixpoint();
2536 }
2537
2538 if (!AAState.isAtFixpoint())
2539 rememberDependences();
2540
2541 // Verify the stack was used properly, that is we pop the dependence vector we
2542 // put there earlier.
2543 DependenceVector *PoppedDV = DependenceStack.pop_back_val();
2544 (void)PoppedDV;
2545 assert(PoppedDV == &DV && "Inconsistent usage of the dependence stack!");
2546
2547 return CS;
2548 }
2549
createShallowWrapper(Function & F)2550 void Attributor::createShallowWrapper(Function &F) {
2551 assert(!F.isDeclaration() && "Cannot create a wrapper around a declaration!");
2552
2553 Module &M = *F.getParent();
2554 LLVMContext &Ctx = M.getContext();
2555 FunctionType *FnTy = F.getFunctionType();
2556
2557 Function *Wrapper =
2558 Function::Create(FnTy, F.getLinkage(), F.getAddressSpace(), F.getName());
2559 F.setName(""); // set the inside function anonymous
2560 M.getFunctionList().insert(F.getIterator(), Wrapper);
2561
2562 F.setLinkage(GlobalValue::InternalLinkage);
2563
2564 F.replaceAllUsesWith(Wrapper);
2565 assert(F.use_empty() && "Uses remained after wrapper was created!");
2566
2567 // Move the COMDAT section to the wrapper.
2568 // TODO: Check if we need to keep it for F as well.
2569 Wrapper->setComdat(F.getComdat());
2570 F.setComdat(nullptr);
2571
2572 // Copy all metadata and attributes but keep them on F as well.
2573 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2574 F.getAllMetadata(MDs);
2575 for (auto MDIt : MDs)
2576 Wrapper->addMetadata(MDIt.first, *MDIt.second);
2577 Wrapper->setAttributes(F.getAttributes());
2578
2579 // Create the call in the wrapper.
2580 BasicBlock *EntryBB = BasicBlock::Create(Ctx, "entry", Wrapper);
2581
2582 SmallVector<Value *, 8> Args;
2583 Argument *FArgIt = F.arg_begin();
2584 for (Argument &Arg : Wrapper->args()) {
2585 Args.push_back(&Arg);
2586 Arg.setName((FArgIt++)->getName());
2587 }
2588
2589 CallInst *CI = CallInst::Create(&F, Args, "", EntryBB);
2590 CI->setTailCall(true);
2591 CI->addFnAttr(Attribute::NoInline);
2592 ReturnInst::Create(Ctx, CI->getType()->isVoidTy() ? nullptr : CI, EntryBB);
2593
2594 NumFnShallowWrappersCreated++;
2595 }
2596
isInternalizable(Function & F)2597 bool Attributor::isInternalizable(Function &F) {
2598 if (F.isDeclaration() || F.hasLocalLinkage() ||
2599 GlobalValue::isInterposableLinkage(F.getLinkage()))
2600 return false;
2601 return true;
2602 }
2603
internalizeFunction(Function & F,bool Force)2604 Function *Attributor::internalizeFunction(Function &F, bool Force) {
2605 if (!AllowDeepWrapper && !Force)
2606 return nullptr;
2607 if (!isInternalizable(F))
2608 return nullptr;
2609
2610 SmallPtrSet<Function *, 2> FnSet = {&F};
2611 DenseMap<Function *, Function *> InternalizedFns;
2612 internalizeFunctions(FnSet, InternalizedFns);
2613
2614 return InternalizedFns[&F];
2615 }
2616
internalizeFunctions(SmallPtrSetImpl<Function * > & FnSet,DenseMap<Function *,Function * > & FnMap)2617 bool Attributor::internalizeFunctions(SmallPtrSetImpl<Function *> &FnSet,
2618 DenseMap<Function *, Function *> &FnMap) {
2619 for (Function *F : FnSet)
2620 if (!Attributor::isInternalizable(*F))
2621 return false;
2622
2623 FnMap.clear();
2624 // Generate the internalized version of each function.
2625 for (Function *F : FnSet) {
2626 Module &M = *F->getParent();
2627 FunctionType *FnTy = F->getFunctionType();
2628
2629 // Create a copy of the current function
2630 Function *Copied =
2631 Function::Create(FnTy, F->getLinkage(), F->getAddressSpace(),
2632 F->getName() + ".internalized");
2633 ValueToValueMapTy VMap;
2634 auto *NewFArgIt = Copied->arg_begin();
2635 for (auto &Arg : F->args()) {
2636 auto ArgName = Arg.getName();
2637 NewFArgIt->setName(ArgName);
2638 VMap[&Arg] = &(*NewFArgIt++);
2639 }
2640 SmallVector<ReturnInst *, 8> Returns;
2641
2642 // Copy the body of the original function to the new one
2643 CloneFunctionInto(Copied, F, VMap,
2644 CloneFunctionChangeType::LocalChangesOnly, Returns);
2645
2646 // Set the linakage and visibility late as CloneFunctionInto has some
2647 // implicit requirements.
2648 Copied->setVisibility(GlobalValue::DefaultVisibility);
2649 Copied->setLinkage(GlobalValue::PrivateLinkage);
2650
2651 // Copy metadata
2652 SmallVector<std::pair<unsigned, MDNode *>, 1> MDs;
2653 F->getAllMetadata(MDs);
2654 for (auto MDIt : MDs)
2655 if (!Copied->hasMetadata())
2656 Copied->addMetadata(MDIt.first, *MDIt.second);
2657
2658 M.getFunctionList().insert(F->getIterator(), Copied);
2659 Copied->setDSOLocal(true);
2660 FnMap[F] = Copied;
2661 }
2662
2663 // Replace all uses of the old function with the new internalized function
2664 // unless the caller is a function that was just internalized.
2665 for (Function *F : FnSet) {
2666 auto &InternalizedFn = FnMap[F];
2667 auto IsNotInternalized = [&](Use &U) -> bool {
2668 if (auto *CB = dyn_cast<CallBase>(U.getUser()))
2669 return !FnMap.lookup(CB->getCaller());
2670 return false;
2671 };
2672 F->replaceUsesWithIf(InternalizedFn, IsNotInternalized);
2673 }
2674
2675 return true;
2676 }
2677
isValidFunctionSignatureRewrite(Argument & Arg,ArrayRef<Type * > ReplacementTypes)2678 bool Attributor::isValidFunctionSignatureRewrite(
2679 Argument &Arg, ArrayRef<Type *> ReplacementTypes) {
2680
2681 if (!Configuration.RewriteSignatures)
2682 return false;
2683
2684 Function *Fn = Arg.getParent();
2685 auto CallSiteCanBeChanged = [Fn](AbstractCallSite ACS) {
2686 // Forbid the call site to cast the function return type. If we need to
2687 // rewrite these functions we need to re-create a cast for the new call site
2688 // (if the old had uses).
2689 if (!ACS.getCalledFunction() ||
2690 ACS.getInstruction()->getType() !=
2691 ACS.getCalledFunction()->getReturnType())
2692 return false;
2693 if (ACS.getCalledOperand()->getType() != Fn->getType())
2694 return false;
2695 // Forbid must-tail calls for now.
2696 return !ACS.isCallbackCall() && !ACS.getInstruction()->isMustTailCall();
2697 };
2698
2699 // Avoid var-arg functions for now.
2700 if (Fn->isVarArg()) {
2701 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite var-args functions\n");
2702 return false;
2703 }
2704
2705 // Avoid functions with complicated argument passing semantics.
2706 AttributeList FnAttributeList = Fn->getAttributes();
2707 if (FnAttributeList.hasAttrSomewhere(Attribute::Nest) ||
2708 FnAttributeList.hasAttrSomewhere(Attribute::StructRet) ||
2709 FnAttributeList.hasAttrSomewhere(Attribute::InAlloca) ||
2710 FnAttributeList.hasAttrSomewhere(Attribute::Preallocated)) {
2711 LLVM_DEBUG(
2712 dbgs() << "[Attributor] Cannot rewrite due to complex attribute\n");
2713 return false;
2714 }
2715
2716 // Avoid callbacks for now.
2717 bool UsedAssumedInformation = false;
2718 if (!checkForAllCallSites(CallSiteCanBeChanged, *Fn, true, nullptr,
2719 UsedAssumedInformation)) {
2720 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite all call sites\n");
2721 return false;
2722 }
2723
2724 auto InstPred = [](Instruction &I) {
2725 if (auto *CI = dyn_cast<CallInst>(&I))
2726 return !CI->isMustTailCall();
2727 return true;
2728 };
2729
2730 // Forbid must-tail calls for now.
2731 // TODO:
2732 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(*Fn);
2733 if (!checkForAllInstructionsImpl(nullptr, OpcodeInstMap, InstPred, nullptr,
2734 nullptr, {Instruction::Call},
2735 UsedAssumedInformation)) {
2736 LLVM_DEBUG(dbgs() << "[Attributor] Cannot rewrite due to instructions\n");
2737 return false;
2738 }
2739
2740 return true;
2741 }
2742
registerFunctionSignatureRewrite(Argument & Arg,ArrayRef<Type * > ReplacementTypes,ArgumentReplacementInfo::CalleeRepairCBTy && CalleeRepairCB,ArgumentReplacementInfo::ACSRepairCBTy && ACSRepairCB)2743 bool Attributor::registerFunctionSignatureRewrite(
2744 Argument &Arg, ArrayRef<Type *> ReplacementTypes,
2745 ArgumentReplacementInfo::CalleeRepairCBTy &&CalleeRepairCB,
2746 ArgumentReplacementInfo::ACSRepairCBTy &&ACSRepairCB) {
2747 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2748 << Arg.getParent()->getName() << " with "
2749 << ReplacementTypes.size() << " replacements\n");
2750 assert(isValidFunctionSignatureRewrite(Arg, ReplacementTypes) &&
2751 "Cannot register an invalid rewrite");
2752
2753 Function *Fn = Arg.getParent();
2754 SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2755 ArgumentReplacementMap[Fn];
2756 if (ARIs.empty())
2757 ARIs.resize(Fn->arg_size());
2758
2759 // If we have a replacement already with less than or equal new arguments,
2760 // ignore this request.
2761 std::unique_ptr<ArgumentReplacementInfo> &ARI = ARIs[Arg.getArgNo()];
2762 if (ARI && ARI->getNumReplacementArgs() <= ReplacementTypes.size()) {
2763 LLVM_DEBUG(dbgs() << "[Attributor] Existing rewrite is preferred\n");
2764 return false;
2765 }
2766
2767 // If we have a replacement already but we like the new one better, delete
2768 // the old.
2769 ARI.reset();
2770
2771 LLVM_DEBUG(dbgs() << "[Attributor] Register new rewrite of " << Arg << " in "
2772 << Arg.getParent()->getName() << " with "
2773 << ReplacementTypes.size() << " replacements\n");
2774
2775 // Remember the replacement.
2776 ARI.reset(new ArgumentReplacementInfo(*this, Arg, ReplacementTypes,
2777 std::move(CalleeRepairCB),
2778 std::move(ACSRepairCB)));
2779
2780 return true;
2781 }
2782
shouldSeedAttribute(AbstractAttribute & AA)2783 bool Attributor::shouldSeedAttribute(AbstractAttribute &AA) {
2784 bool Result = true;
2785 #ifndef NDEBUG
2786 if (SeedAllowList.size() != 0)
2787 Result = llvm::is_contained(SeedAllowList, AA.getName());
2788 Function *Fn = AA.getAnchorScope();
2789 if (FunctionSeedAllowList.size() != 0 && Fn)
2790 Result &= llvm::is_contained(FunctionSeedAllowList, Fn->getName());
2791 #endif
2792 return Result;
2793 }
2794
rewriteFunctionSignatures(SmallSetVector<Function *,8> & ModifiedFns)2795 ChangeStatus Attributor::rewriteFunctionSignatures(
2796 SmallSetVector<Function *, 8> &ModifiedFns) {
2797 ChangeStatus Changed = ChangeStatus::UNCHANGED;
2798
2799 for (auto &It : ArgumentReplacementMap) {
2800 Function *OldFn = It.getFirst();
2801
2802 // Deleted functions do not require rewrites.
2803 if (!Functions.count(OldFn) || ToBeDeletedFunctions.count(OldFn))
2804 continue;
2805
2806 const SmallVectorImpl<std::unique_ptr<ArgumentReplacementInfo>> &ARIs =
2807 It.getSecond();
2808 assert(ARIs.size() == OldFn->arg_size() && "Inconsistent state!");
2809
2810 SmallVector<Type *, 16> NewArgumentTypes;
2811 SmallVector<AttributeSet, 16> NewArgumentAttributes;
2812
2813 // Collect replacement argument types and copy over existing attributes.
2814 AttributeList OldFnAttributeList = OldFn->getAttributes();
2815 for (Argument &Arg : OldFn->args()) {
2816 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2817 ARIs[Arg.getArgNo()]) {
2818 NewArgumentTypes.append(ARI->ReplacementTypes.begin(),
2819 ARI->ReplacementTypes.end());
2820 NewArgumentAttributes.append(ARI->getNumReplacementArgs(),
2821 AttributeSet());
2822 } else {
2823 NewArgumentTypes.push_back(Arg.getType());
2824 NewArgumentAttributes.push_back(
2825 OldFnAttributeList.getParamAttrs(Arg.getArgNo()));
2826 }
2827 }
2828
2829 uint64_t LargestVectorWidth = 0;
2830 for (auto *I : NewArgumentTypes)
2831 if (auto *VT = dyn_cast<llvm::VectorType>(I))
2832 LargestVectorWidth =
2833 std::max(LargestVectorWidth,
2834 VT->getPrimitiveSizeInBits().getKnownMinValue());
2835
2836 FunctionType *OldFnTy = OldFn->getFunctionType();
2837 Type *RetTy = OldFnTy->getReturnType();
2838
2839 // Construct the new function type using the new arguments types.
2840 FunctionType *NewFnTy =
2841 FunctionType::get(RetTy, NewArgumentTypes, OldFnTy->isVarArg());
2842
2843 LLVM_DEBUG(dbgs() << "[Attributor] Function rewrite '" << OldFn->getName()
2844 << "' from " << *OldFn->getFunctionType() << " to "
2845 << *NewFnTy << "\n");
2846
2847 // Create the new function body and insert it into the module.
2848 Function *NewFn = Function::Create(NewFnTy, OldFn->getLinkage(),
2849 OldFn->getAddressSpace(), "");
2850 Functions.insert(NewFn);
2851 OldFn->getParent()->getFunctionList().insert(OldFn->getIterator(), NewFn);
2852 NewFn->takeName(OldFn);
2853 NewFn->copyAttributesFrom(OldFn);
2854
2855 // Patch the pointer to LLVM function in debug info descriptor.
2856 NewFn->setSubprogram(OldFn->getSubprogram());
2857 OldFn->setSubprogram(nullptr);
2858
2859 // Recompute the parameter attributes list based on the new arguments for
2860 // the function.
2861 LLVMContext &Ctx = OldFn->getContext();
2862 NewFn->setAttributes(AttributeList::get(
2863 Ctx, OldFnAttributeList.getFnAttrs(), OldFnAttributeList.getRetAttrs(),
2864 NewArgumentAttributes));
2865 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewFn, LargestVectorWidth);
2866
2867 // Since we have now created the new function, splice the body of the old
2868 // function right into the new function, leaving the old rotting hulk of the
2869 // function empty.
2870 NewFn->splice(NewFn->begin(), OldFn);
2871
2872 // Fixup block addresses to reference new function.
2873 SmallVector<BlockAddress *, 8u> BlockAddresses;
2874 for (User *U : OldFn->users())
2875 if (auto *BA = dyn_cast<BlockAddress>(U))
2876 BlockAddresses.push_back(BA);
2877 for (auto *BA : BlockAddresses)
2878 BA->replaceAllUsesWith(BlockAddress::get(NewFn, BA->getBasicBlock()));
2879
2880 // Set of all "call-like" instructions that invoke the old function mapped
2881 // to their new replacements.
2882 SmallVector<std::pair<CallBase *, CallBase *>, 8> CallSitePairs;
2883
2884 // Callback to create a new "call-like" instruction for a given one.
2885 auto CallSiteReplacementCreator = [&](AbstractCallSite ACS) {
2886 CallBase *OldCB = cast<CallBase>(ACS.getInstruction());
2887 const AttributeList &OldCallAttributeList = OldCB->getAttributes();
2888
2889 // Collect the new argument operands for the replacement call site.
2890 SmallVector<Value *, 16> NewArgOperands;
2891 SmallVector<AttributeSet, 16> NewArgOperandAttributes;
2892 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size(); ++OldArgNum) {
2893 unsigned NewFirstArgNum = NewArgOperands.size();
2894 (void)NewFirstArgNum; // only used inside assert.
2895 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2896 ARIs[OldArgNum]) {
2897 if (ARI->ACSRepairCB)
2898 ARI->ACSRepairCB(*ARI, ACS, NewArgOperands);
2899 assert(ARI->getNumReplacementArgs() + NewFirstArgNum ==
2900 NewArgOperands.size() &&
2901 "ACS repair callback did not provide as many operand as new "
2902 "types were registered!");
2903 // TODO: Exose the attribute set to the ACS repair callback
2904 NewArgOperandAttributes.append(ARI->ReplacementTypes.size(),
2905 AttributeSet());
2906 } else {
2907 NewArgOperands.push_back(ACS.getCallArgOperand(OldArgNum));
2908 NewArgOperandAttributes.push_back(
2909 OldCallAttributeList.getParamAttrs(OldArgNum));
2910 }
2911 }
2912
2913 assert(NewArgOperands.size() == NewArgOperandAttributes.size() &&
2914 "Mismatch # argument operands vs. # argument operand attributes!");
2915 assert(NewArgOperands.size() == NewFn->arg_size() &&
2916 "Mismatch # argument operands vs. # function arguments!");
2917
2918 SmallVector<OperandBundleDef, 4> OperandBundleDefs;
2919 OldCB->getOperandBundlesAsDefs(OperandBundleDefs);
2920
2921 // Create a new call or invoke instruction to replace the old one.
2922 CallBase *NewCB;
2923 if (InvokeInst *II = dyn_cast<InvokeInst>(OldCB)) {
2924 NewCB =
2925 InvokeInst::Create(NewFn, II->getNormalDest(), II->getUnwindDest(),
2926 NewArgOperands, OperandBundleDefs, "", OldCB);
2927 } else {
2928 auto *NewCI = CallInst::Create(NewFn, NewArgOperands, OperandBundleDefs,
2929 "", OldCB);
2930 NewCI->setTailCallKind(cast<CallInst>(OldCB)->getTailCallKind());
2931 NewCB = NewCI;
2932 }
2933
2934 // Copy over various properties and the new attributes.
2935 NewCB->copyMetadata(*OldCB, {LLVMContext::MD_prof, LLVMContext::MD_dbg});
2936 NewCB->setCallingConv(OldCB->getCallingConv());
2937 NewCB->takeName(OldCB);
2938 NewCB->setAttributes(AttributeList::get(
2939 Ctx, OldCallAttributeList.getFnAttrs(),
2940 OldCallAttributeList.getRetAttrs(), NewArgOperandAttributes));
2941
2942 AttributeFuncs::updateMinLegalVectorWidthAttr(*NewCB->getCaller(),
2943 LargestVectorWidth);
2944
2945 CallSitePairs.push_back({OldCB, NewCB});
2946 return true;
2947 };
2948
2949 // Use the CallSiteReplacementCreator to create replacement call sites.
2950 bool UsedAssumedInformation = false;
2951 bool Success = checkForAllCallSites(CallSiteReplacementCreator, *OldFn,
2952 true, nullptr, UsedAssumedInformation,
2953 /* CheckPotentiallyDead */ true);
2954 (void)Success;
2955 assert(Success && "Assumed call site replacement to succeed!");
2956
2957 // Rewire the arguments.
2958 Argument *OldFnArgIt = OldFn->arg_begin();
2959 Argument *NewFnArgIt = NewFn->arg_begin();
2960 for (unsigned OldArgNum = 0; OldArgNum < ARIs.size();
2961 ++OldArgNum, ++OldFnArgIt) {
2962 if (const std::unique_ptr<ArgumentReplacementInfo> &ARI =
2963 ARIs[OldArgNum]) {
2964 if (ARI->CalleeRepairCB)
2965 ARI->CalleeRepairCB(*ARI, *NewFn, NewFnArgIt);
2966 if (ARI->ReplacementTypes.empty())
2967 OldFnArgIt->replaceAllUsesWith(
2968 PoisonValue::get(OldFnArgIt->getType()));
2969 NewFnArgIt += ARI->ReplacementTypes.size();
2970 } else {
2971 NewFnArgIt->takeName(&*OldFnArgIt);
2972 OldFnArgIt->replaceAllUsesWith(&*NewFnArgIt);
2973 ++NewFnArgIt;
2974 }
2975 }
2976
2977 // Eliminate the instructions *after* we visited all of them.
2978 for (auto &CallSitePair : CallSitePairs) {
2979 CallBase &OldCB = *CallSitePair.first;
2980 CallBase &NewCB = *CallSitePair.second;
2981 assert(OldCB.getType() == NewCB.getType() &&
2982 "Cannot handle call sites with different types!");
2983 ModifiedFns.insert(OldCB.getFunction());
2984 Configuration.CGUpdater.replaceCallSite(OldCB, NewCB);
2985 OldCB.replaceAllUsesWith(&NewCB);
2986 OldCB.eraseFromParent();
2987 }
2988
2989 // Replace the function in the call graph (if any).
2990 Configuration.CGUpdater.replaceFunctionWith(*OldFn, *NewFn);
2991
2992 // If the old function was modified and needed to be reanalyzed, the new one
2993 // does now.
2994 if (ModifiedFns.remove(OldFn))
2995 ModifiedFns.insert(NewFn);
2996
2997 Changed = ChangeStatus::CHANGED;
2998 }
2999
3000 return Changed;
3001 }
3002
initializeInformationCache(const Function & CF,FunctionInfo & FI)3003 void InformationCache::initializeInformationCache(const Function &CF,
3004 FunctionInfo &FI) {
3005 // As we do not modify the function here we can remove the const
3006 // withouth breaking implicit assumptions. At the end of the day, we could
3007 // initialize the cache eagerly which would look the same to the users.
3008 Function &F = const_cast<Function &>(CF);
3009
3010 // Walk all instructions to find interesting instructions that might be
3011 // queried by abstract attributes during their initialization or update.
3012 // This has to happen before we create attributes.
3013
3014 DenseMap<const Value *, std::optional<short>> AssumeUsesMap;
3015
3016 // Add \p V to the assume uses map which track the number of uses outside of
3017 // "visited" assumes. If no outside uses are left the value is added to the
3018 // assume only use vector.
3019 auto AddToAssumeUsesMap = [&](const Value &V) -> void {
3020 SmallVector<const Instruction *> Worklist;
3021 if (auto *I = dyn_cast<Instruction>(&V))
3022 Worklist.push_back(I);
3023 while (!Worklist.empty()) {
3024 const Instruction *I = Worklist.pop_back_val();
3025 std::optional<short> &NumUses = AssumeUsesMap[I];
3026 if (!NumUses)
3027 NumUses = I->getNumUses();
3028 NumUses = *NumUses - /* this assume */ 1;
3029 if (*NumUses != 0)
3030 continue;
3031 AssumeOnlyValues.insert(I);
3032 for (const Value *Op : I->operands())
3033 if (auto *OpI = dyn_cast<Instruction>(Op))
3034 Worklist.push_back(OpI);
3035 }
3036 };
3037
3038 for (Instruction &I : instructions(&F)) {
3039 bool IsInterestingOpcode = false;
3040
3041 // To allow easy access to all instructions in a function with a given
3042 // opcode we store them in the InfoCache. As not all opcodes are interesting
3043 // to concrete attributes we only cache the ones that are as identified in
3044 // the following switch.
3045 // Note: There are no concrete attributes now so this is initially empty.
3046 switch (I.getOpcode()) {
3047 default:
3048 assert(!isa<CallBase>(&I) &&
3049 "New call base instruction type needs to be known in the "
3050 "Attributor.");
3051 break;
3052 case Instruction::Call:
3053 // Calls are interesting on their own, additionally:
3054 // For `llvm.assume` calls we also fill the KnowledgeMap as we find them.
3055 // For `must-tail` calls we remember the caller and callee.
3056 if (auto *Assume = dyn_cast<AssumeInst>(&I)) {
3057 AssumeOnlyValues.insert(Assume);
3058 fillMapFromAssume(*Assume, KnowledgeMap);
3059 AddToAssumeUsesMap(*Assume->getArgOperand(0));
3060 } else if (cast<CallInst>(I).isMustTailCall()) {
3061 FI.ContainsMustTailCall = true;
3062 if (const Function *Callee = cast<CallInst>(I).getCalledFunction())
3063 getFunctionInfo(*Callee).CalledViaMustTail = true;
3064 }
3065 [[fallthrough]];
3066 case Instruction::CallBr:
3067 case Instruction::Invoke:
3068 case Instruction::CleanupRet:
3069 case Instruction::CatchSwitch:
3070 case Instruction::AtomicRMW:
3071 case Instruction::AtomicCmpXchg:
3072 case Instruction::Br:
3073 case Instruction::Resume:
3074 case Instruction::Ret:
3075 case Instruction::Load:
3076 // The alignment of a pointer is interesting for loads.
3077 case Instruction::Store:
3078 // The alignment of a pointer is interesting for stores.
3079 case Instruction::Alloca:
3080 case Instruction::AddrSpaceCast:
3081 IsInterestingOpcode = true;
3082 }
3083 if (IsInterestingOpcode) {
3084 auto *&Insts = FI.OpcodeInstMap[I.getOpcode()];
3085 if (!Insts)
3086 Insts = new (Allocator) InstructionVectorTy();
3087 Insts->push_back(&I);
3088 }
3089 if (I.mayReadOrWriteMemory())
3090 FI.RWInsts.push_back(&I);
3091 }
3092
3093 if (F.hasFnAttribute(Attribute::AlwaysInline) &&
3094 isInlineViable(F).isSuccess())
3095 InlineableFunctions.insert(&F);
3096 }
3097
getAAResultsForFunction(const Function & F)3098 AAResults *InformationCache::getAAResultsForFunction(const Function &F) {
3099 return AG.getAnalysis<AAManager>(F);
3100 }
3101
~FunctionInfo()3102 InformationCache::FunctionInfo::~FunctionInfo() {
3103 // The instruction vectors are allocated using a BumpPtrAllocator, we need to
3104 // manually destroy them.
3105 for (auto &It : OpcodeInstMap)
3106 It.getSecond()->~InstructionVectorTy();
3107 }
3108
recordDependence(const AbstractAttribute & FromAA,const AbstractAttribute & ToAA,DepClassTy DepClass)3109 void Attributor::recordDependence(const AbstractAttribute &FromAA,
3110 const AbstractAttribute &ToAA,
3111 DepClassTy DepClass) {
3112 if (DepClass == DepClassTy::NONE)
3113 return;
3114 // If we are outside of an update, thus before the actual fixpoint iteration
3115 // started (= when we create AAs), we do not track dependences because we will
3116 // put all AAs into the initial worklist anyway.
3117 if (DependenceStack.empty())
3118 return;
3119 if (FromAA.getState().isAtFixpoint())
3120 return;
3121 DependenceStack.back()->push_back({&FromAA, &ToAA, DepClass});
3122 }
3123
rememberDependences()3124 void Attributor::rememberDependences() {
3125 assert(!DependenceStack.empty() && "No dependences to remember!");
3126
3127 for (DepInfo &DI : *DependenceStack.back()) {
3128 assert((DI.DepClass == DepClassTy::REQUIRED ||
3129 DI.DepClass == DepClassTy::OPTIONAL) &&
3130 "Expected required or optional dependence (1 bit)!");
3131 auto &DepAAs = const_cast<AbstractAttribute &>(*DI.FromAA).Deps;
3132 DepAAs.push_back(AbstractAttribute::DepTy(
3133 const_cast<AbstractAttribute *>(DI.ToAA), unsigned(DI.DepClass)));
3134 }
3135 }
3136
identifyDefaultAbstractAttributes(Function & F)3137 void Attributor::identifyDefaultAbstractAttributes(Function &F) {
3138 if (!VisitedFunctions.insert(&F).second)
3139 return;
3140 if (F.isDeclaration())
3141 return;
3142
3143 // In non-module runs we need to look at the call sites of a function to
3144 // determine if it is part of a must-tail call edge. This will influence what
3145 // attributes we can derive.
3146 InformationCache::FunctionInfo &FI = InfoCache.getFunctionInfo(F);
3147 if (!isModulePass() && !FI.CalledViaMustTail) {
3148 for (const Use &U : F.uses())
3149 if (const auto *CB = dyn_cast<CallBase>(U.getUser()))
3150 if (CB->isCallee(&U) && CB->isMustTailCall())
3151 FI.CalledViaMustTail = true;
3152 }
3153
3154 IRPosition FPos = IRPosition::function(F);
3155
3156 // Check for dead BasicBlocks in every function.
3157 // We need dead instruction detection because we do not want to deal with
3158 // broken IR in which SSA rules do not apply.
3159 getOrCreateAAFor<AAIsDead>(FPos);
3160
3161 // Every function might be "will-return".
3162 getOrCreateAAFor<AAWillReturn>(FPos);
3163
3164 // Every function might contain instructions that cause "undefined behavior".
3165 getOrCreateAAFor<AAUndefinedBehavior>(FPos);
3166
3167 // Every function can be nounwind.
3168 getOrCreateAAFor<AANoUnwind>(FPos);
3169
3170 // Every function might be marked "nosync"
3171 getOrCreateAAFor<AANoSync>(FPos);
3172
3173 // Every function might be "no-free".
3174 getOrCreateAAFor<AANoFree>(FPos);
3175
3176 // Every function might be "no-return".
3177 getOrCreateAAFor<AANoReturn>(FPos);
3178
3179 // Every function might be "no-recurse".
3180 getOrCreateAAFor<AANoRecurse>(FPos);
3181
3182 // Every function might be "readnone/readonly/writeonly/...".
3183 getOrCreateAAFor<AAMemoryBehavior>(FPos);
3184
3185 // Every function can be "readnone/argmemonly/inaccessiblememonly/...".
3186 getOrCreateAAFor<AAMemoryLocation>(FPos);
3187
3188 // Every function can track active assumptions.
3189 getOrCreateAAFor<AAAssumptionInfo>(FPos);
3190
3191 // Every function might be applicable for Heap-To-Stack conversion.
3192 if (EnableHeapToStack)
3193 getOrCreateAAFor<AAHeapToStack>(FPos);
3194
3195 // Return attributes are only appropriate if the return type is non void.
3196 Type *ReturnType = F.getReturnType();
3197 if (!ReturnType->isVoidTy()) {
3198 // Argument attribute "returned" --- Create only one per function even
3199 // though it is an argument attribute.
3200 getOrCreateAAFor<AAReturnedValues>(FPos);
3201
3202 IRPosition RetPos = IRPosition::returned(F);
3203
3204 // Every returned value might be dead.
3205 getOrCreateAAFor<AAIsDead>(RetPos);
3206
3207 // Every function might be simplified.
3208 bool UsedAssumedInformation = false;
3209 getAssumedSimplified(RetPos, nullptr, UsedAssumedInformation,
3210 AA::Intraprocedural);
3211
3212 // Every returned value might be marked noundef.
3213 getOrCreateAAFor<AANoUndef>(RetPos);
3214
3215 if (ReturnType->isPointerTy()) {
3216
3217 // Every function with pointer return type might be marked align.
3218 getOrCreateAAFor<AAAlign>(RetPos);
3219
3220 // Every function with pointer return type might be marked nonnull.
3221 getOrCreateAAFor<AANonNull>(RetPos);
3222
3223 // Every function with pointer return type might be marked noalias.
3224 getOrCreateAAFor<AANoAlias>(RetPos);
3225
3226 // Every function with pointer return type might be marked
3227 // dereferenceable.
3228 getOrCreateAAFor<AADereferenceable>(RetPos);
3229 }
3230 }
3231
3232 for (Argument &Arg : F.args()) {
3233 IRPosition ArgPos = IRPosition::argument(Arg);
3234
3235 // Every argument might be simplified. We have to go through the Attributor
3236 // interface though as outside AAs can register custom simplification
3237 // callbacks.
3238 bool UsedAssumedInformation = false;
3239 getAssumedSimplified(ArgPos, /* AA */ nullptr, UsedAssumedInformation,
3240 AA::Intraprocedural);
3241
3242 // Every argument might be dead.
3243 getOrCreateAAFor<AAIsDead>(ArgPos);
3244
3245 // Every argument might be marked noundef.
3246 getOrCreateAAFor<AANoUndef>(ArgPos);
3247
3248 if (Arg.getType()->isPointerTy()) {
3249 // Every argument with pointer type might be marked nonnull.
3250 getOrCreateAAFor<AANonNull>(ArgPos);
3251
3252 // Every argument with pointer type might be marked noalias.
3253 getOrCreateAAFor<AANoAlias>(ArgPos);
3254
3255 // Every argument with pointer type might be marked dereferenceable.
3256 getOrCreateAAFor<AADereferenceable>(ArgPos);
3257
3258 // Every argument with pointer type might be marked align.
3259 getOrCreateAAFor<AAAlign>(ArgPos);
3260
3261 // Every argument with pointer type might be marked nocapture.
3262 getOrCreateAAFor<AANoCapture>(ArgPos);
3263
3264 // Every argument with pointer type might be marked
3265 // "readnone/readonly/writeonly/..."
3266 getOrCreateAAFor<AAMemoryBehavior>(ArgPos);
3267
3268 // Every argument with pointer type might be marked nofree.
3269 getOrCreateAAFor<AANoFree>(ArgPos);
3270
3271 // Every argument with pointer type might be privatizable (or promotable)
3272 getOrCreateAAFor<AAPrivatizablePtr>(ArgPos);
3273 }
3274 }
3275
3276 auto CallSitePred = [&](Instruction &I) -> bool {
3277 auto &CB = cast<CallBase>(I);
3278 IRPosition CBInstPos = IRPosition::inst(CB);
3279 IRPosition CBFnPos = IRPosition::callsite_function(CB);
3280
3281 // Call sites might be dead if they do not have side effects and no live
3282 // users. The return value might be dead if there are no live users.
3283 getOrCreateAAFor<AAIsDead>(CBInstPos);
3284
3285 Function *Callee = CB.getCalledFunction();
3286 // TODO: Even if the callee is not known now we might be able to simplify
3287 // the call/callee.
3288 if (!Callee)
3289 return true;
3290
3291 // Every call site can track active assumptions.
3292 getOrCreateAAFor<AAAssumptionInfo>(CBFnPos);
3293
3294 // Skip declarations except if annotations on their call sites were
3295 // explicitly requested.
3296 if (!AnnotateDeclarationCallSites && Callee->isDeclaration() &&
3297 !Callee->hasMetadata(LLVMContext::MD_callback))
3298 return true;
3299
3300 if (!Callee->getReturnType()->isVoidTy() && !CB.use_empty()) {
3301
3302 IRPosition CBRetPos = IRPosition::callsite_returned(CB);
3303 bool UsedAssumedInformation = false;
3304 getAssumedSimplified(CBRetPos, nullptr, UsedAssumedInformation,
3305 AA::Intraprocedural);
3306 }
3307
3308 for (int I = 0, E = CB.arg_size(); I < E; ++I) {
3309
3310 IRPosition CBArgPos = IRPosition::callsite_argument(CB, I);
3311
3312 // Every call site argument might be dead.
3313 getOrCreateAAFor<AAIsDead>(CBArgPos);
3314
3315 // Call site argument might be simplified. We have to go through the
3316 // Attributor interface though as outside AAs can register custom
3317 // simplification callbacks.
3318 bool UsedAssumedInformation = false;
3319 getAssumedSimplified(CBArgPos, /* AA */ nullptr, UsedAssumedInformation,
3320 AA::Intraprocedural);
3321
3322 // Every call site argument might be marked "noundef".
3323 getOrCreateAAFor<AANoUndef>(CBArgPos);
3324
3325 if (!CB.getArgOperand(I)->getType()->isPointerTy())
3326 continue;
3327
3328 // Call site argument attribute "non-null".
3329 getOrCreateAAFor<AANonNull>(CBArgPos);
3330
3331 // Call site argument attribute "nocapture".
3332 getOrCreateAAFor<AANoCapture>(CBArgPos);
3333
3334 // Call site argument attribute "no-alias".
3335 getOrCreateAAFor<AANoAlias>(CBArgPos);
3336
3337 // Call site argument attribute "dereferenceable".
3338 getOrCreateAAFor<AADereferenceable>(CBArgPos);
3339
3340 // Call site argument attribute "align".
3341 getOrCreateAAFor<AAAlign>(CBArgPos);
3342
3343 // Call site argument attribute
3344 // "readnone/readonly/writeonly/..."
3345 getOrCreateAAFor<AAMemoryBehavior>(CBArgPos);
3346
3347 // Call site argument attribute "nofree".
3348 getOrCreateAAFor<AANoFree>(CBArgPos);
3349 }
3350 return true;
3351 };
3352
3353 auto &OpcodeInstMap = InfoCache.getOpcodeInstMapForFunction(F);
3354 bool Success;
3355 bool UsedAssumedInformation = false;
3356 Success = checkForAllInstructionsImpl(
3357 nullptr, OpcodeInstMap, CallSitePred, nullptr, nullptr,
3358 {(unsigned)Instruction::Invoke, (unsigned)Instruction::CallBr,
3359 (unsigned)Instruction::Call},
3360 UsedAssumedInformation);
3361 (void)Success;
3362 assert(Success && "Expected the check call to be successful!");
3363
3364 auto LoadStorePred = [&](Instruction &I) -> bool {
3365 if (isa<LoadInst>(I)) {
3366 getOrCreateAAFor<AAAlign>(
3367 IRPosition::value(*cast<LoadInst>(I).getPointerOperand()));
3368 if (SimplifyAllLoads)
3369 getAssumedSimplified(IRPosition::value(I), nullptr,
3370 UsedAssumedInformation, AA::Intraprocedural);
3371 } else {
3372 auto &SI = cast<StoreInst>(I);
3373 getOrCreateAAFor<AAIsDead>(IRPosition::inst(I));
3374 getAssumedSimplified(IRPosition::value(*SI.getValueOperand()), nullptr,
3375 UsedAssumedInformation, AA::Intraprocedural);
3376 getOrCreateAAFor<AAAlign>(IRPosition::value(*SI.getPointerOperand()));
3377 }
3378 return true;
3379 };
3380 Success = checkForAllInstructionsImpl(
3381 nullptr, OpcodeInstMap, LoadStorePred, nullptr, nullptr,
3382 {(unsigned)Instruction::Load, (unsigned)Instruction::Store},
3383 UsedAssumedInformation);
3384 (void)Success;
3385 assert(Success && "Expected the check call to be successful!");
3386 }
3387
3388 /// Helpers to ease debugging through output streams and print calls.
3389 ///
3390 ///{
operator <<(raw_ostream & OS,ChangeStatus S)3391 raw_ostream &llvm::operator<<(raw_ostream &OS, ChangeStatus S) {
3392 return OS << (S == ChangeStatus::CHANGED ? "changed" : "unchanged");
3393 }
3394
operator <<(raw_ostream & OS,IRPosition::Kind AP)3395 raw_ostream &llvm::operator<<(raw_ostream &OS, IRPosition::Kind AP) {
3396 switch (AP) {
3397 case IRPosition::IRP_INVALID:
3398 return OS << "inv";
3399 case IRPosition::IRP_FLOAT:
3400 return OS << "flt";
3401 case IRPosition::IRP_RETURNED:
3402 return OS << "fn_ret";
3403 case IRPosition::IRP_CALL_SITE_RETURNED:
3404 return OS << "cs_ret";
3405 case IRPosition::IRP_FUNCTION:
3406 return OS << "fn";
3407 case IRPosition::IRP_CALL_SITE:
3408 return OS << "cs";
3409 case IRPosition::IRP_ARGUMENT:
3410 return OS << "arg";
3411 case IRPosition::IRP_CALL_SITE_ARGUMENT:
3412 return OS << "cs_arg";
3413 }
3414 llvm_unreachable("Unknown attribute position!");
3415 }
3416
operator <<(raw_ostream & OS,const IRPosition & Pos)3417 raw_ostream &llvm::operator<<(raw_ostream &OS, const IRPosition &Pos) {
3418 const Value &AV = Pos.getAssociatedValue();
3419 OS << "{" << Pos.getPositionKind() << ":" << AV.getName() << " ["
3420 << Pos.getAnchorValue().getName() << "@" << Pos.getCallSiteArgNo() << "]";
3421
3422 if (Pos.hasCallBaseContext())
3423 OS << "[cb_context:" << *Pos.getCallBaseContext() << "]";
3424 return OS << "}";
3425 }
3426
operator <<(raw_ostream & OS,const IntegerRangeState & S)3427 raw_ostream &llvm::operator<<(raw_ostream &OS, const IntegerRangeState &S) {
3428 OS << "range-state(" << S.getBitWidth() << ")<";
3429 S.getKnown().print(OS);
3430 OS << " / ";
3431 S.getAssumed().print(OS);
3432 OS << ">";
3433
3434 return OS << static_cast<const AbstractState &>(S);
3435 }
3436
operator <<(raw_ostream & OS,const AbstractState & S)3437 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractState &S) {
3438 return OS << (!S.isValidState() ? "top" : (S.isAtFixpoint() ? "fix" : ""));
3439 }
3440
operator <<(raw_ostream & OS,const AbstractAttribute & AA)3441 raw_ostream &llvm::operator<<(raw_ostream &OS, const AbstractAttribute &AA) {
3442 AA.print(OS);
3443 return OS;
3444 }
3445
operator <<(raw_ostream & OS,const PotentialConstantIntValuesState & S)3446 raw_ostream &llvm::operator<<(raw_ostream &OS,
3447 const PotentialConstantIntValuesState &S) {
3448 OS << "set-state(< {";
3449 if (!S.isValidState())
3450 OS << "full-set";
3451 else {
3452 for (const auto &It : S.getAssumedSet())
3453 OS << It << ", ";
3454 if (S.undefIsContained())
3455 OS << "undef ";
3456 }
3457 OS << "} >)";
3458
3459 return OS;
3460 }
3461
operator <<(raw_ostream & OS,const PotentialLLVMValuesState & S)3462 raw_ostream &llvm::operator<<(raw_ostream &OS,
3463 const PotentialLLVMValuesState &S) {
3464 OS << "set-state(< {";
3465 if (!S.isValidState())
3466 OS << "full-set";
3467 else {
3468 for (const auto &It : S.getAssumedSet()) {
3469 if (auto *F = dyn_cast<Function>(It.first.getValue()))
3470 OS << "@" << F->getName() << "[" << int(It.second) << "], ";
3471 else
3472 OS << *It.first.getValue() << "[" << int(It.second) << "], ";
3473 }
3474 if (S.undefIsContained())
3475 OS << "undef ";
3476 }
3477 OS << "} >)";
3478
3479 return OS;
3480 }
3481
print(raw_ostream & OS) const3482 void AbstractAttribute::print(raw_ostream &OS) const {
3483 OS << "[";
3484 OS << getName();
3485 OS << "] for CtxI ";
3486
3487 if (auto *I = getCtxI()) {
3488 OS << "'";
3489 I->print(OS);
3490 OS << "'";
3491 } else
3492 OS << "<<null inst>>";
3493
3494 OS << " at position " << getIRPosition() << " with state " << getAsStr()
3495 << '\n';
3496 }
3497
printWithDeps(raw_ostream & OS) const3498 void AbstractAttribute::printWithDeps(raw_ostream &OS) const {
3499 print(OS);
3500
3501 for (const auto &DepAA : Deps) {
3502 auto *AA = DepAA.getPointer();
3503 OS << " updates ";
3504 AA->print(OS);
3505 }
3506
3507 OS << '\n';
3508 }
3509
operator <<(raw_ostream & OS,const AAPointerInfo::Access & Acc)3510 raw_ostream &llvm::operator<<(raw_ostream &OS,
3511 const AAPointerInfo::Access &Acc) {
3512 OS << " [" << Acc.getKind() << "] " << *Acc.getRemoteInst();
3513 if (Acc.getLocalInst() != Acc.getRemoteInst())
3514 OS << " via " << *Acc.getLocalInst();
3515 if (Acc.getContent()) {
3516 if (*Acc.getContent())
3517 OS << " [" << **Acc.getContent() << "]";
3518 else
3519 OS << " [ <unknown> ]";
3520 }
3521 return OS;
3522 }
3523 ///}
3524
3525 /// ----------------------------------------------------------------------------
3526 /// Pass (Manager) Boilerplate
3527 /// ----------------------------------------------------------------------------
3528
runAttributorOnFunctions(InformationCache & InfoCache,SetVector<Function * > & Functions,AnalysisGetter & AG,CallGraphUpdater & CGUpdater,bool DeleteFns,bool IsModulePass)3529 static bool runAttributorOnFunctions(InformationCache &InfoCache,
3530 SetVector<Function *> &Functions,
3531 AnalysisGetter &AG,
3532 CallGraphUpdater &CGUpdater,
3533 bool DeleteFns, bool IsModulePass) {
3534 if (Functions.empty())
3535 return false;
3536
3537 LLVM_DEBUG({
3538 dbgs() << "[Attributor] Run on module with " << Functions.size()
3539 << " functions:\n";
3540 for (Function *Fn : Functions)
3541 dbgs() << " - " << Fn->getName() << "\n";
3542 });
3543
3544 // Create an Attributor and initially empty information cache that is filled
3545 // while we identify default attribute opportunities.
3546 AttributorConfig AC(CGUpdater);
3547 AC.IsModulePass = IsModulePass;
3548 AC.DeleteFns = DeleteFns;
3549 Attributor A(Functions, InfoCache, AC);
3550
3551 // Create shallow wrappers for all functions that are not IPO amendable
3552 if (AllowShallowWrappers)
3553 for (Function *F : Functions)
3554 if (!A.isFunctionIPOAmendable(*F))
3555 Attributor::createShallowWrapper(*F);
3556
3557 // Internalize non-exact functions
3558 // TODO: for now we eagerly internalize functions without calculating the
3559 // cost, we need a cost interface to determine whether internalizing
3560 // a function is "beneficial"
3561 if (AllowDeepWrapper) {
3562 unsigned FunSize = Functions.size();
3563 for (unsigned u = 0; u < FunSize; u++) {
3564 Function *F = Functions[u];
3565 if (!F->isDeclaration() && !F->isDefinitionExact() && F->getNumUses() &&
3566 !GlobalValue::isInterposableLinkage(F->getLinkage())) {
3567 Function *NewF = Attributor::internalizeFunction(*F);
3568 assert(NewF && "Could not internalize function.");
3569 Functions.insert(NewF);
3570
3571 // Update call graph
3572 CGUpdater.replaceFunctionWith(*F, *NewF);
3573 for (const Use &U : NewF->uses())
3574 if (CallBase *CB = dyn_cast<CallBase>(U.getUser())) {
3575 auto *CallerF = CB->getCaller();
3576 CGUpdater.reanalyzeFunction(*CallerF);
3577 }
3578 }
3579 }
3580 }
3581
3582 for (Function *F : Functions) {
3583 if (F->hasExactDefinition())
3584 NumFnWithExactDefinition++;
3585 else
3586 NumFnWithoutExactDefinition++;
3587
3588 // We look at internal functions only on-demand but if any use is not a
3589 // direct call or outside the current set of analyzed functions, we have
3590 // to do it eagerly.
3591 if (F->hasLocalLinkage()) {
3592 if (llvm::all_of(F->uses(), [&Functions](const Use &U) {
3593 const auto *CB = dyn_cast<CallBase>(U.getUser());
3594 return CB && CB->isCallee(&U) &&
3595 Functions.count(const_cast<Function *>(CB->getCaller()));
3596 }))
3597 continue;
3598 }
3599
3600 // Populate the Attributor with abstract attribute opportunities in the
3601 // function and the information cache with IR information.
3602 A.identifyDefaultAbstractAttributes(*F);
3603 }
3604
3605 ChangeStatus Changed = A.run();
3606
3607 LLVM_DEBUG(dbgs() << "[Attributor] Done with " << Functions.size()
3608 << " functions, result: " << Changed << ".\n");
3609 return Changed == ChangeStatus::CHANGED;
3610 }
3611
viewGraph()3612 void AADepGraph::viewGraph() { llvm::ViewGraph(this, "Dependency Graph"); }
3613
dumpGraph()3614 void AADepGraph::dumpGraph() {
3615 static std::atomic<int> CallTimes;
3616 std::string Prefix;
3617
3618 if (!DepGraphDotFileNamePrefix.empty())
3619 Prefix = DepGraphDotFileNamePrefix;
3620 else
3621 Prefix = "dep_graph";
3622 std::string Filename =
3623 Prefix + "_" + std::to_string(CallTimes.load()) + ".dot";
3624
3625 outs() << "Dependency graph dump to " << Filename << ".\n";
3626
3627 std::error_code EC;
3628
3629 raw_fd_ostream File(Filename, EC, sys::fs::OF_TextWithCRLF);
3630 if (!EC)
3631 llvm::WriteGraph(File, this);
3632
3633 CallTimes++;
3634 }
3635
print()3636 void AADepGraph::print() {
3637 for (auto DepAA : SyntheticRoot.Deps)
3638 cast<AbstractAttribute>(DepAA.getPointer())->printWithDeps(outs());
3639 }
3640
run(Module & M,ModuleAnalysisManager & AM)3641 PreservedAnalyses AttributorPass::run(Module &M, ModuleAnalysisManager &AM) {
3642 FunctionAnalysisManager &FAM =
3643 AM.getResult<FunctionAnalysisManagerModuleProxy>(M).getManager();
3644 AnalysisGetter AG(FAM);
3645
3646 SetVector<Function *> Functions;
3647 for (Function &F : M)
3648 Functions.insert(&F);
3649
3650 CallGraphUpdater CGUpdater;
3651 BumpPtrAllocator Allocator;
3652 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3653 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3654 /* DeleteFns */ true, /* IsModulePass */ true)) {
3655 // FIXME: Think about passes we will preserve and add them here.
3656 return PreservedAnalyses::none();
3657 }
3658 return PreservedAnalyses::all();
3659 }
3660
run(LazyCallGraph::SCC & C,CGSCCAnalysisManager & AM,LazyCallGraph & CG,CGSCCUpdateResult & UR)3661 PreservedAnalyses AttributorCGSCCPass::run(LazyCallGraph::SCC &C,
3662 CGSCCAnalysisManager &AM,
3663 LazyCallGraph &CG,
3664 CGSCCUpdateResult &UR) {
3665 FunctionAnalysisManager &FAM =
3666 AM.getResult<FunctionAnalysisManagerCGSCCProxy>(C, CG).getManager();
3667 AnalysisGetter AG(FAM);
3668
3669 SetVector<Function *> Functions;
3670 for (LazyCallGraph::Node &N : C)
3671 Functions.insert(&N.getFunction());
3672
3673 if (Functions.empty())
3674 return PreservedAnalyses::all();
3675
3676 Module &M = *Functions.back()->getParent();
3677 CallGraphUpdater CGUpdater;
3678 CGUpdater.initialize(CG, C, AM, UR);
3679 BumpPtrAllocator Allocator;
3680 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3681 if (runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3682 /* DeleteFns */ false,
3683 /* IsModulePass */ false)) {
3684 // FIXME: Think about passes we will preserve and add them here.
3685 PreservedAnalyses PA;
3686 PA.preserve<FunctionAnalysisManagerCGSCCProxy>();
3687 return PA;
3688 }
3689 return PreservedAnalyses::all();
3690 }
3691
3692 namespace llvm {
3693
3694 template <> struct GraphTraits<AADepGraphNode *> {
3695 using NodeRef = AADepGraphNode *;
3696 using DepTy = PointerIntPair<AADepGraphNode *, 1>;
3697 using EdgeRef = PointerIntPair<AADepGraphNode *, 1>;
3698
getEntryNodellvm::GraphTraits3699 static NodeRef getEntryNode(AADepGraphNode *DGN) { return DGN; }
DepGetValllvm::GraphTraits3700 static NodeRef DepGetVal(DepTy &DT) { return DT.getPointer(); }
3701
3702 using ChildIteratorType =
3703 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
3704 using ChildEdgeIteratorType = TinyPtrVector<DepTy>::iterator;
3705
child_beginllvm::GraphTraits3706 static ChildIteratorType child_begin(NodeRef N) { return N->child_begin(); }
3707
child_endllvm::GraphTraits3708 static ChildIteratorType child_end(NodeRef N) { return N->child_end(); }
3709 };
3710
3711 template <>
3712 struct GraphTraits<AADepGraph *> : public GraphTraits<AADepGraphNode *> {
getEntryNodellvm::GraphTraits3713 static NodeRef getEntryNode(AADepGraph *DG) { return DG->GetEntryNode(); }
3714
3715 using nodes_iterator =
3716 mapped_iterator<TinyPtrVector<DepTy>::iterator, decltype(&DepGetVal)>;
3717
nodes_beginllvm::GraphTraits3718 static nodes_iterator nodes_begin(AADepGraph *DG) { return DG->begin(); }
3719
nodes_endllvm::GraphTraits3720 static nodes_iterator nodes_end(AADepGraph *DG) { return DG->end(); }
3721 };
3722
3723 template <> struct DOTGraphTraits<AADepGraph *> : public DefaultDOTGraphTraits {
DOTGraphTraitsllvm::DOTGraphTraits3724 DOTGraphTraits(bool isSimple = false) : DefaultDOTGraphTraits(isSimple) {}
3725
getNodeLabelllvm::DOTGraphTraits3726 static std::string getNodeLabel(const AADepGraphNode *Node,
3727 const AADepGraph *DG) {
3728 std::string AAString;
3729 raw_string_ostream O(AAString);
3730 Node->print(O);
3731 return AAString;
3732 }
3733 };
3734
3735 } // end namespace llvm
3736
3737 namespace {
3738
3739 struct AttributorLegacyPass : public ModulePass {
3740 static char ID;
3741
AttributorLegacyPass__anon81af11021a11::AttributorLegacyPass3742 AttributorLegacyPass() : ModulePass(ID) {
3743 initializeAttributorLegacyPassPass(*PassRegistry::getPassRegistry());
3744 }
3745
runOnModule__anon81af11021a11::AttributorLegacyPass3746 bool runOnModule(Module &M) override {
3747 if (skipModule(M))
3748 return false;
3749
3750 AnalysisGetter AG;
3751 SetVector<Function *> Functions;
3752 for (Function &F : M)
3753 Functions.insert(&F);
3754
3755 CallGraphUpdater CGUpdater;
3756 BumpPtrAllocator Allocator;
3757 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ nullptr);
3758 return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3759 /* DeleteFns*/ true,
3760 /* IsModulePass */ true);
3761 }
3762
getAnalysisUsage__anon81af11021a11::AttributorLegacyPass3763 void getAnalysisUsage(AnalysisUsage &AU) const override {
3764 // FIXME: Think about passes we will preserve and add them here.
3765 AU.addRequired<TargetLibraryInfoWrapperPass>();
3766 }
3767 };
3768
3769 struct AttributorCGSCCLegacyPass : public CallGraphSCCPass {
3770 static char ID;
3771
AttributorCGSCCLegacyPass__anon81af11021a11::AttributorCGSCCLegacyPass3772 AttributorCGSCCLegacyPass() : CallGraphSCCPass(ID) {
3773 initializeAttributorCGSCCLegacyPassPass(*PassRegistry::getPassRegistry());
3774 }
3775
runOnSCC__anon81af11021a11::AttributorCGSCCLegacyPass3776 bool runOnSCC(CallGraphSCC &SCC) override {
3777 if (skipSCC(SCC))
3778 return false;
3779
3780 SetVector<Function *> Functions;
3781 for (CallGraphNode *CGN : SCC)
3782 if (Function *Fn = CGN->getFunction())
3783 if (!Fn->isDeclaration())
3784 Functions.insert(Fn);
3785
3786 if (Functions.empty())
3787 return false;
3788
3789 AnalysisGetter AG;
3790 CallGraph &CG = const_cast<CallGraph &>(SCC.getCallGraph());
3791 CallGraphUpdater CGUpdater;
3792 CGUpdater.initialize(CG, SCC);
3793 Module &M = *Functions.back()->getParent();
3794 BumpPtrAllocator Allocator;
3795 InformationCache InfoCache(M, AG, Allocator, /* CGSCC */ &Functions);
3796 return runAttributorOnFunctions(InfoCache, Functions, AG, CGUpdater,
3797 /* DeleteFns */ false,
3798 /* IsModulePass */ false);
3799 }
3800
getAnalysisUsage__anon81af11021a11::AttributorCGSCCLegacyPass3801 void getAnalysisUsage(AnalysisUsage &AU) const override {
3802 // FIXME: Think about passes we will preserve and add them here.
3803 AU.addRequired<TargetLibraryInfoWrapperPass>();
3804 CallGraphSCCPass::getAnalysisUsage(AU);
3805 }
3806 };
3807
3808 } // end anonymous namespace
3809
createAttributorLegacyPass()3810 Pass *llvm::createAttributorLegacyPass() { return new AttributorLegacyPass(); }
createAttributorCGSCCLegacyPass()3811 Pass *llvm::createAttributorCGSCCLegacyPass() {
3812 return new AttributorCGSCCLegacyPass();
3813 }
3814
3815 char AttributorLegacyPass::ID = 0;
3816 char AttributorCGSCCLegacyPass::ID = 0;
3817
3818 INITIALIZE_PASS_BEGIN(AttributorLegacyPass, "attributor",
3819 "Deduce and propagate attributes", false, false)
3820 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3821 INITIALIZE_PASS_END(AttributorLegacyPass, "attributor",
3822 "Deduce and propagate attributes", false, false)
3823 INITIALIZE_PASS_BEGIN(AttributorCGSCCLegacyPass, "attributor-cgscc",
3824 "Deduce and propagate attributes (CGSCC pass)", false,
3825 false)
3826 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
3827 INITIALIZE_PASS_DEPENDENCY(CallGraphWrapperPass)
3828 INITIALIZE_PASS_END(AttributorCGSCCLegacyPass, "attributor-cgscc",
3829 "Deduce and propagate attributes (CGSCC pass)", false,
3830 false)
3831