//===-- AMDGPUAnnotateUniformValues.cpp - ---------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This pass adds amdgpu.uniform metadata to IR values so this information
/// can be used during instruction selection.
//
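// For example, a load through a uniform pointer that is provably not
// written within the kernel may end up annotated as (illustrative IR):
//
//   %ptr = getelementptr i32, i32 addrspace(1)* %arg, i32 0,
//            !amdgpu.uniform !0, !amdgpu.noclobber !0   ; with !0 = !{}
//   %val = load i32, i32 addrspace(1)* %ptr
//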
//===----------------------------------------------------------------------===//

#include "AMDGPU.h"
#include "Utils/AMDGPUBaseInfo.h"
#include "llvm/Analysis/LegacyDivergenceAnalysis.h"
#include "llvm/Analysis/MemorySSA.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/InitializePasses.h"

#define DEBUG_TYPE "amdgpu-annotate-uniform"

using namespace llvm;
namespace {

class AMDGPUAnnotateUniformValues : public FunctionPass,
                       public InstVisitor<AMDGPUAnnotateUniformValues> {
  LegacyDivergenceAnalysis *DA;
  MemorySSA *MSSA;
  DenseMap<Value *, GetElementPtrInst *> noClobberClones;
  bool isEntryFunc;

public:
  static char ID;
  AMDGPUAnnotateUniformValues() :
    FunctionPass(ID) { }
  bool doInitialization(Module &M) override;
  bool runOnFunction(Function &F) override;
  StringRef getPassName() const override {
    return "AMDGPU Annotate Uniform Values";
  }
  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<LegacyDivergenceAnalysis>();
    AU.addRequired<MemorySSAWrapperPass>();
    AU.setPreservesAll();
  }

  void visitBranchInst(BranchInst &I);
  void visitLoadInst(LoadInst &I);
  bool isClobberedInFunction(LoadInst *Load);
};

} // End anonymous namespace

INITIALIZE_PASS_BEGIN(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                      "Add AMDGPU uniform metadata", false, false)
INITIALIZE_PASS_DEPENDENCY(LegacyDivergenceAnalysis)
INITIALIZE_PASS_DEPENDENCY(MemorySSAWrapperPass)
INITIALIZE_PASS_END(AMDGPUAnnotateUniformValues, DEBUG_TYPE,
                    "Add AMDGPU uniform metadata", false, false)

char AMDGPUAnnotateUniformValues::ID = 0;

// Attach the marker metadata. The nodes carry no operands; their presence
// alone is what instruction selection later queries.
static void setUniformMetadata(Instruction *I) {
  I->setMetadata("amdgpu.uniform", MDNode::get(I->getContext(), {}));
}
static void setNoClobberMetadata(Instruction *I) {
  I->setMetadata("amdgpu.noclobber", MDNode::get(I->getContext(), {}));
}

bool AMDGPUAnnotateUniformValues::isClobberedInFunction(LoadInst *Load) {
  // The load is clobbered unless MemorySSA resolves its defining access to
  // the live-on-entry def, i.e. unless no store in this function can write
  // the loaded location before the load executes.
  const MemoryAccess *MA = MSSA->getWalker()->getClobberingMemoryAccess(Load);
  return !MSSA->isLiveOnEntryDef(MA);
}

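// Annotate branches whose condition is uniform across the wavefront;
// instruction selection can then use a scalar (SCC-based) branch instead of
// a divergent one. Illustrative IR after annotation:
//   br i1 %cond, label %then, label %else, !amdgpu.uniform !0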
void AMDGPUAnnotateUniformValues::visitBranchInst(BranchInst &I) {
  if (DA->isUniform(&I))
    setUniformMetadata(&I);
}

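// Annotate loads through uniform pointers. Within kernels we additionally
// try to prove the loaded memory is never written in the function, which
// lets instruction selection use scalar (SMEM) loads for global memory.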
void AMDGPUAnnotateUniformValues::visitLoadInst(LoadInst &I) {
  Value *Ptr = I.getPointerOperand();
  if (!DA->isUniform(Ptr))
    return;
  auto isGlobalLoad = [](LoadInst &Load) -> bool {
    return Load.getPointerAddressSpace() == AMDGPUAS::GLOBAL_ADDRESS;
  };
  // We track memory only up to the function boundary; a FunctionPass cannot
  // look beyond it. We can therefore prove memory not clobbered only for
  // locations that are live-in to an entry point, since no caller can have
  // modified them.
  Instruction *PtrI = dyn_cast<Instruction>(Ptr);

  if (!isEntryFunc) {
    if (PtrI)
      setUniformMetadata(PtrI);
    return;
  }

  bool NotClobbered = false;
  bool GlobalLoad = isGlobalLoad(I);
  if (PtrI)
    NotClobbered = GlobalLoad && !isClobberedInFunction(&I);
  else if (isa<Argument>(Ptr) || isa<GlobalValue>(Ptr)) {
    if (GlobalLoad && !isClobberedInFunction(&I)) {
      NotClobbered = true;
      // Reuse a previously created GEP clone of this pointer, if any.
      if (noClobberClones.count(Ptr)) {
        PtrI = noClobberClones[Ptr];
      } else {
        // The pointer is not an Instruction, so materialize a trivial
        // zero-index GEP that the metadata can be attached to.
        Function *F = I.getParent()->getParent();
        Value *Idx = Constant::getIntegerValue(
            Type::getInt32Ty(Ptr->getContext()), APInt(32, 0));
        // Insert the GEP at the entry block so it dominates all uses.
        PtrI = GetElementPtrInst::Create(I.getType(), Ptr,
                                         ArrayRef<Value *>(Idx), Twine(""),
                                         F->getEntryBlock().getFirstNonPHI());
        // Cache the clone so later loads of the same pointer reuse it.
        noClobberClones[Ptr] = cast<GetElementPtrInst>(PtrI);
      }
      I.replaceUsesOfWith(Ptr, PtrI);
    }
  }

  if (PtrI) {
    setUniformMetadata(PtrI);
    if (NotClobbered)
      setNoClobberMetadata(PtrI);
  }
}

bool AMDGPUAnnotateUniformValues::doInitialization(Module &M) {
  return false;
}

bool AMDGPUAnnotateUniformValues::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  DA = &getAnalysis<LegacyDivergenceAnalysis>();
  MSSA = &getAnalysis<MemorySSAWrapperPass>().getMSSA();
  isEntryFunc = AMDGPU::isEntryFunctionCC(F.getCallingConv());

  visit(F);
  noClobberClones.clear();
  return true;
}

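// Factory used by the AMDGPU pass pipeline; the pass must run before
// instruction selection so that ISel can see the annotations.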
FunctionPass *
llvm::createAMDGPUAnnotateUniformValues() {
  return new AMDGPUAnnotateUniformValues();
}