//===----------------------- AlignmentFromAssumptions.cpp -----------------===//
//                  Set Load/Store Alignments From Assumptions
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements a ScalarEvolution-based transformation to set
// the alignments of loads, stores, and memory intrinsics based on the truth
// expressions of assume intrinsics. The primary motivation is to handle
// complex alignment assumptions that apply to vector loads and stores that
// appear after vectorization and unrolling.
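//
// The alignment information typically arrives as an "align" operand bundle on
// an llvm.assume call; for example (illustrative IR):
//
//   call void @llvm.assume(i1 true) ["align"(ptr %a, i64 32)]
//
// An optional third bundle operand gives a byte offset: the assumed pointer
// lies that many bytes past an address with the stated alignment (see
// extractAlignmentInfo and getNewAlignment below).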
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/AlignmentFromAssumptions.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/ScalarEvolutionExpressions.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/InitializePasses.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"

#define AA_NAME "alignment-from-assumptions"
#define DEBUG_TYPE AA_NAME
using namespace llvm;

STATISTIC(NumLoadAlignChanged,
          "Number of loads changed by alignment assumptions");
STATISTIC(NumStoreAlignChanged,
          "Number of stores changed by alignment assumptions");
STATISTIC(NumMemIntAlignChanged,
          "Number of memory intrinsics changed by alignment assumptions");

namespace {
struct AlignmentFromAssumptions : public FunctionPass {
  static char ID; // Pass identification, replacement for typeid
  AlignmentFromAssumptions() : FunctionPass(ID) {
    initializeAlignmentFromAssumptionsPass(*PassRegistry::getPassRegistry());
  }

  bool runOnFunction(Function &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<AssumptionCacheTracker>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<DominatorTreeWrapperPass>();

    AU.setPreservesCFG();
    AU.addPreserved<AAResultsWrapperPass>();
    AU.addPreserved<GlobalsAAWrapperPass>();
    AU.addPreserved<LoopInfoWrapperPass>();
    AU.addPreserved<DominatorTreeWrapperPass>();
    AU.addPreserved<ScalarEvolutionWrapperPass>();
  }

  AlignmentFromAssumptionsPass Impl;
};
} // end anonymous namespace

char AlignmentFromAssumptions::ID = 0;
static const char aip_name[] = "Alignment from assumptions";
INITIALIZE_PASS_BEGIN(AlignmentFromAssumptions, AA_NAME,
                      aip_name, false, false)
INITIALIZE_PASS_DEPENDENCY(AssumptionCacheTracker)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_END(AlignmentFromAssumptions, AA_NAME,
                    aip_name, false, false)

FunctionPass *llvm::createAlignmentFromAssumptionsPass() {
  return new AlignmentFromAssumptions();
}

// Given an expression for the (constant) alignment, AlignSCEV, and an
// expression for the displacement between a pointer and the aligned address,
// DiffSCEV, compute the alignment of the displaced pointer if it can be
// reduced to a constant. Using SCEV to compute alignment handles the case
// where DiffSCEV is a recurrence with constant start such that the aligned
// offset is constant, e.g. {16,+,32} % 32 -> 16.
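// As another (hypothetical) constant case: if DiffSCEV is 20 and AlignSCEV is
// 16, then DiffUnits is 4, a nonzero power of 2, so the displaced pointer can
// still be assumed 4-byte aligned.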
static MaybeAlign getNewAlignmentDiff(const SCEV *DiffSCEV,
                                      const SCEV *AlignSCEV,
                                      ScalarEvolution *SE) {
  // DiffUnits = Diff % int64_t(Alignment)
  const SCEV *DiffUnitsSCEV = SE->getURemExpr(DiffSCEV, AlignSCEV);

  LLVM_DEBUG(dbgs() << "\talignment relative to " << *AlignSCEV << " is "
                    << *DiffUnitsSCEV << " (diff: " << *DiffSCEV << ")\n");

  if (const SCEVConstant *ConstDUSCEV =
          dyn_cast<SCEVConstant>(DiffUnitsSCEV)) {
    int64_t DiffUnits = ConstDUSCEV->getValue()->getSExtValue();

    // If the displacement is an exact multiple of the alignment, then the
    // displaced pointer has the same alignment as the aligned pointer, so
    // return the alignment value.
    if (!DiffUnits)
      return cast<SCEVConstant>(AlignSCEV)->getValue()->getAlignValue();

    // If the displacement is not an exact multiple, but the remainder is a
    // constant, then return this remainder (but only if it is a power of 2).
    uint64_t DiffUnitsAbs = std::abs(DiffUnits);
    if (isPowerOf2_64(DiffUnitsAbs))
      return Align(DiffUnitsAbs);
  }

  return std::nullopt;
}

// There is an address given by an offset OffSCEV from AASCEV which has an
// alignment AlignSCEV. Use that information, if possible, to compute a new
// alignment for Ptr.
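// For instance (a hypothetical case): if AASCEV is known 32-byte aligned
// (OffSCEV = 0) and Ptr is AASCEV plus 16 bytes, then DiffSCEV is 16 and the
// computed alignment of Ptr is 16.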
static Align getNewAlignment(const SCEV *AASCEV, const SCEV *AlignSCEV,
                             const SCEV *OffSCEV, Value *Ptr,
                             ScalarEvolution *SE) {
  const SCEV *PtrSCEV = SE->getSCEV(Ptr);
  // On a platform with 32-bit allocas, but 64-bit flat/global pointer sizes
  // (*cough* AMDGPU), the effective SCEV type of AASCEV and PtrSCEV
  // may disagree. Trunc/extend so they agree.
  PtrSCEV = SE->getTruncateOrZeroExtend(
      PtrSCEV, SE->getEffectiveSCEVType(AASCEV->getType()));
  const SCEV *DiffSCEV = SE->getMinusSCEV(PtrSCEV, AASCEV);
  if (isa<SCEVCouldNotCompute>(DiffSCEV))
    return Align(1);

  // On 32-bit platforms, DiffSCEV might now have type i32 -- we've always
  // zero-extended OffSCEV to i64, so make sure they agree again.
  DiffSCEV = SE->getNoopOrSignExtend(DiffSCEV, OffSCEV->getType());

  // What we really want to know is the overall offset to the aligned
  // address. This address is displaced by the provided offset.
  DiffSCEV = SE->getAddExpr(DiffSCEV, OffSCEV);

  LLVM_DEBUG(dbgs() << "AFI: alignment of " << *Ptr << " relative to "
                    << *AlignSCEV << " and offset " << *OffSCEV
                    << " using diff " << *DiffSCEV << "\n");

  if (MaybeAlign NewAlignment = getNewAlignmentDiff(DiffSCEV, AlignSCEV, SE)) {
    LLVM_DEBUG(dbgs() << "\tnew alignment: " << DebugStr(NewAlignment) << "\n");
    return *NewAlignment;
  }

  if (const SCEVAddRecExpr *DiffARSCEV = dyn_cast<SCEVAddRecExpr>(DiffSCEV)) {
    // The relative offset to the alignment assumption did not yield a
    // constant, but we should try harder: if we assume that a is 32-byte
    // aligned, then in
    //   for (i = 0; i < 1024; i += 4) r += a[i];
    // not all of the loads from a are 32-byte aligned; instead they alternate
    // between 32-byte and 16-byte alignment. As a result, the new alignment
    // will not be a constant, but can still be improved over the default
    // (of 4) to 16.
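    // For instance, assuming i32 elements in the example above, DiffSCEV is
    // the recurrence {0,+,16}: the start yields alignment 32 and the step
    // yields alignment 16, so the code below settles on 16.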

    const SCEV *DiffStartSCEV = DiffARSCEV->getStart();
    const SCEV *DiffIncSCEV = DiffARSCEV->getStepRecurrence(*SE);

    LLVM_DEBUG(dbgs() << "\ttrying start/inc alignment using start "
                      << *DiffStartSCEV << " and inc " << *DiffIncSCEV << "\n");

    // Now compute the new alignment using the displacement to the value in the
    // first iteration, and also the alignment using the per-iteration delta.
    // If these are the same, then use that answer. Otherwise, use the smaller
    // one (since both are powers of 2, the smaller always divides the larger).
    MaybeAlign NewAlignment = getNewAlignmentDiff(DiffStartSCEV, AlignSCEV, SE);
    MaybeAlign NewIncAlignment =
        getNewAlignmentDiff(DiffIncSCEV, AlignSCEV, SE);

    LLVM_DEBUG(dbgs() << "\tnew start alignment: " << DebugStr(NewAlignment)
                      << "\n");
    LLVM_DEBUG(dbgs() << "\tnew inc alignment: " << DebugStr(NewIncAlignment)
                      << "\n");

    if (!NewAlignment || !NewIncAlignment)
      return Align(1);

    const Align NewAlign = *NewAlignment;
    const Align NewIncAlign = *NewIncAlignment;
    if (NewAlign > NewIncAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: "
                        << DebugStr(NewIncAlign) << "\n");
      return NewIncAlign;
    }
    if (NewIncAlign > NewAlign) {
      LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                        << "\n");
      return NewAlign;
    }
    assert(NewIncAlign == NewAlign);
    LLVM_DEBUG(dbgs() << "\tnew start/inc alignment: " << DebugStr(NewAlign)
                      << "\n");
    return NewAlign;
  }

  return Align(1);
}

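// Decompose the "align" operand bundle at index Idx of the assume call I into
// the assumed pointer (AAPtr), its asserted alignment (AlignSCEV, which must
// be a compile-time constant), and an optional byte offset (OffSCEV, zero if
// absent). Returns false if the bundle is not an "align" bundle or the
// alignment is not constant.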
bool AlignmentFromAssumptionsPass::extractAlignmentInfo(CallInst *I,
                                                        unsigned Idx,
                                                        Value *&AAPtr,
                                                        const SCEV *&AlignSCEV,
                                                        const SCEV *&OffSCEV) {
  Type *Int64Ty = Type::getInt64Ty(I->getContext());
  OperandBundleUse AlignOB = I->getOperandBundleAt(Idx);
  if (AlignOB.getTagName() != "align")
    return false;
  assert(AlignOB.Inputs.size() >= 2);
  AAPtr = AlignOB.Inputs[0].get();
  // TODO: Consider accumulating the offset to the base.
  AAPtr = AAPtr->stripPointerCastsSameRepresentation();
  AlignSCEV = SE->getSCEV(AlignOB.Inputs[1].get());
  AlignSCEV = SE->getTruncateOrZeroExtend(AlignSCEV, Int64Ty);
  if (!isa<SCEVConstant>(AlignSCEV))
    // Added to suppress a crash because the consumer does not expect
    // non-constant alignments in the assume bundle. TODO: Consider
    // generalizing the caller.
    return false;
  if (AlignOB.Inputs.size() == 3)
    OffSCEV = SE->getSCEV(AlignOB.Inputs[2].get());
  else
    OffSCEV = SE->getZero(Int64Ty);
  OffSCEV = SE->getTruncateOrZeroExtend(OffSCEV, Int64Ty);
  return true;
}

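// Process the operand bundle at index Idx of the assume call ACall: if it is
// an "align" bundle, propagate the implied alignment to every load, store,
// and memory intrinsic reachable through users of the assumed pointer, at
// program points where the assumption provably holds.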
bool AlignmentFromAssumptionsPass::processAssumption(CallInst *ACall,
                                                     unsigned Idx) {
  Value *AAPtr;
  const SCEV *AlignSCEV, *OffSCEV;
  if (!extractAlignmentInfo(ACall, Idx, AAPtr, AlignSCEV, OffSCEV))
    return false;

  // Skip ConstantPointerNull and UndefValue. Assumptions on these shouldn't
  // affect other users.
  if (isa<ConstantData>(AAPtr))
    return false;

  const SCEV *AASCEV = SE->getSCEV(AAPtr);

  // Apply the assumption to all other users of the specified pointer.
  SmallPtrSet<Instruction *, 32> Visited;
  SmallVector<Instruction *, 16> WorkList;
  for (User *J : AAPtr->users()) {
    if (J == ACall)
      continue;

    if (Instruction *K = dyn_cast<Instruction>(J))
      WorkList.push_back(K);
  }

  while (!WorkList.empty()) {
    Instruction *J = WorkList.pop_back_val();
    if (LoadInst *LI = dyn_cast<LoadInst>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           LI->getPointerOperand(), SE);
      if (NewAlignment > LI->getAlign()) {
        LI->setAlignment(NewAlignment);
        ++NumLoadAlignChanged;
      }
    } else if (StoreInst *SI = dyn_cast<StoreInst>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewAlignment = getNewAlignment(AASCEV, AlignSCEV, OffSCEV,
                                           SI->getPointerOperand(), SE);
      if (NewAlignment > SI->getAlign()) {
        SI->setAlignment(NewAlignment);
        ++NumStoreAlignChanged;
      }
    } else if (MemIntrinsic *MI = dyn_cast<MemIntrinsic>(J)) {
      if (!isValidAssumeForContext(ACall, J, DT))
        continue;
      Align NewDestAlignment =
          getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MI->getDest(), SE);

      LLVM_DEBUG(dbgs() << "\tmem inst: " << DebugStr(NewDestAlignment)
                        << "\n";);
      if (NewDestAlignment > *MI->getDestAlign()) {
        MI->setDestAlignment(NewDestAlignment);
        ++NumMemIntAlignChanged;
      }

      // For memory transfers, there is also a source alignment that
      // can be set.
      if (MemTransferInst *MTI = dyn_cast<MemTransferInst>(MI)) {
        Align NewSrcAlignment =
            getNewAlignment(AASCEV, AlignSCEV, OffSCEV, MTI->getSource(), SE);

        LLVM_DEBUG(dbgs() << "\tmem trans: " << DebugStr(NewSrcAlignment)
                          << "\n";);

        if (NewSrcAlignment > *MTI->getSourceAlign()) {
          MTI->setSourceAlignment(NewSrcAlignment);
          ++NumMemIntAlignChanged;
        }
      }
    }

    // Now that we've updated that use of the pointer, look for other uses of
    // the pointer to update.
    Visited.insert(J);
    for (User *UJ : J->users()) {
      Instruction *K = cast<Instruction>(UJ);
      if (!Visited.count(K))
        WorkList.push_back(K);
    }
  }

  return true;
}

bool AlignmentFromAssumptions::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  auto &AC = getAnalysis<AssumptionCacheTracker>().getAssumptionCache(F);
  ScalarEvolution *SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  DominatorTree *DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();

  return Impl.runImpl(F, AC, SE, DT);
}

bool AlignmentFromAssumptionsPass::runImpl(Function &F, AssumptionCache &AC,
                                           ScalarEvolution *SE_,
                                           DominatorTree *DT_) {
  SE = SE_;
  DT = DT_;

  bool Changed = false;
  for (auto &AssumeVH : AC.assumptions())
    if (AssumeVH) {
      CallInst *Call = cast<CallInst>(AssumeVH);
      for (unsigned Idx = 0; Idx < Call->getNumOperandBundles(); Idx++)
        Changed |= processAssumption(Call, Idx);
    }

  return Changed;
}

PreservedAnalyses
AlignmentFromAssumptionsPass::run(Function &F, FunctionAnalysisManager &AM) {
  AssumptionCache &AC = AM.getResult<AssumptionAnalysis>(F);
  ScalarEvolution &SE = AM.getResult<ScalarEvolutionAnalysis>(F);
  DominatorTree &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, AC, &SE, &DT))
    return PreservedAnalyses::all();

  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<ScalarEvolutionAnalysis>();
  return PA;
}