//===- AggressiveInstCombine.cpp ------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the aggressive expression pattern combiner classes.
// Currently, it handles expression patterns for:
//  * Truncate instruction
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/AggressiveInstCombine/AggressiveInstCombine.h"
#include "AggressiveInstCombineInternal.h"
#include "llvm-c/Initialization.h"
#include "llvm-c/Transforms/AggressiveInstCombine.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/LegacyPassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "aggressive-instcombine"

STATISTIC(NumAnyOrAllBitsSet, "Number of any/all-bits-set patterns folded");
STATISTIC(NumGuardedRotates,
          "Number of guarded rotates transformed into funnel shifts");
STATISTIC(NumPopCountRecognized, "Number of popcount idioms recognized");

namespace {
/// Contains expression pattern combiner logic.
/// This class provides the logic to match expression patterns and combine
/// them. It differs from the InstCombiner class in that each pattern combiner
/// runs only once, as opposed to InstCombine's multi-iteration scheme. This
/// allows a pattern combiner to have higher complexity than the O(1) cost
/// required by the instruction combiner.
class AggressiveInstCombinerLegacyPass : public FunctionPass {
public:
  static char ID; // Pass identification, replacement for typeid

  AggressiveInstCombinerLegacyPass() : FunctionPass(ID) {
    initializeAggressiveInstCombinerLegacyPassPass(
        *PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override;

  /// Run all expression pattern optimizations on the given \p F function.
  ///
  /// \param F function to optimize.
  /// \returns true if the IR is changed.
  bool runOnFunction(Function &F) override;
};
} // namespace

/// Match a pattern for a bitwise rotate operation that partially guards
/// against undefined behavior by branching around the rotation when the shift
/// amount is 0.
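///
/// A typical source idiom that produces this IR is a guarded rotate like the
/// one below (an illustrative C sketch, not taken from a specific test;
/// 'rotl32' and its parameter names are made up here):
///   unsigned rotl32(unsigned X, unsigned Amt) {
///     return Amt == 0 ? X : (X << Amt) | (X >> (32 - Amt));
///   }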
static bool foldGuardedRotateToFunnelShift(Instruction &I) {
  if (I.getOpcode() != Instruction::PHI || I.getNumOperands() != 2)
    return false;

  // As with the one-use checks below, this is not strictly necessary, but we
  // are being cautious to avoid potential perf regressions on targets that
  // do not actually have a rotate instruction (where the funnel shift would be
  // expanded back into math/shift/logic ops).
  if (!isPowerOf2_32(I.getType()->getScalarSizeInBits()))
    return false;

  // Match V to funnel shift left/right and capture the source operand and
  // shift amount in X and Y.
  auto matchRotate = [](Value *V, Value *&X, Value *&Y) {
    Value *L0, *L1, *R0, *R1;
    unsigned Width = V->getType()->getScalarSizeInBits();
    auto Sub = m_Sub(m_SpecificInt(Width), m_Value(R1));

    // rotate_left(X, Y) == (X << Y) | (X >> (Width - Y))
    auto RotL = m_OneUse(
        m_c_Or(m_Shl(m_Value(L0), m_Value(L1)), m_LShr(m_Value(R0), Sub)));
    if (RotL.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshl;
    }

    // rotate_right(X, Y) == (X >> Y) | (X << (Width - Y))
    auto RotR = m_OneUse(
        m_c_Or(m_LShr(m_Value(L0), m_Value(L1)), m_Shl(m_Value(R0), Sub)));
    if (RotR.match(V) && L0 == R0 && L1 == R1) {
      X = L0;
      Y = L1;
      return Intrinsic::fshr;
    }

    return Intrinsic::not_intrinsic;
  };

  // One phi operand must be a rotate operation, and the other phi operand must
  // be the source value of that rotate operation:
  // phi [ rotate(RotSrc, RotAmt), RotBB ], [ RotSrc, GuardBB ]
  PHINode &Phi = cast<PHINode>(I);
  Value *P0 = Phi.getOperand(0), *P1 = Phi.getOperand(1);
  Value *RotSrc, *RotAmt;
  Intrinsic::ID IID = matchRotate(P0, RotSrc, RotAmt);
  if (IID == Intrinsic::not_intrinsic || RotSrc != P1) {
    IID = matchRotate(P1, RotSrc, RotAmt);
    if (IID == Intrinsic::not_intrinsic || RotSrc != P0)
      return false;
    assert((IID == Intrinsic::fshl || IID == Intrinsic::fshr) &&
           "Pattern must match funnel shift left or right");
  }

  // The incoming block with our source operand must be the "guard" block.
  // That must contain a cmp+branch to avoid the rotate when the shift amount
  // is equal to 0. The other incoming block is the block with the rotate.
  BasicBlock *GuardBB = Phi.getIncomingBlock(RotSrc == P1);
  BasicBlock *RotBB = Phi.getIncomingBlock(RotSrc != P1);
  Instruction *TermI = GuardBB->getTerminator();
  ICmpInst::Predicate Pred;
  BasicBlock *PhiBB = Phi.getParent();
  if (!match(TermI, m_Br(m_ICmp(Pred, m_Specific(RotAmt), m_ZeroInt()),
                         m_SpecificBB(PhiBB), m_SpecificBB(RotBB))))
    return false;

  if (Pred != CmpInst::ICMP_EQ)
    return false;

  // We matched a variation of this IR pattern:
  // GuardBB:
  //   %cmp = icmp eq i32 %RotAmt, 0
  //   br i1 %cmp, label %PhiBB, label %RotBB
  // RotBB:
  //   %sub = sub i32 32, %RotAmt
  //   %shr = lshr i32 %X, %sub
  //   %shl = shl i32 %X, %RotAmt
  //   %rot = or i32 %shr, %shl
  //   br label %PhiBB
  // PhiBB:
  //   %cond = phi i32 [ %rot, %RotBB ], [ %X, %GuardBB ]
  // -->
  // llvm.fshl.i32(i32 %X, i32 %X, i32 %RotAmt)
  IRBuilder<> Builder(PhiBB, PhiBB->getFirstInsertionPt());
  Function *F = Intrinsic::getDeclaration(Phi.getModule(), IID, Phi.getType());
  Phi.replaceAllUsesWith(Builder.CreateCall(F, {RotSrc, RotSrc, RotAmt}));
  ++NumGuardedRotates;
  return true;
}

/// This is used by foldAnyOrAllBitsSet() to capture a source value (Root) and
/// the bit indexes (Mask) needed by a masked compare. If we're matching a chain
/// of 'and' ops, then we also need to capture the fact that we saw an
/// "and X, 1", so that's an extra return value for that case.
struct MaskOps {
  Value *Root;
  APInt Mask;
  bool MatchAndChain;
  bool FoundAnd1;

  MaskOps(unsigned BitWidth, bool MatchAnds)
      : Root(nullptr), Mask(APInt::getNullValue(BitWidth)),
        MatchAndChain(MatchAnds), FoundAnd1(false) {}
};

/// This is a recursive helper for foldAnyOrAllBitsSet() that walks through a
/// chain of 'and' or 'or' instructions looking for shift ops of a common
/// source value. Examples:
///   or (or (or X, (X >> 3)), (X >> 5)), (X >> 8)
/// returns { X, 0x129 }
///   and (and (X >> 1), 1), (X >> 4)
/// returns { X, 0x12 }
static bool matchAndOrChain(Value *V, MaskOps &MOps) {
  Value *Op0, *Op1;
  if (MOps.MatchAndChain) {
    // Recurse through a chain of 'and' operands. This requires an extra check
    // vs. the 'or' matcher: we must find an "and X, 1" instruction somewhere
    // in the chain to know that all of the high bits are cleared.
    if (match(V, m_And(m_Value(Op0), m_One()))) {
      MOps.FoundAnd1 = true;
      return matchAndOrChain(Op0, MOps);
    }
    if (match(V, m_And(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  } else {
    // Recurse through a chain of 'or' operands.
    if (match(V, m_Or(m_Value(Op0), m_Value(Op1))))
      return matchAndOrChain(Op0, MOps) && matchAndOrChain(Op1, MOps);
  }

  // We need a shift-right or a bare value representing a compare of bit 0 of
  // the original source operand.
  Value *Candidate;
  uint64_t BitIndex = 0;
  if (!match(V, m_LShr(m_Value(Candidate), m_ConstantInt(BitIndex))))
    Candidate = V;

  // Initialize result source operand.
  if (!MOps.Root)
    MOps.Root = Candidate;

  // If the shift constant is out-of-range, this code hasn't been simplified;
  // give up on the match.
  if (BitIndex >= MOps.Mask.getBitWidth())
    return false;

  // Fill in the mask bit derived from the shift constant.
  MOps.Mask.setBit(BitIndex);
  return MOps.Root == Candidate;
}

/// Match patterns that correspond to "any-bits-set" and "all-bits-set".
/// These will include a chain of 'or' or 'and'-shifted bits from a
/// common source value:
///   and (or  (lshr X, C), ...), 1 --> (X & CMask) != 0
///   and (and (lshr X, C), ...), 1 --> (X & CMask) == CMask
/// Note: "any-bits-clear" and "all-bits-clear" are variations of these patterns
/// that differ only with a final 'not' of the result. We expect that final
/// 'not' to be folded with the compare that we create here (invert predicate).
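///
/// For example, the any-bits-set form folds like this (an illustrative IR
/// sketch with made-up value names, not taken from a regression test):
///   %s = lshr i32 %x, 3
///   %o = or i32 %x, %s
///   %r = and i32 %o, 1
/// -->
///   %a = and i32 %x, 9        ; CMask = (1 << 0) | (1 << 3)
///   %c = icmp ne i32 %a, 0
///   %r = zext i1 %c to i32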
static bool foldAnyOrAllBitsSet(Instruction &I) {
  // The 'any-bits-set' ('or' chain) pattern is simpler to match because the
  // final "and X, 1" instruction must be the final op in the sequence.
  bool MatchAllBitsSet;
  if (match(&I, m_c_And(m_OneUse(m_And(m_Value(), m_Value())), m_Value())))
    MatchAllBitsSet = true;
  else if (match(&I, m_And(m_OneUse(m_Or(m_Value(), m_Value())), m_One())))
    MatchAllBitsSet = false;
  else
    return false;

  MaskOps MOps(I.getType()->getScalarSizeInBits(), MatchAllBitsSet);
  if (MatchAllBitsSet) {
    if (!matchAndOrChain(cast<BinaryOperator>(&I), MOps) || !MOps.FoundAnd1)
      return false;
  } else {
    if (!matchAndOrChain(cast<BinaryOperator>(&I)->getOperand(0), MOps))
      return false;
  }

  // The pattern was found. Create a masked compare that replaces all of the
  // shift and logic ops.
  IRBuilder<> Builder(&I);
  Constant *Mask = ConstantInt::get(I.getType(), MOps.Mask);
  Value *And = Builder.CreateAnd(MOps.Root, Mask);
  Value *Cmp = MatchAllBitsSet ? Builder.CreateICmpEQ(And, Mask)
                               : Builder.CreateIsNotNull(And);
  Value *Zext = Builder.CreateZExt(Cmp, I.getType());
  I.replaceAllUsesWith(Zext);
  ++NumAnyOrAllBitsSet;
  return true;
}

// Try to recognize the code below as a popcount idiom and replace it with the
// popcount intrinsic. This is the "best" algorithm from
// http://graphics.stanford.edu/~seander/bithacks.html#CountBitsSetParallel
// Also used in TargetLowering::expandCTPOP().
//
// int popcount(unsigned int i) {
//   i = i - ((i >> 1) & 0x55555555);
//   i = (i & 0x33333333) + ((i >> 2) & 0x33333333);
//   i = ((i + (i >> 4)) & 0x0F0F0F0F);
//   return (i * 0x01010101) >> 24;
// }
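//
// Note that the masks below are built as 8-bit splats, so the same match
// covers the wider legal widths as well; as a worked example, for i64 the
// constants become Mask55 = 0x5555555555555555, Mask01 = 0x0101010101010101,
// and MaskShift = Len - 8 = 56, i.e. "(i * 0x0101010101010101) >> 56".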
static bool tryToRecognizePopCount(Instruction &I) {
  if (I.getOpcode() != Instruction::LShr)
    return false;

  Type *Ty = I.getType();
  if (!Ty->isIntOrIntVectorTy())
    return false;

  unsigned Len = Ty->getScalarSizeInBits();
  // FIXME: fix Len == 8 and other irregular type lengths.
  if (!(Len <= 128 && Len > 8 && Len % 8 == 0))
    return false;

  APInt Mask55 = APInt::getSplat(Len, APInt(8, 0x55));
  APInt Mask33 = APInt::getSplat(Len, APInt(8, 0x33));
  APInt Mask0F = APInt::getSplat(Len, APInt(8, 0x0F));
  APInt Mask01 = APInt::getSplat(Len, APInt(8, 0x01));
  APInt MaskShift = APInt(Len, Len - 8);

  Value *Op0 = I.getOperand(0);
  Value *Op1 = I.getOperand(1);
  Value *MulOp0;
  // Matching "(i * 0x01010101...) >> 24".
  if (match(Op0, m_Mul(m_Value(MulOp0), m_SpecificInt(Mask01))) &&
      match(Op1, m_SpecificInt(MaskShift))) {
    Value *ShiftOp0;
    // Matching "((i + (i >> 4)) & 0x0F0F0F0F...)".
    if (match(MulOp0,
              m_And(m_c_Add(m_LShr(m_Value(ShiftOp0), m_SpecificInt(4)),
                            m_Deferred(ShiftOp0)),
                    m_SpecificInt(Mask0F)))) {
      Value *AndOp0;
      // Matching "(i & 0x33333333...) + ((i >> 2) & 0x33333333...)".
      if (match(ShiftOp0,
                m_c_Add(m_And(m_Value(AndOp0), m_SpecificInt(Mask33)),
                        m_And(m_LShr(m_Deferred(AndOp0), m_SpecificInt(2)),
                              m_SpecificInt(Mask33))))) {
        Value *Root, *SubOp1;
        // Matching "i - ((i >> 1) & 0x55555555...)".
        if (match(AndOp0, m_Sub(m_Value(Root), m_Value(SubOp1))) &&
            match(SubOp1, m_And(m_LShr(m_Specific(Root), m_SpecificInt(1)),
                                m_SpecificInt(Mask55)))) {
          LLVM_DEBUG(dbgs() << "Recognized popcount intrinsic\n");
          IRBuilder<> Builder(&I);
          Function *Func = Intrinsic::getDeclaration(
              I.getModule(), Intrinsic::ctpop, I.getType());
          I.replaceAllUsesWith(Builder.CreateCall(Func, {Root}));
          ++NumPopCountRecognized;
          return true;
        }
      }
    }
  }

  return false;
}

/// This is the entry point for folds that could be implemented in regular
/// InstCombine, but they are separated because they are not expected to
/// occur frequently and/or have more than a constant-length pattern match.
static bool foldUnusualPatterns(Function &F, DominatorTree &DT) {
  bool MadeChange = false;
  for (BasicBlock &BB : F) {
    // Ignore unreachable basic blocks.
    if (!DT.isReachableFromEntry(&BB))
      continue;
    // Do not delete instructions in this loop; that would invalidate the
    // iterator.
    // Walk the block backwards for efficiency. We're matching a chain of
    // use->defs, so we're more likely to succeed by starting from the bottom.
    // Also, we want to avoid matching partial patterns.
    // TODO: It would be more efficient if we removed dead instructions
    // iteratively in this loop rather than waiting until the end.
    for (Instruction &I : make_range(BB.rbegin(), BB.rend())) {
      MadeChange |= foldAnyOrAllBitsSet(I);
      MadeChange |= foldGuardedRotateToFunnelShift(I);
      MadeChange |= tryToRecognizePopCount(I);
    }
  }

  // We're done with transforms, so remove dead instructions.
  if (MadeChange)
    for (BasicBlock &BB : F)
      SimplifyInstructionsInBlock(&BB);

  return MadeChange;
}

/// This is the entry point for all transforms. Pass manager differences are
/// handled in the callers of this function.
static bool runImpl(Function &F, TargetLibraryInfo &TLI, DominatorTree &DT) {
  bool MadeChange = false;
  const DataLayout &DL = F.getParent()->getDataLayout();
  TruncInstCombine TIC(TLI, DL, DT);
  MadeChange |= TIC.run(F);
  MadeChange |= foldUnusualPatterns(F, DT);
  return MadeChange;
}

void AggressiveInstCombinerLegacyPass::getAnalysisUsage(
    AnalysisUsage &AU) const {
  AU.setPreservesCFG();
  AU.addRequired<DominatorTreeWrapperPass>();
  AU.addRequired<TargetLibraryInfoWrapperPass>();
  AU.addPreserved<AAResultsWrapperPass>();
  AU.addPreserved<BasicAAWrapperPass>();
  AU.addPreserved<DominatorTreeWrapperPass>();
  AU.addPreserved<GlobalsAAWrapperPass>();
}

bool AggressiveInstCombinerLegacyPass::runOnFunction(Function &F) {
  auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  auto &DT = getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  return runImpl(F, TLI, DT);
}

PreservedAnalyses AggressiveInstCombinePass::run(Function &F,
                                                 FunctionAnalysisManager &AM) {
  auto &TLI = AM.getResult<TargetLibraryAnalysis>(F);
  auto &DT = AM.getResult<DominatorTreeAnalysis>(F);
  if (!runImpl(F, TLI, DT)) {
    // No changes, all analyses are preserved.
    return PreservedAnalyses::all();
  }
  // Mark all the analyses that instcombine updates as preserved.
  PreservedAnalyses PA;
  PA.preserveSet<CFGAnalyses>();
  PA.preserve<AAManager>();
  PA.preserve<GlobalsAA>();
  return PA;
}

char AggressiveInstCombinerLegacyPass::ID = 0;
INITIALIZE_PASS_BEGIN(AggressiveInstCombinerLegacyPass,
                      "aggressive-instcombine",
                      "Combine pattern based expressions", false, false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(AggressiveInstCombinerLegacyPass, "aggressive-instcombine",
                    "Combine pattern based expressions", false, false)

// Initialization Routines
void llvm::initializeAggressiveInstCombine(PassRegistry &Registry) {
  initializeAggressiveInstCombinerLegacyPassPass(Registry);
}

void LLVMInitializeAggressiveInstCombiner(LLVMPassRegistryRef R) {
  initializeAggressiveInstCombinerLegacyPassPass(*unwrap(R));
}

FunctionPass *llvm::createAggressiveInstCombinerPass() {
  return new AggressiveInstCombinerLegacyPass();
}

void LLVMAddAggressiveInstCombinerPass(LLVMPassManagerRef PM) {
  unwrap(PM)->add(createAggressiveInstCombinerPass());
}