//===- SeparateConstOffsetFromGEP.cpp -------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// Loop unrolling may create many similar GEPs for array accesses.
// e.g., a 2-level loop
//
// float a[32][32]; // global variable
//
// for (int i = 0; i < 2; ++i) {
//   for (int j = 0; j < 2; ++j) {
//     ...
//     ... = a[x + i][y + j];
//     ...
//   }
// }
//
// will probably be unrolled to:
//
// gep %a, 0, %x, %y; load
// gep %a, 0, %x, %y + 1; load
// gep %a, 0, %x + 1, %y; load
// gep %a, 0, %x + 1, %y + 1; load
//
// LLVM's GVN does not use partial redundancy elimination yet, and is thus
// unable to reuse (gep %a, 0, %x, %y). As a result, this misoptimization incurs
// significant slowdown in targets with limited addressing modes. For instance,
// because the PTX target does not support the reg+reg addressing mode, the
// NVPTX backend emits PTX code that literally computes the pointer address of
// each GEP, wasting tons of registers. It emits the following PTX for the
// first load and similar PTX for other loads.
//
// mov.u32 %r1, %x;
// mov.u32 %r2, %y;
// mul.wide.u32 %rl2, %r1, 128;
// mov.u64 %rl3, a;
// add.s64 %rl4, %rl3, %rl2;
// mul.wide.u32 %rl5, %r2, 4;
// add.s64 %rl6, %rl4, %rl5;
// ld.global.f32 %f1, [%rl6];
//
// To reduce the register pressure, the optimization implemented in this file
// merges the common part of a group of GEPs, so we can compute each pointer
// address by adding a simple offset to the common part, saving many registers.
//
// It works by splitting each GEP into a variadic base and a constant offset.
// The variadic base can be computed once and reused by multiple GEPs, and the
// constant offsets can be nicely folded into the reg+immediate addressing mode
// (supported by most targets) without using any extra register.
//
// For instance, we transform the four GEPs and four loads in the above example
// into:
//
// base = gep a, 0, x, y
// load base
// load base + 1 * sizeof(float)
// load base + 32 * sizeof(float)
// load base + 33 * sizeof(float)
//
// Given the transformed IR, a backend that supports the reg+immediate
// addressing mode can easily fold the pointer arithmetic into the loads. For
// example, the NVPTX backend can fold the pointer arithmetic into the
// ld.global.f32 instructions, and the resultant PTX uses far fewer registers.
//
// mov.u32 %r1, %tid.x;
// mov.u32 %r2, %tid.y;
// mul.wide.u32 %rl2, %r1, 128;
// mov.u64 %rl3, a;
// add.s64 %rl4, %rl3, %rl2;
// mul.wide.u32 %rl5, %r2, 4;
// add.s64 %rl6, %rl4, %rl5;
// ld.global.f32 %f1, [%rl6]; // so far the same as unoptimized PTX
// ld.global.f32 %f2, [%rl6+4]; // much better
// ld.global.f32 %f3, [%rl6+128]; // much better
// ld.global.f32 %f4, [%rl6+132]; // much better
//
// Another improvement enabled by the LowerGEP flag is to lower a GEP with
// multiple indices to either multiple GEPs with a single index or arithmetic
// operations (depending on whether the target uses alias analysis in codegen).
// Such a transformation can have the following benefits:
// (1) It can always extract constants from structure-type indices.
// (2) After such lowering, there are more optimization opportunities such as
//     CSE, LICM and CGP.
//
// E.g. The following GEPs have multiple indices:
//  BB1:
//    %p = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j1, i32 3
//    load %p
//    ...
//  BB2:
//    %p2 = getelementptr [10 x %struct]* %ptr, i64 %i, i64 %j2, i32 2
//    load %p2
//    ...
//
// We cannot CSE the common part related to the index "i64 %i" while the GEPs
// keep their multiple indices; lowering the GEPs achieves that.
// If the target does not use alias analysis in codegen, this pass will
// lower a GEP with multiple indices into arithmetic operations:
//  BB1:
//    %1 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = add i64 %1, %2                          ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = add i64 %3, %4
//    %6 = add i64 %5, struct_field_3              ; Constant offset
//    %p = inttoptr i64 %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = ptrtoint [10 x %struct]* %ptr to i64    ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = add i64 %7, %8                          ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = add i64 %9, %10
//    %12 = add i64 %11, struct_field_2            ; Constant offset
//    %p2 = inttoptr i64 %12 to i32*
//    load %p2
//    ...
//
// If the target uses alias analysis in codegen, this pass will lower a GEP
// with multiple indices into multiple GEPs with a single index:
//  BB1:
//    %1 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %2 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %3 = getelementptr i8* %1, i64 %2            ; CSE opportunity
//    %4 = mul i64 %j1, length_of_struct
//    %5 = getelementptr i8* %3, i64 %4
//    %6 = getelementptr i8* %5, struct_field_3    ; Constant offset
//    %p = bitcast i8* %6 to i32*
//    load %p
//    ...
//  BB2:
//    %7 = bitcast [10 x %struct]* %ptr to i8*     ; CSE opportunity
//    %8 = mul i64 %i, length_of_10xstruct         ; CSE opportunity
//    %9 = getelementptr i8* %7, i64 %8            ; CSE opportunity
//    %10 = mul i64 %j2, length_of_struct
//    %11 = getelementptr i8* %9, i64 %10
//    %12 = getelementptr i8* %11, struct_field_2  ; Constant offset
//    %p2 = bitcast i8* %12 to i32*
//    load %p2
//    ...
//
// Lowering GEPs can also benefit other passes such as LICM and CGP.
// LICM (Loop Invariant Code Motion) cannot hoist/sink a GEP with multiple
// indices if any of those indices is loop-variant. If we lower such a GEP into
// invariant parts and variant parts, LICM can hoist/sink the invariant parts.
// CGP (CodeGen Prepare) tries to sink address calculations that match the
// target's addressing modes. A GEP with multiple indices may not match and will
// not be sunk. If we lower such a GEP into smaller parts, CGP may sink some of
// them. So we end up with a better addressing mode.
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/LoopInfo.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/ScalarEvolution.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/TargetTransformInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <cassert>
#include <cstdint>
#include <string>

using namespace llvm;
using namespace llvm::PatternMatch;

static cl::opt<bool> DisableSeparateConstOffsetFromGEP(
    "disable-separate-const-offset-from-gep", cl::init(false),
    cl::desc("Do not separate the constant offset from a GEP instruction"),
    cl::Hidden);

// Setting this flag may emit false positives when the input module already
// contains dead instructions. Therefore, we set it only in unit tests that are
// free of dead code.
static cl::opt<bool>
    VerifyNoDeadCode("reassociate-geps-verify-no-dead-code", cl::init(false),
                     cl::desc("Verify this pass produces no dead code"),
                     cl::Hidden);

namespace {

/// A helper class for separating a constant offset from a GEP index.
///
/// In real programs, a GEP index may be more complicated than a simple addition
/// of something and a constant integer that can be trivially split off. For
/// example, to split ((a << 3) | 5) + b, we need to search deeper for the
/// constant offset, so that we can separate the index into (a << 3) + b and 5.
///
/// Therefore, this class looks into the expression that computes a given GEP
/// index, and tries to find a constant integer that can be hoisted to the
/// outermost level of the expression as an addition. Not every constant in an
/// expression can jump out. e.g., we cannot transform (b * (a + 5)) to (b * a +
/// 5); nor can we transform (3 * (a + 5)) to (3 * a + 5), although in this case
/// -instcombine has probably already optimized (3 * (a + 5)) to (3 * a + 15).
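///
/// As a hedged sketch (not taken from the test suite), the IR for the index
/// ((a << 3) | 5) + b could look like:
///
///   %s = shl i64 %a, 3      ; the low 3 bits of %s are known zero
///   %o = or i64 %s, 5       ; no common bits, so this "or" acts as an "add"
///   %i = add i64 %o, %b
///
/// and this class separates %i into the new index (a << 3) + b plus the
/// constant offset 5.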
class ConstantOffsetExtractor {
public:
  /// Extracts a constant offset from the given GEP index. It returns the
  /// new index representing the remainder (equal to the original index minus
  /// the constant offset), or nullptr if we cannot extract a constant offset.
  /// \p Idx The given GEP index
  /// \p GEP The given GEP
  /// \p UserChainTail Outputs the tail of UserChain so that we can
  ///    garbage-collect unused instructions in UserChain.
  static Value *Extract(Value *Idx, GetElementPtrInst *GEP,
                        User *&UserChainTail, const DominatorTree *DT);

  /// Looks for a constant offset from the given GEP index without extracting
  /// it. It returns the numeric value of the extracted constant offset (0 if
  /// failed). The meanings of the arguments are the same as in Extract.
  static int64_t Find(Value *Idx, GetElementPtrInst *GEP,
                      const DominatorTree *DT);

private:
  ConstantOffsetExtractor(Instruction *InsertionPt, const DominatorTree *DT)
      : IP(InsertionPt), DL(InsertionPt->getModule()->getDataLayout()), DT(DT) {
  }

  /// Searches the expression that computes V for a non-zero constant C s.t.
  /// V can be reassociated into the form V' + C. If the search is successful,
  /// returns C and updates UserChain as a def-use chain from C to V;
  /// otherwise, UserChain is empty.
  ///
  /// \p V            The given expression
  /// \p SignExtended Whether V will be sign-extended in the computation of the
  ///                 GEP index
  /// \p ZeroExtended Whether V will be zero-extended in the computation of the
  ///                 GEP index
  /// \p NonNegative  Whether V is guaranteed to be non-negative. For example,
  ///                 an index of an inbounds GEP is guaranteed to be
  ///                 non-negative. Leveraging this, we can better split
  ///                 inbounds GEPs.
  APInt find(Value *V, bool SignExtended, bool ZeroExtended, bool NonNegative);

  /// A helper function to look into both operands of a binary operator.
  APInt findInEitherOperand(BinaryOperator *BO, bool SignExtended,
                            bool ZeroExtended);

  /// After finding the constant offset C from the GEP index I, we build a new
  /// index I' s.t. I' + C = I. This function builds and returns the new
  /// index I' according to UserChain produced by function "find".
  ///
  /// The building conceptually takes two steps:
  /// 1) iteratively distribute s/zext towards the leaves of the expression tree
  ///    that computes I
  /// 2) reassociate the expression tree to the form I' + C.
  ///
  /// For example, to extract the 5 from sext(a + (b + 5)), we first distribute
  /// sext to a, b and 5 so that we have
  ///   sext(a) + (sext(b) + 5).
  /// Then, we reassociate it to
  ///   (sext(a) + sext(b)) + 5.
  /// Given this form, we know I' is sext(a) + sext(b).
  Value *rebuildWithoutConstOffset();

  /// After the first step of rebuilding the GEP index without the constant
  /// offset, distribute s/zext to the operands of all operators in UserChain.
  /// e.g., zext(sext(a + (b + 5))) (assuming no overflow) =>
  ///   zext(sext(a)) + (zext(sext(b)) + zext(sext(5))).
  ///
  /// The function also updates UserChain to point to new subexpressions after
  /// distributing s/zext. e.g., the old UserChain of the above example is
  ///   5 -> b + 5 -> a + (b + 5) -> sext(...) -> zext(sext(...)),
  /// and the new UserChain is
  ///   zext(sext(5)) -> zext(sext(b)) + zext(sext(5)) ->
  ///     zext(sext(a)) + (zext(sext(b)) + zext(sext(5)))
  ///
  /// \p ChainIndex The index to UserChain. ChainIndex is initially
  ///               UserChain.size() - 1, and is decremented during
  ///               the recursion.
  Value *distributeExtsAndCloneChain(unsigned ChainIndex);

  /// Reassociates the GEP index to the form I' + C and returns I'.
  Value *removeConstOffset(unsigned ChainIndex);

  /// A helper function to apply ExtInsts, a list of s/zext, to value V.
  /// e.g., if ExtInsts = [sext i32 to i64, zext i16 to i32], this function
  /// returns "sext i32 (zext i16 V to i32) to i64".
  Value *applyExts(Value *V);

  /// A helper function that returns whether we can trace into the operands
  /// of binary operator BO for a constant offset.
  ///
  /// \p SignExtended Whether BO is surrounded by sext
  /// \p ZeroExtended Whether BO is surrounded by zext
  /// \p NonNegative  Whether BO is known to be non-negative, e.g., an
  ///                 in-bounds array index.
  bool CanTraceInto(bool SignExtended, bool ZeroExtended, BinaryOperator *BO,
                    bool NonNegative);

  /// The path from the constant offset to the old GEP index. e.g., if the GEP
  /// index is "a * b + (c + 5)", then after running function find, UserChain[0]
  /// will be the constant 5, UserChain[1] will be the subexpression "c + 5",
  /// and UserChain[2] will be the entire expression "a * b + (c + 5)".
  ///
  /// This path helps to rebuild the new GEP index.
  SmallVector<User *, 8> UserChain;

  /// A data structure used in rebuildWithoutConstOffset. Contains all
  /// sext/zext instructions along UserChain.
  SmallVector<CastInst *, 16> ExtInsts;

  /// Insertion position of cloned instructions.
  Instruction *IP;

  const DataLayout &DL;
  const DominatorTree *DT;
};

/// A pass that tries to split every GEP in the function into a variadic
/// base and a constant offset. It is a FunctionPass because searching for the
/// constant offset may inspect other basic blocks.
class SeparateConstOffsetFromGEP : public FunctionPass {
public:
  static char ID;

  SeparateConstOffsetFromGEP(bool LowerGEP = false)
      : FunctionPass(ID), LowerGEP(LowerGEP) {
    initializeSeparateConstOffsetFromGEPPass(*PassRegistry::getPassRegistry());
  }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<DominatorTreeWrapperPass>();
    AU.addRequired<ScalarEvolutionWrapperPass>();
    AU.addRequired<TargetTransformInfoWrapperPass>();
    AU.addRequired<LoopInfoWrapperPass>();
    AU.setPreservesCFG();
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool doInitialization(Module &M) override {
    DL = &M.getDataLayout();
    return false;
  }

  bool runOnFunction(Function &F) override;

private:
  /// Tries to split the given GEP into a variadic base and a constant offset,
  /// and returns true if the splitting succeeds.
  bool splitGEP(GetElementPtrInst *GEP);

  /// Lower a GEP with multiple indices into multiple GEPs with a single index.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of GEPs with a single index and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToSingleIndexGEPs(GetElementPtrInst *Variadic,
                              int64_t AccumulativeByteOffset);

  /// Lower a GEP with multiple indices into ptrtoint+arithmetic+inttoptr form.
  /// Function splitGEP already split the original GEP into a variadic part and
  /// a constant offset (i.e., AccumulativeByteOffset). This function lowers the
  /// variadic part into a set of arithmetic operations and applies
  /// AccumulativeByteOffset to it.
  /// \p Variadic The variadic part of the original GEP.
  /// \p AccumulativeByteOffset The constant offset.
  void lowerToArithmetics(GetElementPtrInst *Variadic,
                          int64_t AccumulativeByteOffset);

  /// Finds the constant offset within each index and accumulates them. If
  /// LowerGEP is true, it searches indices of both sequential and structure
  /// types; otherwise it searches only sequential indices. The output
  /// NeedsExtraction indicates whether we successfully found a non-zero
  /// constant offset.
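  ///
  /// As a sketch with assumed types and values (not from the test suite): for
  /// a GEP computing &a[i + 1][j + 2] into a [10 x [20 x float]] array, the
  /// constant 1 contributes 1 * 80 bytes (one row) and the constant 2
  /// contributes 2 * 4 bytes, so the accumulated byte offset is 88.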
  int64_t accumulateByteOffset(GetElementPtrInst *GEP, bool &NeedsExtraction);

  /// Canonicalize array indices to pointer-size integers. This helps to
  /// simplify the logic of splitting a GEP. For example, if a + b is a
  /// pointer-size integer, we have
  ///   gep base, a + b = gep (gep base, a), b
  /// However, this equality may not hold if the size of a + b is smaller than
  /// the pointer size, because LLVM conceptually sign-extends GEP indices to
  /// pointer size before computing the address
  /// (http://llvm.org/docs/LangRef.html#id181).
  ///
  /// This canonicalization is very likely already done in clang and
  /// instcombine. Therefore, the program will probably remain the same.
  ///
  /// Returns true if the module changes.
  ///
  /// Verified in @i32_add in split-gep.ll
  bool canonicalizeArrayIndicesToPointerSize(GetElementPtrInst *GEP);

  /// Optimize sext(a)+sext(b) to sext(a+b) when a+b can't sign overflow.
  /// SeparateConstOffsetFromGEP distributes a sext to leaves before extracting
  /// the constant offset. After extraction, it becomes desirable to reunite the
  /// distributed sexts. For example,
  ///
  ///                              &a[sext(i +nsw (j +nsw 5))]
  ///   => distribute             &a[sext(i) +nsw (sext(j) +nsw 5)]
  ///   => constant extraction    &a[sext(i) + sext(j)] + 5
  ///   => reunion                &a[sext(i +nsw j)] + 5
  bool reuniteExts(Function &F);

  /// A helper that reunites sexts in an instruction.
  bool reuniteExts(Instruction *I);

  /// Find the closest dominator of <Dominatee> that is equivalent to <Key>.
  Instruction *findClosestMatchingDominator(
      const SCEV *Key, Instruction *Dominatee,
      DenseMap<const SCEV *, SmallVector<Instruction *, 2>> &DominatingExprs);

  /// Verify F is free of dead code.
  void verifyNoDeadCode(Function &F);

  // Returns true if V has more than one use inside loop L.
  bool hasMoreThanOneUseInLoop(Value *V, Loop *L);

  // Swap the index operands of two GEPs.
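  // As a sketch under assumed IR (not from the tests): given
  //   %g1 = getelementptr i8* %p, i64 %i
  //   %g2 = getelementptr i8* %g1, i64 256
  // swapping the index operands yields
  //   %g1 = getelementptr i8* %p, i64 256
  //   %g2 = getelementptr i8* %g1, i64 %i
  // so the constant GEP becomes loop invariant, and LICM can hoist it when %p
  // is loop invariant.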
  void swapGEPOperand(GetElementPtrInst *First, GetElementPtrInst *Second);

  // Check if it is safe to swap the index operands of two GEPs.
  bool isLegalToSwapOperand(GetElementPtrInst *First, GetElementPtrInst *Second,
                            Loop *CurLoop);

  const DataLayout *DL = nullptr;
  DominatorTree *DT = nullptr;
  ScalarEvolution *SE;

  LoopInfo *LI;
  TargetLibraryInfo *TLI;

  /// Whether to lower a GEP with multiple indices into arithmetic operations
  /// or multiple GEPs with a single index.
  bool LowerGEP;

  DenseMap<const SCEV *, SmallVector<Instruction *, 2>> DominatingAdds;
  DenseMap<const SCEV *, SmallVector<Instruction *, 2>> DominatingSubs;
};

} // end anonymous namespace

char SeparateConstOffsetFromGEP::ID = 0;

INITIALIZE_PASS_BEGIN(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)
INITIALIZE_PASS_DEPENDENCY(DominatorTreeWrapperPass)
INITIALIZE_PASS_DEPENDENCY(ScalarEvolutionWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetTransformInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(LoopInfoWrapperPass)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(
    SeparateConstOffsetFromGEP, "separate-const-offset-from-gep",
    "Split GEPs to a variadic base and a constant offset for better CSE", false,
    false)

FunctionPass *llvm::createSeparateConstOffsetFromGEPPass(bool LowerGEP) {
  return new SeparateConstOffsetFromGEP(LowerGEP);
}

bool ConstantOffsetExtractor::CanTraceInto(bool SignExtended,
                                           bool ZeroExtended,
                                           BinaryOperator *BO,
                                           bool NonNegative) {
  // We only consider ADD, SUB and OR, because a non-zero constant found in
  // expressions composed of these operations can be easily hoisted as a
  // constant offset by reassociation.
  if (BO->getOpcode() != Instruction::Add &&
      BO->getOpcode() != Instruction::Sub &&
      BO->getOpcode() != Instruction::Or) {
    return false;
  }

  Value *LHS = BO->getOperand(0), *RHS = BO->getOperand(1);
  // Do not trace into "or" unless it is equivalent to "add". If LHS and RHS
  // don't have common bits, (LHS | RHS) is equivalent to (LHS + RHS).
  // FIXME: this does not appear to be covered by any tests
  //        (with x86/aarch64 backends at least)
  if (BO->getOpcode() == Instruction::Or &&
      !haveNoCommonBitsSet(LHS, RHS, DL, nullptr, BO, DT))
    return false;
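  // As a hypothetical sketch: if %a is known to have its low bit clear, then
  //   %i = or i64 %a, 1
  // computes the same value as
  //   %i = add i64 %a, 1
  // so tracing into this "or" lets us extract 1 as a constant offset.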

  // In addition, tracing into BO requires that its surrounding s/zext (if
  // any) is distributable to both operands.
  //
  // Suppose BO = A op B.
  //  SignExtended | ZeroExtended | Distributable?
  // --------------+--------------+----------------------------------
  //  0            | 0            | true because no s/zext exists
  //  0            | 1            | zext(BO) == zext(A) op zext(B)
  //  1            | 0            | sext(BO) == sext(A) op sext(B)
  //  1            | 1            | zext(sext(BO)) ==
  //               |              |   zext(sext(A)) op zext(sext(B))
  if (BO->getOpcode() == Instruction::Add && !ZeroExtended && NonNegative) {
    // If a + b >= 0 and (a >= 0 or b >= 0), then
    //   sext(a + b) = sext(a) + sext(b)
    // even if the addition is not marked nsw.
    //
    // Leveraging this invariant, we can trace into an sext'ed inbounds GEP
    // index if the constant offset is non-negative.
    //
    // Verified in @sext_add in split-gep.ll.
    if (ConstantInt *ConstLHS = dyn_cast<ConstantInt>(LHS)) {
      if (!ConstLHS->isNegative())
        return true;
    }
    if (ConstantInt *ConstRHS = dyn_cast<ConstantInt>(RHS)) {
      if (!ConstRHS->isNegative())
        return true;
    }
  }

  // sext (add/sub nsw A, B) == add/sub nsw (sext A), (sext B)
  // zext (add/sub nuw A, B) == add/sub nuw (zext A), (zext B)
  if (BO->getOpcode() == Instruction::Add ||
      BO->getOpcode() == Instruction::Sub) {
    if (SignExtended && !BO->hasNoSignedWrap())
      return false;
    if (ZeroExtended && !BO->hasNoUnsignedWrap())
      return false;
  }
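
  // As a sketch (hypothetical IR): for sext(add nsw i32 %a, 5), the nsw flag
  // lets us distribute the sext and rewrite it as add nsw (sext %a), 5, so
  // the constant 5 can still be extracted. Without nsw, sext(%a + 5) may
  // differ from sext(%a) + 5 when the add wraps.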

  return true;
}

APInt ConstantOffsetExtractor::findInEitherOperand(BinaryOperator *BO,
                                                   bool SignExtended,
                                                   bool ZeroExtended) {
  // Save off the current height of the chain, in case we need to restore it.
  size_t ChainLength = UserChain.size();

  // BO being non-negative does not shed light on whether its operands are
  // non-negative. Clear the NonNegative flag here.
  APInt ConstantOffset = find(BO->getOperand(0), SignExtended, ZeroExtended,
                              /* NonNegative */ false);
  // If we found a constant offset in the left operand, stop and return that.
  // This shortcut might cause us to miss opportunities of combining the
  // constant offsets in both operands, e.g., (a + 4) + (b + 5) => (a + b) + 9.
  // However, such cases are probably already handled by -instcombine,
  // given this pass runs after the standard optimizations.
  if (ConstantOffset != 0) return ConstantOffset;

  // Reset the chain back to where it was when we started exploring this node,
  // since visiting the LHS didn't pan out.
  UserChain.resize(ChainLength);

  ConstantOffset = find(BO->getOperand(1), SignExtended, ZeroExtended,
                        /* NonNegative */ false);
  // If BO is a sub operator, negate the constant offset found in the right
  // operand.
  if (BO->getOpcode() == Instruction::Sub)
    ConstantOffset = -ConstantOffset;

  // If RHS wasn't a suitable candidate either, reset the chain again.
  if (ConstantOffset == 0)
    UserChain.resize(ChainLength);

  return ConstantOffset;
}

APInt ConstantOffsetExtractor::find(Value *V, bool SignExtended,
                                    bool ZeroExtended, bool NonNegative) {
  // TODO(jingyue): We could trace into integer/pointer casts, such as
  // inttoptr, ptrtoint, bitcast, and addrspacecast. We choose to handle only
  // integers because it gives good enough results for our benchmarks.
  unsigned BitWidth = cast<IntegerType>(V->getType())->getBitWidth();

  // We cannot do much with Values that are not a User, such as an Argument.
  User *U = dyn_cast<User>(V);
  if (U == nullptr) return APInt(BitWidth, 0);

  APInt ConstantOffset(BitWidth, 0);
  if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
    // Hooray, we found it!
    ConstantOffset = CI->getValue();
  } else if (BinaryOperator *BO = dyn_cast<BinaryOperator>(V)) {
    // Trace into subexpressions for more hoisting opportunities.
    if (CanTraceInto(SignExtended, ZeroExtended, BO, NonNegative))
      ConstantOffset = findInEitherOperand(BO, SignExtended, ZeroExtended);
  } else if (isa<TruncInst>(V)) {
    ConstantOffset =
        find(U->getOperand(0), SignExtended, ZeroExtended, NonNegative)
            .trunc(BitWidth);
  } else if (isa<SExtInst>(V)) {
    ConstantOffset = find(U->getOperand(0), /* SignExtended */ true,
                          ZeroExtended, NonNegative).sext(BitWidth);
  } else if (isa<ZExtInst>(V)) {
    // As an optimization, we can clear the SignExtended flag because
    // sext(zext(a)) = zext(a). Verified in @sext_zext in split-gep.ll.
    //
    // Clear the NonNegative flag, because zext(a) >= 0 does not imply a >= 0.
    ConstantOffset =
        find(U->getOperand(0), /* SignExtended */ false,
             /* ZeroExtended */ true, /* NonNegative */ false).zext(BitWidth);
  }

  // If we found a non-zero constant offset, add it to the path for
  // rebuildWithoutConstOffset. Zero is a valid constant offset, but doesn't
  // help this optimization.
  if (ConstantOffset != 0)
    UserChain.push_back(U);
  return ConstantOffset;
}

Value *ConstantOffsetExtractor::applyExts(Value *V) {
  Value *Current = V;
  // ExtInsts is built in the use-def order. Therefore, we apply them to V
  // in reverse order.
  for (auto I = ExtInsts.rbegin(), E = ExtInsts.rend(); I != E; ++I) {
    if (Constant *C = dyn_cast<Constant>(Current)) {
      // If Current is a constant, apply s/zext using ConstantExpr::getCast.
      // ConstantExpr::getCast emits a ConstantInt if C is a ConstantInt.
      Current = ConstantExpr::getCast((*I)->getOpcode(), C, (*I)->getType());
    } else {
      Instruction *Ext = (*I)->clone();
      Ext->setOperand(0, Current);
      Ext->insertBefore(IP);
      Current = Ext;
    }
  }
  return Current;
}

Value *ConstantOffsetExtractor::rebuildWithoutConstOffset() {
  distributeExtsAndCloneChain(UserChain.size() - 1);
  // Remove all nullptrs (which used to be s/zext instructions) from UserChain.
  unsigned NewSize = 0;
  for (User *I : UserChain) {
    if (I != nullptr) {
      UserChain[NewSize] = I;
      NewSize++;
    }
  }
  UserChain.resize(NewSize);
  return removeConstOffset(UserChain.size() - 1);
}

Value *
ConstantOffsetExtractor::distributeExtsAndCloneChain(unsigned ChainIndex) {
  User *U = UserChain[ChainIndex];
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(U));
    // If U is a ConstantInt, applyExts will return a ConstantInt as well.
    return UserChain[ChainIndex] = cast<ConstantInt>(applyExts(U));
  }

  if (CastInst *Cast = dyn_cast<CastInst>(U)) {
    assert(
        (isa<SExtInst>(Cast) || isa<ZExtInst>(Cast) || isa<TruncInst>(Cast)) &&
        "Only the following instructions can be traced: sext, zext & trunc");
    ExtInsts.push_back(Cast);
    UserChain[ChainIndex] = nullptr;
    return distributeExtsAndCloneChain(ChainIndex - 1);
  }

  // Function find only traces into BinaryOperators and CastInsts.
  BinaryOperator *BO = cast<BinaryOperator>(U);
  // OpNo = which operand of BO is UserChain[ChainIndex - 1]
  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  Value *TheOther = applyExts(BO->getOperand(1 - OpNo));
  Value *NextInChain = distributeExtsAndCloneChain(ChainIndex - 1);

  BinaryOperator *NewBO = nullptr;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(BO->getOpcode(), NextInChain, TheOther,
                                   BO->getName(), IP);
  } else {
    NewBO = BinaryOperator::Create(BO->getOpcode(), TheOther, NextInChain,
                                   BO->getName(), IP);
  }
  return UserChain[ChainIndex] = NewBO;
}

Value *ConstantOffsetExtractor::removeConstOffset(unsigned ChainIndex) {
  if (ChainIndex == 0) {
    assert(isa<ConstantInt>(UserChain[ChainIndex]));
    return ConstantInt::getNullValue(UserChain[ChainIndex]->getType());
  }

  BinaryOperator *BO = cast<BinaryOperator>(UserChain[ChainIndex]);
  assert((BO->use_empty() || BO->hasOneUse()) &&
         "distributeExtsAndCloneChain clones each BinaryOperator in "
         "UserChain, so none of them should be used more than "
         "once");

  unsigned OpNo = (BO->getOperand(0) == UserChain[ChainIndex - 1] ? 0 : 1);
  assert(BO->getOperand(OpNo) == UserChain[ChainIndex - 1]);
  Value *NextInChain = removeConstOffset(ChainIndex - 1);
  Value *TheOther = BO->getOperand(1 - OpNo);

  // If NextInChain is 0 and not the LHS of a sub, we can simplify the
  // sub-expression to be just TheOther.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(NextInChain)) {
    if (CI->isZero() && !(BO->getOpcode() == Instruction::Sub && OpNo == 0))
      return TheOther;
  }
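  // For example (a sketch): rebuilding "b + 5" with the 5 removed yields
  // "b + 0", which simplifies to just "b". But for "5 - b", removing the 5
  // must keep "0 - b", because the zero is the LHS of the sub.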

  BinaryOperator::BinaryOps NewOp = BO->getOpcode();
  if (BO->getOpcode() == Instruction::Or) {
    // Rebuild "or" as "add", because "or" may be invalid for the new
    // expression.
    //
    // For instance, given
    //   a | (b + 5) where a and b + 5 have no common bits,
    // we can extract 5 as the constant offset.
    //
    // However, reusing the "or" in the new index would give us
    //   (a | b) + 5
    // which does not equal a | (b + 5).
    //
    // Replacing the "or" with "add" is fine, because
    //   a | (b + 5) = a + (b + 5) = (a + b) + 5
    NewOp = Instruction::Add;
  }

  BinaryOperator *NewBO;
  if (OpNo == 0) {
    NewBO = BinaryOperator::Create(NewOp, NextInChain, TheOther, "", IP);
  } else {
    NewBO = BinaryOperator::Create(NewOp, TheOther, NextInChain, "", IP);
  }
  NewBO->takeName(BO);
  return NewBO;
}

Value *ConstantOffsetExtractor::Extract(Value *Idx, GetElementPtrInst *GEP,
                                        User *&UserChainTail,
                                        const DominatorTree *DT) {
  ConstantOffsetExtractor Extractor(GEP, DT);
  // Find a non-zero constant offset first.
  APInt ConstantOffset =
      Extractor.find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
                     GEP->isInBounds());
  if (ConstantOffset == 0) {
    UserChainTail = nullptr;
    return nullptr;
  }
  // Separates the constant offset from the GEP index.
  Value *IdxWithoutConstOffset = Extractor.rebuildWithoutConstOffset();
  UserChainTail = Extractor.UserChain.back();
  return IdxWithoutConstOffset;
}

int64_t ConstantOffsetExtractor::Find(Value *Idx, GetElementPtrInst *GEP,
                                      const DominatorTree *DT) {
  // If Idx is an index of an inbounds GEP, Idx is guaranteed to be
  // non-negative.
  return ConstantOffsetExtractor(GEP, DT)
      .find(Idx, /* SignExtended */ false, /* ZeroExtended */ false,
            GEP->isInBounds())
      .getSExtValue();
}

bool SeparateConstOffsetFromGEP::canonicalizeArrayIndicesToPointerSize(
    GetElementPtrInst *GEP) {
  bool Changed = false;
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (User::op_iterator I = GEP->op_begin() + 1, E = GEP->op_end();
       I != E; ++I, ++GTI) {
    // Skip struct member indices which must be i32.
    if (GTI.isSequential()) {
      if ((*I)->getType() != IntPtrTy) {
        *I = CastInst::CreateIntegerCast(*I, IntPtrTy, true, "idxprom", GEP);
        Changed = true;
      }
    }
  }
  return Changed;
}

int64_t
SeparateConstOffsetFromGEP::accumulateByteOffset(GetElementPtrInst *GEP,
                                                 bool &NeedsExtraction) {
  NeedsExtraction = false;
  int64_t AccumulativeByteOffset = 0;
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // Tries to extract a constant offset from this GEP index.
      int64_t ConstantOffset =
          ConstantOffsetExtractor::Find(GEP->getOperand(I), GEP, DT);
      if (ConstantOffset != 0) {
        NeedsExtraction = true;
        // A GEP may have multiple indices. We accumulate the extracted
        // constant offset to a byte offset, and later offset the remainder of
        // the original GEP with this byte offset.
        AccumulativeByteOffset +=
            ConstantOffset * DL->getTypeAllocSize(GTI.getIndexedType());
      }
    } else if (LowerGEP) {
      StructType *StTy = GTI.getStructType();
      uint64_t Field = cast<ConstantInt>(GEP->getOperand(I))->getZExtValue();
      // Skip field 0 as the offset is always 0.
      if (Field != 0) {
        NeedsExtraction = true;
        AccumulativeByteOffset +=
            DL->getStructLayout(StTy)->getElementOffset(Field);
      }
    }
  }
  return AccumulativeByteOffset;
}

void SeparateConstOffsetFromGEP::lowerToSingleIndexGEPs(
    GetElementPtrInst *Variadic, int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Type *I8PtrTy =
      Builder.getInt8PtrTy(Variadic->getType()->getPointerAddressSpace());
  Value *ResultPtr = Variadic->getOperand(0);
  Loop *L = LI->getLoopFor(Variadic->getParent());
  // The base is a swap candidate only if it is loop invariant and not used
  // more than once in the loop.
  bool isSwapCandidate =
      L && L->isLoopInvariant(ResultPtr) &&
      !hasMoreThanOneUseInLoop(ResultPtr, L);
  Value *FirstResult = nullptr;

  if (ResultPtr->getType() != I8PtrTy)
    ResultPtr = Builder.CreateBitCast(ResultPtr, I8PtrTy);

  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create an ugly GEP for each sequential index. We don't create GEPs for
  // structure indices, as they are accumulated in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ugly GEP with a single index for each index.
      ResultPtr =
          Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Idx, "uglygep");
      if (FirstResult == nullptr)
        FirstResult = ResultPtr;
    }
  }

  // Create a GEP with the constant offset index.
  if (AccumulativeByteOffset != 0) {
    Value *Offset = ConstantInt::get(IntPtrTy, AccumulativeByteOffset);
    ResultPtr =
        Builder.CreateGEP(Builder.getInt8Ty(), ResultPtr, Offset, "uglygep");
  } else
    isSwapCandidate = false;

  // If we created a GEP with a constant index, and the base is loop invariant,
  // then we swap the first GEP with the constant one, so LICM can later move
  // the constant GEP out of the loop.
  GetElementPtrInst *FirstGEP =
      dyn_cast_or_null<GetElementPtrInst>(FirstResult);
  GetElementPtrInst *SecondGEP = dyn_cast_or_null<GetElementPtrInst>(ResultPtr);
  if (isSwapCandidate && isLegalToSwapOperand(FirstGEP, SecondGEP, L))
    swapGEPOperand(FirstGEP, SecondGEP);

  if (ResultPtr->getType() != Variadic->getType())
    ResultPtr = Builder.CreateBitCast(ResultPtr, Variadic->getType());

  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

void
SeparateConstOffsetFromGEP::lowerToArithmetics(GetElementPtrInst *Variadic,
                                               int64_t AccumulativeByteOffset) {
  IRBuilder<> Builder(Variadic);
  Type *IntPtrTy = DL->getIntPtrType(Variadic->getType());

  Value *ResultPtr = Builder.CreatePtrToInt(Variadic->getOperand(0), IntPtrTy);
  gep_type_iterator GTI = gep_type_begin(*Variadic);
  // Create ADD/SHL/MUL arithmetic operations for each sequential index. We
  // don't create arithmetic for structure indices, as they are accumulated
  // in the constant offset index.
  for (unsigned I = 1, E = Variadic->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      Value *Idx = Variadic->getOperand(I);
      // Skip zero indices.
      if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx))
        if (CI->isZero())
          continue;

      APInt ElementSize = APInt(IntPtrTy->getIntegerBitWidth(),
                                DL->getTypeAllocSize(GTI.getIndexedType()));
      // Scale the index by element size.
      if (ElementSize != 1) {
        if (ElementSize.isPowerOf2()) {
          Idx = Builder.CreateShl(
              Idx, ConstantInt::get(IntPtrTy, ElementSize.logBase2()));
        } else {
          Idx = Builder.CreateMul(Idx, ConstantInt::get(IntPtrTy, ElementSize));
        }
      }
      // Create an ADD for each index.
      ResultPtr = Builder.CreateAdd(ResultPtr, Idx);
    }
  }

  // Create an ADD for the constant offset index.
  if (AccumulativeByteOffset != 0) {
    ResultPtr = Builder.CreateAdd(
        ResultPtr, ConstantInt::get(IntPtrTy, AccumulativeByteOffset));
  }

  ResultPtr = Builder.CreateIntToPtr(ResultPtr, Variadic->getType());
  Variadic->replaceAllUsesWith(ResultPtr);
  Variadic->eraseFromParent();
}

bool SeparateConstOffsetFromGEP::splitGEP(GetElementPtrInst *GEP) {
  // Skip vector GEPs.
  if (GEP->getType()->isVectorTy())
    return false;

  // The backend can already nicely handle the case where all indices are
  // constant.
  if (GEP->hasAllConstantIndices())
    return false;

  bool Changed = canonicalizeArrayIndicesToPointerSize(GEP);

  bool NeedsExtraction;
  int64_t AccumulativeByteOffset = accumulateByteOffset(GEP, NeedsExtraction);

  if (!NeedsExtraction)
    return Changed;

  TargetTransformInfo &TTI =
      getAnalysis<TargetTransformInfoWrapperPass>().getTTI(*GEP->getFunction());

  // If LowerGEP is disabled, before really splitting the GEP, check whether the
  // backend supports the addressing mode we are about to produce. If not, this
  // splitting probably won't be beneficial.
  // If LowerGEP is enabled, even when the extracted constant offset cannot
  // match the addressing mode, we can still optimize the other lowered parts
  // of the variadic indices. Therefore, we don't check for addressing modes in
  // that case.
  if (!LowerGEP) {
    unsigned AddrSpace = GEP->getPointerAddressSpace();
    if (!TTI.isLegalAddressingMode(GEP->getResultElementType(),
                                   /*BaseGV=*/nullptr, AccumulativeByteOffset,
                                   /*HasBaseReg=*/true, /*Scale=*/0,
                                   AddrSpace)) {
      return Changed;
    }
  }

  // Remove the constant offset in each sequential index. The resultant GEP
  // computes the variadic base.
  // Notice that we don't remove struct field indices here. If LowerGEP is
  // disabled, a structure index is not accumulated and we still use the old
  // one. If LowerGEP is enabled, a structure index is accumulated in the
  // constant offset. lowerToSingleIndexGEPs or lowerToArithmetics will later
  // handle the constant offset and won't need a new structure index.
  gep_type_iterator GTI = gep_type_begin(*GEP);
  for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I, ++GTI) {
    if (GTI.isSequential()) {
      // Splits this GEP index into a variadic part and a constant offset, and
      // uses the variadic part as the new index.
      Value *OldIdx = GEP->getOperand(I);
      User *UserChainTail;
      Value *NewIdx =
          ConstantOffsetExtractor::Extract(OldIdx, GEP, UserChainTail, DT);
      if (NewIdx != nullptr) {
        // Switches to the index with the constant offset removed.
        GEP->setOperand(I, NewIdx);
        // After switching to the new index, we can garbage-collect UserChain
        // and the old index if they are not used.
        RecursivelyDeleteTriviallyDeadInstructions(UserChainTail);
        RecursivelyDeleteTriviallyDeadInstructions(OldIdx);
      }
    }
  }

  // Clear the inbounds attribute because the new index may be off-bound.
  // e.g.,
  //
  //   b = add i64 a, 5
  //   addr = gep inbounds float, float* p, i64 b
  //
  // is transformed to:
  //
  //   addr2 = gep float, float* p, i64 a ; inbounds removed
  //   addr = gep inbounds float, float* addr2, i64 5
  //
  // If a is -4, although the old index b is in bounds, the new index a is
  // off-bound. http://llvm.org/docs/LangRef.html#id181 says "if the
  // inbounds keyword is not present, the offsets are added to the base
  // address with silently-wrapping two's complement arithmetic".
  // Therefore, the final code will be semantically equivalent.
  //
  // TODO(jingyue): do some range analysis to keep as many inbounds as
  // possible. GEPs with inbounds are more friendly to alias analysis.
  bool GEPWasInBounds = GEP->isInBounds();
  GEP->setIsInBounds(false);

  // Lowers a GEP to either GEPs with a single index or arithmetic operations.
  if (LowerGEP) {
    // Since BasicAA currently does not analyze ptrtoint/inttoptr, do not lower
    // to arithmetic operations if the target uses alias analysis in codegen.
    if (TTI.useAA())
      lowerToSingleIndexGEPs(GEP, AccumulativeByteOffset);
    else
      lowerToArithmetics(GEP, AccumulativeByteOffset);
    return true;
  }

  // No need to create another GEP if the accumulative byte offset is 0.
  if (AccumulativeByteOffset == 0)
    return true;

  // Offsets the base with the accumulative byte offset.
  //
  //   %gep                        ; the base
  //   ... %gep ...
  //
  // => add the offset
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   %gep                        ; will be removed
  //   ... %gep ...
  //
  // => replace all uses of %gep with %new.gep and remove %gep
  //
  //   %gep2                       ; clone of %gep
  //   %new.gep = gep %gep2, <offset / sizeof(*%gep)>
  //   ... %new.gep ...
  //
  // If AccumulativeByteOffset is not a multiple of sizeof(*%gep), we emit an
  // uglygep (http://llvm.org/docs/GetElementPtr.html#what-s-an-uglygep):
  // bitcast %gep2 to i8*, add the offset, and bitcast the result back to the
  // type of %gep.
  //
  //   %gep2                       ; clone of %gep
  //   %0 = bitcast %gep2 to i8*
  //   %uglygep = gep %0, <offset>
  //   %new.gep = bitcast %uglygep to <type of %gep>
  //   ... %new.gep ...
  Instruction *NewGEP = GEP->clone();
  NewGEP->insertBefore(GEP);

  // Per the ANSI C standard, signed / unsigned = unsigned and signed %
  // unsigned = unsigned. Therefore, we cast ElementTypeSizeOfGEP to signed so
  // that the division and modulo below behave as signed operations on the
  // signed AccumulativeByteOffset.
  int64_t ElementTypeSizeOfGEP = static_cast<int64_t>(
      DL->getTypeAllocSize(GEP->getResultElementType()));
  Type *IntPtrTy = DL->getIntPtrType(GEP->getType());
  if (AccumulativeByteOffset % ElementTypeSizeOfGEP == 0) {
    // Very likely. As long as %gep is naturally aligned, the byte offset we
    // extracted should be a multiple of sizeof(*%gep).
    int64_t Index = AccumulativeByteOffset / ElementTypeSizeOfGEP;
    NewGEP = GetElementPtrInst::Create(GEP->getResultElementType(), NewGEP,
                                       ConstantInt::get(IntPtrTy, Index, true),
                                       GEP->getName(), GEP);
    NewGEP->copyMetadata(*GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
  } else {
    // Unlikely but possible. For example,
    //   #pragma pack(1)
    //   struct S {
    //     int a[3];
    //     int64 b[8];
    //   };
    //   #pragma pack()
    //
    // Suppose the gep before extraction is &s[i + 1].b[j + 3]. After
    // extraction, it becomes &s[i].b[j] and AccumulativeByteOffset is
    // sizeof(S) + 3 * sizeof(int64) = 100, which is not a multiple of
    // sizeof(int64).
    //
    // Emit an uglygep in this case.
    Type *I8PtrTy = Type::getInt8PtrTy(GEP->getContext(),
                                       GEP->getPointerAddressSpace());
    NewGEP = new BitCastInst(NewGEP, I8PtrTy, "", GEP);
    NewGEP = GetElementPtrInst::Create(
        Type::getInt8Ty(GEP->getContext()), NewGEP,
        ConstantInt::get(IntPtrTy, AccumulativeByteOffset, true), "uglygep",
        GEP);
    NewGEP->copyMetadata(*GEP);
    // Inherit the inbounds attribute of the original GEP.
    cast<GetElementPtrInst>(NewGEP)->setIsInBounds(GEPWasInBounds);
    if (GEP->getType() != I8PtrTy)
      NewGEP = new BitCastInst(NewGEP, GEP->getType(), GEP->getName(), GEP);
  }

  GEP->replaceAllUsesWith(NewGEP);
  GEP->eraseFromParent();

  return true;
}

bool SeparateConstOffsetFromGEP::runOnFunction(Function &F) {
  if (skipFunction(F))
    return false;

  if (DisableSeparateConstOffsetFromGEP)
    return false;

  DT = &getAnalysis<DominatorTreeWrapperPass>().getDomTree();
  SE = &getAnalysis<ScalarEvolutionWrapperPass>().getSE();
  LI = &getAnalysis<LoopInfoWrapperPass>().getLoopInfo();
  TLI = &getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
  bool Changed = false;
  for (BasicBlock &B : F) {
    for (BasicBlock::iterator I = B.begin(), IE = B.end(); I != IE;)
      if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(I++))
        Changed |= splitGEP(GEP);
    // No need to split GEP ConstantExprs because all their indices are
    // constant already.
  }

  Changed |= reuniteExts(F);

  if (VerifyNoDeadCode)
    verifyNoDeadCode(F);

  return Changed;
}

Instruction *SeparateConstOffsetFromGEP::findClosestMatchingDominator(
    const SCEV *Key, Instruction *Dominatee,
    DenseMap<const SCEV *, SmallVector<Instruction *, 2>> &DominatingExprs) {
  auto Pos = DominatingExprs.find(Key);
  if (Pos == DominatingExprs.end())
    return nullptr;

  auto &Candidates = Pos->second;
  // Because we process the basic blocks in pre-order of the dominator tree, a
  // candidate that doesn't dominate the current instruction won't dominate any
  // future instruction either. Therefore, we pop it out of the stack. This
  // optimization makes the algorithm O(n).
  while (!Candidates.empty()) {
    Instruction *Candidate = Candidates.back();
    if (DT->dominates(Candidate, Dominatee))
      return Candidate;
    Candidates.pop_back();
  }
  return nullptr;
}

bool SeparateConstOffsetFromGEP::reuniteExts(Instruction *I) {
  if (!SE->isSCEVable(I->getType()))
    return false;

  // Dom: LHS+RHS
  // I: sext(LHS)+sext(RHS)
  // If Dom can't sign overflow and Dom dominates I, optimize I to sext(Dom).
  // TODO: handle zext
  Value *LHS = nullptr, *RHS = nullptr;
  if (match(I, m_Add(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      if (auto *Dom = findClosestMatchingDominator(Key, I, DominatingAdds)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  } else if (match(I, m_Sub(m_SExt(m_Value(LHS)), m_SExt(m_Value(RHS))))) {
    if (LHS->getType() == RHS->getType()) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      if (auto *Dom = findClosestMatchingDominator(Key, I, DominatingSubs)) {
        Instruction *NewSExt = new SExtInst(Dom, I->getType(), "", I);
        NewSExt->takeName(I);
        I->replaceAllUsesWith(NewSExt);
        RecursivelyDeleteTriviallyDeadInstructions(I);
        return true;
      }
    }
  }

  // Add I to DominatingExprs if it's an add/sub that can't sign overflow.
  if (match(I, m_NSWAdd(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfPoison(I)) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      DominatingAdds[Key].push_back(I);
    }
  } else if (match(I, m_NSWSub(m_Value(LHS), m_Value(RHS)))) {
    if (programUndefinedIfPoison(I)) {
      const SCEV *Key =
          SE->getAddExpr(SE->getUnknown(LHS), SE->getUnknown(RHS));
      DominatingSubs[Key].push_back(I);
    }
  }
  return false;
}

bool SeparateConstOffsetFromGEP::reuniteExts(Function &F) {
  bool Changed = false;
  DominatingAdds.clear();
  DominatingSubs.clear();
  for (const auto Node : depth_first(DT)) {
    BasicBlock *BB = Node->getBlock();
    for (auto I = BB->begin(); I != BB->end(); ) {
      Instruction *Cur = &*I++;
      Changed |= reuniteExts(Cur);
    }
  }
  return Changed;
}

void SeparateConstOffsetFromGEP::verifyNoDeadCode(Function &F) {
  for (BasicBlock &B : F) {
    for (Instruction &I : B) {
      if (isInstructionTriviallyDead(&I)) {
        std::string ErrMessage;
        raw_string_ostream RSO(ErrMessage);
        RSO << "Dead instruction detected!\n" << I << "\n";
        llvm_unreachable(RSO.str().c_str());
      }
    }
  }
}

bool SeparateConstOffsetFromGEP::isLegalToSwapOperand(
    GetElementPtrInst *FirstGEP, GetElementPtrInst *SecondGEP, Loop *CurLoop) {
  if (!FirstGEP || !FirstGEP->hasOneUse())
    return false;

  if (!SecondGEP || FirstGEP->getParent() != SecondGEP->getParent())
    return false;

  if (FirstGEP == SecondGEP)
    return false;

  unsigned FirstNum = FirstGEP->getNumOperands();
  unsigned SecondNum = SecondGEP->getNumOperands();
  // Give up unless both GEPs have exactly two operands.
  if (FirstNum != SecondNum || FirstNum != 2)
    return false;

  Value *FirstBase = FirstGEP->getOperand(0);
  Value *SecondBase = SecondGEP->getOperand(0);
  Value *FirstOffset = FirstGEP->getOperand(1);
  // Give up if the index of the first GEP is loop invariant.
  if (CurLoop->isLoopInvariant(FirstOffset))
    return false;

  // Give up if the bases don't have the same type.
  if (FirstBase->getType() != SecondBase->getType())
    return false;

  Instruction *FirstOffsetDef = dyn_cast<Instruction>(FirstOffset);

  // Check if the second operand of the first GEP has a constant coefficient.
  // For example, in the following code, we won't gain anything by hoisting
  // the second GEP out because the second GEP can be folded away.
  //   %scevgep.sum.ur159 = add i64 %idxprom48.ur, 256
  //   %67 = shl i64 %scevgep.sum.ur159, 2
  //   %uglygep160 = getelementptr i8* %65, i64 %67
  //   %uglygep161 = getelementptr i8* %uglygep160, i64 -1024

  // Skip a constant shift instruction, which may be generated by splitting
  // GEPs.
  if (FirstOffsetDef && FirstOffsetDef->isShift() &&
      isa<ConstantInt>(FirstOffsetDef->getOperand(1)))
    FirstOffsetDef = dyn_cast<Instruction>(FirstOffsetDef->getOperand(0));

  // Give up if FirstOffsetDef is an Add or Sub with a constant, because the
  // swap may not be profitable at all due to constant folding.
  if (FirstOffsetDef)
    if (BinaryOperator *BO = dyn_cast<BinaryOperator>(FirstOffsetDef)) {
      unsigned opc = BO->getOpcode();
      if ((opc == Instruction::Add || opc == Instruction::Sub) &&
          (isa<ConstantInt>(BO->getOperand(0)) ||
           isa<ConstantInt>(BO->getOperand(1))))
        return false;
    }
  return true;
}

bool SeparateConstOffsetFromGEP::hasMoreThanOneUseInLoop(Value *V, Loop *L) {
  int UsesInLoop = 0;
  for (User *U : V->users()) {
    if (Instruction *User = dyn_cast<Instruction>(U))
      if (L->contains(User))
        if (++UsesInLoop > 1)
          return true;
  }
  return false;
}

void SeparateConstOffsetFromGEP::swapGEPOperand(GetElementPtrInst *First,
                                                GetElementPtrInst *Second) {
  Value *Offset1 = First->getOperand(1);
  Value *Offset2 = Second->getOperand(1);
  First->setOperand(1, Offset2);
  Second->setOperand(1, Offset1);

  // We changed p+o+c to p+c+o; p+c may no longer be inbounds.
  const DataLayout &DAL = First->getModule()->getDataLayout();
  APInt Offset(DAL.getIndexSizeInBits(
                   cast<PointerType>(First->getType())->getAddressSpace()),
               0);
  Value *NewBase =
      First->stripAndAccumulateInBoundsConstantOffsets(DAL, Offset);
  uint64_t ObjectSize;
  if (!getObjectSize(NewBase, ObjectSize, DAL, TLI) ||
      Offset.ugt(ObjectSize)) {
    First->setIsInBounds(false);
    Second->setIsInBounds(false);
  } else
    First->setIsInBounds(true);
}