//===- Reassociate.cpp - Reassociate binary expressions ------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This pass reassociates commutative expressions in an order that is designed
// to promote better constant propagation, GCSE, LICM, PRE, etc.
//
// For example: 4 + (x + 5) -> x + (4 + 5)
//
// In the implementation of this algorithm, constants are assigned rank = 0,
// function arguments are rank = 1, and other values are assigned ranks
// corresponding to the reverse post order traversal of current function
// (starting at 2), which effectively gives values in deep loops higher rank
// than values not in loops.
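//
// For example (illustrative): if %a and %b are function arguments and %t is
// computed inside a loop, "(%a + %t) + %b" can be rewritten as
// "(%a + %b) + %t", grouping the lower-ranked, loop-invariant values
// together so that LICM can hoist their sum.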
//
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Scalar/Reassociate.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BasicAliasAnalysis.h"
#include "llvm/Analysis/GlobalsModRef.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PassManager.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Scalar.h"
#include "llvm/Transforms/Utils/Local.h"
#include <algorithm>
#include <cassert>
#include <utility>

using namespace llvm;
using namespace reassociate;
using namespace PatternMatch;

#define DEBUG_TYPE "reassociate"

STATISTIC(NumChanged, "Number of insts reassociated");
STATISTIC(NumAnnihil, "Number of expr tree annihilated");
STATISTIC(NumFactor, "Number of multiplies factored");

#ifndef NDEBUG
/// Print out the expression identified in the Ops list.
static void PrintOps(Instruction *I, const SmallVectorImpl<ValueEntry> &Ops) {
  Module *M = I->getModule();
  dbgs() << Instruction::getOpcodeName(I->getOpcode()) << " "
         << *Ops[0].Op->getType() << '\t';
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    dbgs() << "[ ";
    Ops[i].Op->printAsOperand(dbgs(), false, M);
    dbgs() << ", #" << Ops[i].Rank << "] ";
  }
}
#endif

/// Utility class representing a non-constant Xor-operand. We classify
/// non-constant Xor-Operands into two categories:
///  C1) The operand is in the form "X & C", where C is a constant and C != ~0
///  C2)
///    C2.1) The operand is in the form of "X | C", where C is a non-zero
///          constant.
///    C2.2) Any operand E which doesn't fall into C1 and C2.1, we view this
///          operand as "E | 0"
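///
/// For example (illustrative): "X & 15" falls into C1 (15 != ~0), while
/// "X | 16" falls into C2.1 (16 != 0).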
class llvm::reassociate::XorOpnd {
public:
  XorOpnd(Value *V);

  bool isInvalid() const { return SymbolicPart == nullptr; }
  bool isOrExpr() const { return isOr; }
  Value *getValue() const { return OrigVal; }
  Value *getSymbolicPart() const { return SymbolicPart; }
  unsigned getSymbolicRank() const { return SymbolicRank; }
  const APInt &getConstPart() const { return ConstPart; }

  void Invalidate() { SymbolicPart = OrigVal = nullptr; }
  void setSymbolicRank(unsigned R) { SymbolicRank = R; }

private:
  Value *OrigVal;
  Value *SymbolicPart;
  APInt ConstPart;
  unsigned SymbolicRank;
  bool isOr;
};

XorOpnd::XorOpnd(Value *V) {
  assert(!isa<ConstantInt>(V) && "No ConstantInt");
  OrigVal = V;
  Instruction *I = dyn_cast<Instruction>(V);
  SymbolicRank = 0;

  if (I && (I->getOpcode() == Instruction::Or ||
            I->getOpcode() == Instruction::And)) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    const APInt *C;
    if (match(V0, m_APInt(C)))
      std::swap(V0, V1);

    if (match(V1, m_APInt(C))) {
      ConstPart = *C;
      SymbolicPart = V0;
      isOr = (I->getOpcode() == Instruction::Or);
      return;
    }
  }

  // View the operand as "V | 0".
  SymbolicPart = V;
  ConstPart = APInt::getNullValue(V->getType()->getScalarSizeInBits());
  isOr = true;
}

/// Return V as a BinaryOperator if it is an instruction of the specified
/// opcode, it has only one use, and (for floating-point operations) it has
/// fast-math flags; otherwise return nullptr.
static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode) {
  auto *I = dyn_cast<Instruction>(V);
  if (I && I->hasOneUse() && I->getOpcode() == Opcode)
    if (!isa<FPMathOperator>(I) || I->isFast())
      return cast<BinaryOperator>(I);
  return nullptr;
}

static BinaryOperator *isReassociableOp(Value *V, unsigned Opcode1,
                                        unsigned Opcode2) {
  auto *I = dyn_cast<Instruction>(V);
  if (I && I->hasOneUse() &&
      (I->getOpcode() == Opcode1 || I->getOpcode() == Opcode2))
    if (!isa<FPMathOperator>(I) || I->isFast())
      return cast<BinaryOperator>(I);
  return nullptr;
}

void ReassociatePass::BuildRankMap(Function &F,
                                   ReversePostOrderTraversal<Function *> &RPOT) {
  unsigned Rank = 2;

  // Assign distinct ranks to function arguments.
  for (auto &Arg : F.args()) {
    ValueRankMap[&Arg] = ++Rank;
    LLVM_DEBUG(dbgs() << "Calculated Rank[" << Arg.getName() << "] = " << Rank
                      << "\n");
  }

  // Traverse basic blocks in ReversePostOrder.
  for (BasicBlock *BB : RPOT) {
    unsigned BBRank = RankMap[BB] = ++Rank << 16;

    // Walk the basic block, adding precomputed ranks for any instructions that
    // we cannot move. This ensures that the ranks for these instructions are
    // all different in the block.
    for (Instruction &I : *BB)
      if (mayBeMemoryDependent(I))
        ValueRankMap[&I] = ++BBRank;
  }
}

unsigned ReassociatePass::getRank(Value *V) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I) {
    if (isa<Argument>(V)) return ValueRankMap[V]; // Function argument.
    return 0; // Otherwise it's a global or constant, rank 0.
  }

  if (unsigned Rank = ValueRankMap[I])
    return Rank; // Rank already known?

  // If this is an expression, return the 1+MAX(rank(LHS), rank(RHS)) so that
  // we can reassociate expressions for code motion! Since we do not recurse
  // for PHI nodes, we cannot have infinite recursion here, because there
  // cannot be loops in the value graph that do not go through PHI nodes.
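  // For example (illustrative): for %t = add i32 %a, %b this computes
  // rank(%t) = 1 + max(rank(%a), rank(%b)), stopping the operand scan early
  // once the enclosing block's rank is reached.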
  unsigned Rank = 0, MaxRank = RankMap[I->getParent()];
  for (unsigned i = 0, e = I->getNumOperands(); i != e && Rank != MaxRank; ++i)
    Rank = std::max(Rank, getRank(I->getOperand(i)));

  // If this is a 'not' or 'neg' instruction, do not count it for rank. This
  // assures us that X and ~X will have the same rank.
  if (!match(I, m_Not(m_Value())) && !match(I, m_Neg(m_Value())) &&
      !match(I, m_FNeg(m_Value())))
    ++Rank;

  LLVM_DEBUG(dbgs() << "Calculated Rank[" << V->getName() << "] = " << Rank
                    << "\n");

  return ValueRankMap[I] = Rank;
}

// Canonicalize constants to RHS. Otherwise, sort the operands by rank.
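// For example (illustrative), "add i32 4, %x" becomes "add i32 %x, 4", and
// two non-constant operands are swapped whenever the RHS has a lower rank
// than the LHS, leaving the higher-ranked operand on the RHS.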
void ReassociatePass::canonicalizeOperands(Instruction *I) {
  assert(isa<BinaryOperator>(I) && "Expected binary operator.");
  assert(I->isCommutative() && "Expected commutative operator.");

  Value *LHS = I->getOperand(0);
  Value *RHS = I->getOperand(1);
  if (LHS == RHS || isa<Constant>(RHS))
    return;
  if (isa<Constant>(LHS) || getRank(RHS) < getRank(LHS))
    cast<BinaryOperator>(I)->swapOperands();
}

static BinaryOperator *CreateAdd(Value *S1, Value *S2, const Twine &Name,
                                 Instruction *InsertBefore, Value *FlagsOp) {
  if (S1->getType()->isIntOrIntVectorTy())
    return BinaryOperator::CreateAdd(S1, S2, Name, InsertBefore);
  else {
    BinaryOperator *Res =
        BinaryOperator::CreateFAdd(S1, S2, Name, InsertBefore);
    Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
    return Res;
  }
}

static BinaryOperator *CreateMul(Value *S1, Value *S2, const Twine &Name,
                                 Instruction *InsertBefore, Value *FlagsOp) {
  if (S1->getType()->isIntOrIntVectorTy())
    return BinaryOperator::CreateMul(S1, S2, Name, InsertBefore);
  else {
    BinaryOperator *Res =
        BinaryOperator::CreateFMul(S1, S2, Name, InsertBefore);
    Res->setFastMathFlags(cast<FPMathOperator>(FlagsOp)->getFastMathFlags());
    return Res;
  }
}

static Instruction *CreateNeg(Value *S1, const Twine &Name,
                              Instruction *InsertBefore, Value *FlagsOp) {
  if (S1->getType()->isIntOrIntVectorTy())
    return BinaryOperator::CreateNeg(S1, Name, InsertBefore);

  if (auto *FMFSource = dyn_cast<Instruction>(FlagsOp))
    return UnaryOperator::CreateFNegFMF(S1, FMFSource, Name, InsertBefore);

  return UnaryOperator::CreateFNeg(S1, Name, InsertBefore);
}

/// Replace 0-X with X*-1.
static BinaryOperator *LowerNegateToMultiply(Instruction *Neg) {
  assert((isa<UnaryOperator>(Neg) || isa<BinaryOperator>(Neg)) &&
         "Expected a Negate!");
  // FIXME: It's not safe to lower a unary FNeg into a FMul by -1.0.
  unsigned OpNo = isa<BinaryOperator>(Neg) ? 1 : 0;
  Type *Ty = Neg->getType();
  Constant *NegOne = Ty->isIntOrIntVectorTy() ?
    ConstantInt::getAllOnesValue(Ty) : ConstantFP::get(Ty, -1.0);

  BinaryOperator *Res = CreateMul(Neg->getOperand(OpNo), NegOne, "", Neg, Neg);
  Neg->setOperand(OpNo, Constant::getNullValue(Ty)); // Drop use of op.
  Res->takeName(Neg);
  Neg->replaceAllUsesWith(Res);
  Res->setDebugLoc(Neg->getDebugLoc());
  return Res;
}

/// Returns k such that lambda(2^Bitwidth) = 2^k, where lambda is the Carmichael
/// function. This means that x^(2^k) === 1 mod 2^Bitwidth for
/// every odd x, i.e. x^(2^k) = 1 for every odd x in Bitwidth-bit arithmetic.
/// Note that 0 <= k < Bitwidth, and if Bitwidth > 3 then x^(2^k) = 0 for every
/// even x in Bitwidth-bit arithmetic.
static unsigned CarmichaelShift(unsigned Bitwidth) {
  if (Bitwidth < 3)
    return Bitwidth - 1;
  return Bitwidth - 2;
}
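// For example (illustrative): CarmichaelShift(8) == 6, and indeed in i8
// arithmetic x^64 == 1 for every odd x (e.g. 3^64 == 1 (mod 256)) while
// x^64 == 0 for every even x.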

/// Add the extra weight 'RHS' to the existing weight 'LHS',
/// reducing the combined weight using any special properties of the operation.
/// The existing weight LHS represents the computation X op X op ... op X where
/// X occurs LHS times. The combined weight represents X op X op ... op X with
/// X occurring LHS + RHS times. If op is "Xor" for example then the combined
/// operation is equivalent to X if LHS + RHS is odd, or 0 if LHS + RHS is even;
/// the routine returns 1 in LHS in the first case, and 0 in LHS in the second.
static void IncorporateWeight(APInt &LHS, const APInt &RHS, unsigned Opcode) {
  // If we were working with infinite precision arithmetic then the combined
  // weight would be LHS + RHS. But we are using finite precision arithmetic,
  // and the APInt sum LHS + RHS may not be correct if it wraps (it is correct
  // for nilpotent operations and addition, but not for idempotent operations
  // and multiplication), so it is important to correctly reduce the combined
  // weight back into range if wrapping would be wrong.

  // If RHS is zero then the weight didn't change.
  if (RHS.isMinValue())
    return;
  // If LHS is zero then the combined weight is RHS.
  if (LHS.isMinValue()) {
    LHS = RHS;
    return;
  }
  // From this point on we know that neither LHS nor RHS is zero.

  if (Instruction::isIdempotent(Opcode)) {
    // Idempotent means X op X === X, so any non-zero weight is equivalent to a
    // weight of 1. Keeping weights at zero or one also means that wrapping is
    // not a problem.
    assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
    return; // Return a weight of 1.
  }
  if (Instruction::isNilpotent(Opcode)) {
    // Nilpotent means X op X === 0, so reduce weights modulo 2.
    assert(LHS == 1 && RHS == 1 && "Weights not reduced!");
    LHS = 0; // 1 + 1 === 0 modulo 2.
    return;
  }
  if (Opcode == Instruction::Add || Opcode == Instruction::FAdd) {
    // TODO: Reduce the weight by exploiting nsw/nuw?
    LHS += RHS;
    return;
  }

  assert((Opcode == Instruction::Mul || Opcode == Instruction::FMul) &&
         "Unknown associative operation!");
  unsigned Bitwidth = LHS.getBitWidth();
  // If CM is the Carmichael number then a weight W satisfying W >= CM+Bitwidth
  // can be replaced with W-CM. That's because x^W=x^(W-CM) for every Bitwidth
  // bit number x, since either x is odd in which case x^CM = 1, or x is even in
  // which case both x^W and x^(W - CM) are zero. By subtracting off multiples
  // of CM like this weights can always be reduced to the range [0, CM+Bitwidth)
  // which by a happy accident means that they can always be represented using
  // Bitwidth bits.
  // TODO: Reduce the weight by exploiting nsw/nuw? (Could do much better than
  // the Carmichael number).
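  // For example (illustrative): with Bitwidth == 8 we get CM == 64 and
  // Threshold == 72, so a combined weight of 100 reduces to 36; x^100 and
  // x^36 agree for every i8 value x (odd x because x^64 == 1, even x because
  // both powers are 0).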
  if (Bitwidth > 3) {
    /// CM - The value of Carmichael's lambda function.
    APInt CM = APInt::getOneBitSet(Bitwidth, CarmichaelShift(Bitwidth));
    // Any weight W >= Threshold can be replaced with W - CM.
    APInt Threshold = CM + Bitwidth;
    assert(LHS.ult(Threshold) && RHS.ult(Threshold) && "Weights not reduced!");
    // For Bitwidth 4 or more the following sum does not overflow.
    LHS += RHS;
    while (LHS.uge(Threshold))
      LHS -= CM;
  } else {
    // To avoid problems with overflow do everything the same as above but using
    // a larger type.
    unsigned CM = 1U << CarmichaelShift(Bitwidth);
    unsigned Threshold = CM + Bitwidth;
    assert(LHS.getZExtValue() < Threshold && RHS.getZExtValue() < Threshold &&
           "Weights not reduced!");
    unsigned Total = LHS.getZExtValue() + RHS.getZExtValue();
    while (Total >= Threshold)
      Total -= CM;
    LHS = Total;
  }
}

using RepeatedValue = std::pair<Value*, APInt>;

/// Given an associative binary expression, return the leaf
/// nodes in Ops along with their weights (how many times the leaf occurs). The
/// original expression is the same as
///   (Ops[0].first op Ops[0].first op ... Ops[0].first)  <- Ops[0].second times
/// op
///   (Ops[1].first op Ops[1].first op ... Ops[1].first)  <- Ops[1].second times
/// op
///   ...
/// op
///   (Ops[N].first op Ops[N].first op ... Ops[N].first)  <- Ops[N].second times
///
/// Note that the values Ops[0].first, ..., Ops[N].first are all distinct.
///
/// This routine may modify the function, in which case it returns 'true'. The
/// changes it makes may well be destructive, changing the value computed by 'I'
/// to something completely different. Thus if the routine returns 'true' then
/// you MUST either replace I with a new expression computed from the Ops array,
/// or use RewriteExprTree to put the values back in.
///
/// A leaf node is either not a binary operation of the same kind as the root
/// node 'I' (i.e. is not a binary operator at all, or is, but with a different
/// opcode), or is the same kind of binary operator but has a use which either
/// does not belong to the expression, or does belong to the expression but is
/// a leaf node. Every leaf node has at least one use that is a non-leaf node
/// of the expression, while for non-leaf nodes (except for the root 'I') every
/// use is a non-leaf node of the expression.
///
/// For example:
///           expression graph        node names
///
///                     +        |        I
///                    / \       |
///                   +   +      |      A,  B
///                  / \ / \     |
///                 *   +   *    |    C,  D,  E
///                / \ / \ / \   |
///                   +   *      |      F,  G
///
/// The leaf nodes are C, E, F and G. The Ops array will contain (maybe not in
/// that order) (C, 1), (E, 1), (F, 2), (G, 2).
///
/// The expression is maximal: if some instruction is a binary operator of the
/// same kind as 'I', and all of its uses are non-leaf nodes of the expression,
/// then the instruction also belongs to the expression, is not a leaf node of
/// it, and its operands also belong to the expression (but may be leaf nodes).
///
/// NOTE: This routine will set operands of non-leaf non-root nodes to undef in
/// order to ensure that every non-root node in the expression has *exactly one*
/// use by a non-leaf node of the expression. This destruction means that the
/// caller MUST either replace 'I' with a new expression or use something like
/// RewriteExprTree to put the values back in if the routine indicates that it
/// made a change by returning 'true'.
///
/// In the above example either the right operand of A or the left operand of B
/// will be replaced by undef. If it is B's operand then this gives:
///
///                     +        |        I
///                    / \       |
///                   +   +      |      A,  B - operand of B replaced with undef
///                  / \   \     |
///                 *   +   *    |    C,  D,  E
///                / \ / \ / \   |
///                   +   *      |      F,  G
///
/// Note that such undef operands can only be reached by passing through 'I'.
/// For example, if you visit operands recursively starting from a leaf node
/// then you will never see such an undef operand unless you get back to 'I',
/// which requires passing through a phi node.
///
/// Note that this routine may also mutate binary operators of the wrong type
/// that have all uses inside the expression (i.e. only used by non-leaf nodes
/// of the expression) if it can turn them into binary operators of the right
/// type and thus make the expression bigger.
static bool LinearizeExprTree(Instruction *I,
                              SmallVectorImpl<RepeatedValue> &Ops) {
  assert((isa<UnaryOperator>(I) || isa<BinaryOperator>(I)) &&
         "Expected a UnaryOperator or BinaryOperator!");
  LLVM_DEBUG(dbgs() << "LINEARIZE: " << *I << '\n');
  unsigned Bitwidth = I->getType()->getScalarType()->getPrimitiveSizeInBits();
  unsigned Opcode = I->getOpcode();
  assert(I->isAssociative() && I->isCommutative() &&
         "Expected an associative and commutative operation!");

  // Visit all operands of the expression, keeping track of their weight (the
  // number of paths from the expression root to the operand, or if you like
  // the number of times that operand occurs in the linearized expression).
  // For example, if I = X + A, where X = A + B, then I, X and B have weight 1
  // while A has weight two.

  // Worklist of non-leaf nodes (their operands are in the expression too) along
  // with their weights, representing a certain number of paths to the operator.
  // If an operator occurs in the worklist multiple times then we found multiple
  // ways to get to it.
  SmallVector<std::pair<Instruction*, APInt>, 8> Worklist; // (Op, Weight)
  Worklist.push_back(std::make_pair(I, APInt(Bitwidth, 1)));
  bool Changed = false;

  // Leaves of the expression are values that either aren't the right kind of
  // operation (eg: a constant, or a multiply in an add tree), or are, but have
  // some uses that are not inside the expression. For example, in I = X + X,
  // X = A + B, the value X has two uses (by I) that are in the expression. If
  // X has any other uses, for example in a return instruction, then we consider
  // X to be a leaf, and won't analyze it further. When we first visit a value,
  // if it has more than one use then at first we conservatively consider it to
  // be a leaf. Later, as the expression is explored, we may discover some more
  // uses of the value from inside the expression. If all uses turn out to be
  // from within the expression (and the value is a binary operator of the right
  // kind) then the value is no longer considered to be a leaf, and its operands
  // are explored.

  // Leaves - Keeps track of the set of putative leaves as well as the number of
  // paths to each leaf seen so far.
  using LeafMap = DenseMap<Value *, APInt>;
  LeafMap Leaves; // Leaf -> Total weight so far.
  SmallVector<Value *, 8> LeafOrder; // Ensure deterministic leaf output order.

#ifndef NDEBUG
  SmallPtrSet<Value *, 8> Visited; // For sanity checking the iteration scheme.
#endif
  while (!Worklist.empty()) {
    std::pair<Instruction*, APInt> P = Worklist.pop_back_val();
    I = P.first; // We examine the operands of this binary operator.

    for (unsigned OpIdx = 0; OpIdx < I->getNumOperands(); ++OpIdx) { // Visit operands.
      Value *Op = I->getOperand(OpIdx);
      APInt Weight = P.second; // Number of paths to this operand.
      LLVM_DEBUG(dbgs() << "OPERAND: " << *Op << " (" << Weight << ")\n");
      assert(!Op->use_empty() && "No uses, so how did we get to it?!");

      // If this is a binary operation of the right kind with only one use then
      // add its operands to the expression.
      if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
        assert(Visited.insert(Op).second && "Not first visit!");
        LLVM_DEBUG(dbgs() << "DIRECT ADD: " << *Op << " (" << Weight << ")\n");
        Worklist.push_back(std::make_pair(BO, Weight));
        continue;
      }

      // Appears to be a leaf. Is the operand already in the set of leaves?
      LeafMap::iterator It = Leaves.find(Op);
      if (It == Leaves.end()) {
        // Not in the leaf map. Must be the first time we saw this operand.
        assert(Visited.insert(Op).second && "Not first visit!");
        if (!Op->hasOneUse()) {
          // This value has uses not accounted for by the expression, so it is
          // not safe to modify. Mark it as being a leaf.
          LLVM_DEBUG(dbgs()
                     << "ADD USES LEAF: " << *Op << " (" << Weight << ")\n");
          LeafOrder.push_back(Op);
          Leaves[Op] = Weight;
          continue;
        }
        // No uses outside the expression, try morphing it.
      } else {
        // Already in the leaf map.
        assert(It != Leaves.end() && Visited.count(Op) &&
               "In leaf map but not visited!");

        // Update the number of paths to the leaf.
        IncorporateWeight(It->second, Weight, Opcode);

#if 0 // TODO: Re-enable once PR13021 is fixed.
        // The leaf already has one use from inside the expression. As we want
        // exactly one such use, drop this new use of the leaf.
        assert(!Op->hasOneUse() && "Only one use, but we got here twice!");
        I->setOperand(OpIdx, UndefValue::get(I->getType()));
        Changed = true;

        // If the leaf is a binary operation of the right kind and we now see
        // that its multiple original uses were in fact all by nodes belonging
        // to the expression, then no longer consider it to be a leaf and add
        // its operands to the expression.
        if (BinaryOperator *BO = isReassociableOp(Op, Opcode)) {
          LLVM_DEBUG(dbgs() << "UNLEAF: " << *Op << " (" << It->second << ")\n");
          Worklist.push_back(std::make_pair(BO, It->second));
          Leaves.erase(It);
          continue;
        }
#endif

        // If we still have uses that are not accounted for by the expression
        // then it is not safe to modify the value.
        if (!Op->hasOneUse())
          continue;

        // No uses outside the expression, try morphing it.
        Weight = It->second;
        Leaves.erase(It); // Since the value may be morphed below.
      }

      // At this point we have a value which, first of all, is not a binary
      // expression of the right kind, and secondly, is only used inside the
      // expression. This means that it can safely be modified. See if we
      // can usefully morph it into an expression of the right kind.
      assert((!isa<Instruction>(Op) ||
              cast<Instruction>(Op)->getOpcode() != Opcode
              || (isa<FPMathOperator>(Op) &&
                  !cast<Instruction>(Op)->isFast())) &&
             "Should have been handled above!");
      assert(Op->hasOneUse() && "Has uses outside the expression tree!");

      // If this is a multiply expression, turn any internal negations into
      // multiplies by -1 so they can be reassociated.
      if (Instruction *Tmp = dyn_cast<Instruction>(Op))
        if ((Opcode == Instruction::Mul && match(Tmp, m_Neg(m_Value()))) ||
            (Opcode == Instruction::FMul && match(Tmp, m_FNeg(m_Value())))) {
          LLVM_DEBUG(dbgs()
                     << "MORPH LEAF: " << *Op << " (" << Weight << ") TO ");
          Tmp = LowerNegateToMultiply(Tmp);
          LLVM_DEBUG(dbgs() << *Tmp << '\n');
          Worklist.push_back(std::make_pair(Tmp, Weight));
          Changed = true;
          continue;
        }

      // Failed to morph into an expression of the right type. This really is
      // a leaf.
      LLVM_DEBUG(dbgs() << "ADD LEAF: " << *Op << " (" << Weight << ")\n");
      assert(!isReassociableOp(Op, Opcode) && "Value was morphed?");
      LeafOrder.push_back(Op);
      Leaves[Op] = Weight;
    }
  }

  // The leaves, repeated according to their weights, represent the linearized
  // form of the expression.
  for (unsigned i = 0, e = LeafOrder.size(); i != e; ++i) {
    Value *V = LeafOrder[i];
    LeafMap::iterator It = Leaves.find(V);
    if (It == Leaves.end())
      // Node initially thought to be a leaf wasn't.
      continue;
    assert(!isReassociableOp(V, Opcode) && "Shouldn't be a leaf!");
    APInt Weight = It->second;
    if (Weight.isMinValue())
      // Leaf already output or weight reduction eliminated it.
      continue;
    // Ensure the leaf is only output once.
    It->second = 0;
    Ops.push_back(std::make_pair(V, Weight));
  }

  // For nilpotent operations or addition there may be no operands, for example
  // because the expression was "X xor X" or consisted of 2^Bitwidth additions:
  // in both cases the weight reduces to 0 causing the value to be skipped.
  if (Ops.empty()) {
    Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, I->getType());
    assert(Identity && "Associative operation without identity!");
    Ops.emplace_back(Identity, APInt(Bitwidth, 1));
  }

  return Changed;
}

/// Now that the operands for this expression tree are
/// linearized and optimized, emit them in-order.
void ReassociatePass::RewriteExprTree(BinaryOperator *I,
                                      SmallVectorImpl<ValueEntry> &Ops) {
  assert(Ops.size() > 1 && "Single values should be used directly!");

  // Since our optimizations should never increase the number of operations, the
  // new expression can usually be written reusing the existing binary operators
  // from the original expression tree, without creating any new instructions,
  // though the rewritten expression may have a completely different topology.
  // We take care to not change anything if the new expression will be the same
  // as the original. If more than trivial changes (like commuting operands)
  // were made then we are obliged to clear out any optional subclass data like
  // nsw flags.

  /// NodesToRewrite - Nodes from the original expression available for writing
  /// the new expression into.
  SmallVector<BinaryOperator*, 8> NodesToRewrite;
  unsigned Opcode = I->getOpcode();
  BinaryOperator *Op = I;

  /// NotRewritable - The operands being written will be the leaves of the new
  /// expression and must not be used as inner nodes (via NodesToRewrite) by
  /// mistake. Inner nodes are always reassociable, and usually leaves are not
  /// (if they were they would have been incorporated into the expression and so
  /// would not be leaves), so most of the time there is no danger of this. But
  /// in rare cases a leaf may become reassociable if an optimization kills uses
  /// of it, or it may momentarily become reassociable during rewriting (below)
  /// due to it being removed as an operand of one of its uses. Ensure that
  /// misuse of leaf nodes as inner nodes cannot occur by remembering all of the
  /// future leaves and refusing to reuse any of them as inner nodes.
  SmallPtrSet<Value*, 8> NotRewritable;
  for (unsigned i = 0, e = Ops.size(); i != e; ++i)
    NotRewritable.insert(Ops[i].Op);

  // ExpressionChanged - Non-null if the rewritten expression differs from the
  // original in some non-trivial way, requiring the clearing of optional flags.
  // Flags are cleared from the operator in ExpressionChanged up to I inclusive.
  BinaryOperator *ExpressionChanged = nullptr;
  for (unsigned i = 0; ; ++i) {
    // The last operation (which comes earliest in the IR) is special as both
    // operands will come from Ops, rather than just one with the other being
    // a subexpression.
    if (i+2 == Ops.size()) {
      Value *NewLHS = Ops[i].Op;
      Value *NewRHS = Ops[i+1].Op;
      Value *OldLHS = Op->getOperand(0);
      Value *OldRHS = Op->getOperand(1);

      if (NewLHS == OldLHS && NewRHS == OldRHS)
        // Nothing changed, leave it alone.
        break;

      if (NewLHS == OldRHS && NewRHS == OldLHS) {
        // The order of the operands was reversed. Swap them.
        LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
        Op->swapOperands();
        LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
        MadeChange = true;
        ++NumChanged;
        break;
      }

      // The new operation differs non-trivially from the original. Overwrite
      // the old operands with the new ones.
      LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
      if (NewLHS != OldLHS) {
        BinaryOperator *BO = isReassociableOp(OldLHS, Opcode);
        if (BO && !NotRewritable.count(BO))
          NodesToRewrite.push_back(BO);
        Op->setOperand(0, NewLHS);
      }
      if (NewRHS != OldRHS) {
        BinaryOperator *BO = isReassociableOp(OldRHS, Opcode);
        if (BO && !NotRewritable.count(BO))
          NodesToRewrite.push_back(BO);
        Op->setOperand(1, NewRHS);
      }
      LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');

      ExpressionChanged = Op;
      MadeChange = true;
      ++NumChanged;

      break;
    }

    // Not the last operation. The left-hand side will be a sub-expression
    // while the right-hand side will be the current element of Ops.
    Value *NewRHS = Ops[i].Op;
    if (NewRHS != Op->getOperand(1)) {
      LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
      if (NewRHS == Op->getOperand(0)) {
        // The new right-hand side was already present as the left operand. If
        // we are lucky then swapping the operands will sort out both of them.
        Op->swapOperands();
      } else {
        // Overwrite with the new right-hand side.
        BinaryOperator *BO = isReassociableOp(Op->getOperand(1), Opcode);
        if (BO && !NotRewritable.count(BO))
          NodesToRewrite.push_back(BO);
        Op->setOperand(1, NewRHS);
        ExpressionChanged = Op;
      }
      LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
      MadeChange = true;
      ++NumChanged;
    }

    // Now deal with the left-hand side. If this is already an operation node
    // from the original expression then just rewrite the rest of the expression
    // into it.
    BinaryOperator *BO = isReassociableOp(Op->getOperand(0), Opcode);
    if (BO && !NotRewritable.count(BO)) {
      Op = BO;
      continue;
    }

    // Otherwise, grab a spare node from the original expression and use that as
    // the left-hand side. If there are no nodes left then the optimizers made
    // an expression with more nodes than the original! This usually means that
    // they did something stupid but it might mean that the problem was just too
    // hard (finding the minimal number of multiplications needed to realize a
    // multiplication expression is NP-complete). Whatever the reason, smart or
    // stupid, create a new node if there are none left.
    BinaryOperator *NewOp;
    if (NodesToRewrite.empty()) {
      Constant *Undef = UndefValue::get(I->getType());
      NewOp = BinaryOperator::Create(Instruction::BinaryOps(Opcode),
                                     Undef, Undef, "", I);
      if (NewOp->getType()->isFPOrFPVectorTy())
        NewOp->setFastMathFlags(I->getFastMathFlags());
    } else {
      NewOp = NodesToRewrite.pop_back_val();
    }

    LLVM_DEBUG(dbgs() << "RA: " << *Op << '\n');
    Op->setOperand(0, NewOp);
    LLVM_DEBUG(dbgs() << "TO: " << *Op << '\n');
    ExpressionChanged = Op;
    MadeChange = true;
    ++NumChanged;
    Op = NewOp;
  }

  // If the expression changed non-trivially then clear out all subclass data
  // starting from the operator specified in ExpressionChanged, and compactify
  // the operators to just before the expression root to guarantee that the
  // expression tree is dominated by all of Ops.
  if (ExpressionChanged)
    do {
      // Preserve FastMathFlags.
      if (isa<FPMathOperator>(I)) {
        FastMathFlags Flags = I->getFastMathFlags();
        ExpressionChanged->clearSubclassOptionalData();
        ExpressionChanged->setFastMathFlags(Flags);
      } else
        ExpressionChanged->clearSubclassOptionalData();

      if (ExpressionChanged == I)
        break;

      // Discard any debug info related to the expressions that have changed
      // (we can leave debug info related to the root, since the result of the
      // expression tree should be the same even after reassociation).
      replaceDbgUsesWithUndef(ExpressionChanged);

      ExpressionChanged->moveBefore(I);
      ExpressionChanged = cast<BinaryOperator>(*ExpressionChanged->user_begin());
    } while (true);

  // Throw away any left over nodes from the original expression.
  for (unsigned i = 0, e = NodesToRewrite.size(); i != e; ++i)
    RedoInsts.insert(NodesToRewrite[i]);
}

/// Insert instructions before the instruction pointed to by BI
/// that compute the negated version of the value specified. The negated
/// version of the value is returned, and BI is left pointing at the instruction
/// that should be processed next by the reassociation pass.
/// Also add intermediate instructions to the redo list that are modified while
/// pushing the negates through adds. These will be revisited to see if
/// additional opportunities have been exposed.
static Value *NegateValue(Value *V, Instruction *BI,
                          ReassociatePass::OrderedSet &ToRedo) {
  if (auto *C = dyn_cast<Constant>(V))
    return C->getType()->isFPOrFPVectorTy() ? ConstantExpr::getFNeg(C) :
                                              ConstantExpr::getNeg(C);

  // We are trying to expose opportunity for reassociation. One of the things
  // that we want to do to achieve this is to push a negation as deep into an
  // expression chain as possible, to expose the add instructions. In practice,
  // this means that we turn this:
  //   X = -(A+12+C+D)   into   X = -A + -12 + -C + -D = -12 + -A + -C + -D
  // so that later, an expression such as Y = 12+X could get reassociated with
  // the -12 to eliminate the constants. We assume that instcombine will clean
  // up the mess later if we introduce tons of unnecessary negation
  // instructions.
  //
  if (BinaryOperator *I =
          isReassociableOp(V, Instruction::Add, Instruction::FAdd)) {
    // Push the negates through the add.
    I->setOperand(0, NegateValue(I->getOperand(0), BI, ToRedo));
    I->setOperand(1, NegateValue(I->getOperand(1), BI, ToRedo));
    if (I->getOpcode() == Instruction::Add) {
      I->setHasNoUnsignedWrap(false);
      I->setHasNoSignedWrap(false);
    }

    // We must move the add instruction here, because the neg instructions do
    // not dominate the old add instruction in general. By moving it, we are
    // assured that the neg instructions we just inserted dominate the
    // instruction we are about to insert after them.
    //
    I->moveBefore(BI);
    I->setName(I->getName()+".neg");

    // Add the intermediate negates to the redo list as processing them later
    // could expose more reassociating opportunities.
    ToRedo.insert(I);
    return I;
  }

  // Okay, we need to materialize a negated version of V with an instruction.
  // Scan the use lists of V to see if we have one already.
  for (User *U : V->users()) {
    if (!match(U, m_Neg(m_Value())) && !match(U, m_FNeg(m_Value())))
      continue;

    // We found one! Now we have to make sure that the definition dominates
    // this use. We do this by moving it to the entry block (if it is a
    // non-instruction value) or right after the definition. These negates will
    // be zapped by reassociate later, so we don't need much finesse here.
    Instruction *TheNeg = cast<Instruction>(U);

    // Verify that the negate is in this function, V might be a constant expr.
    if (TheNeg->getParent()->getParent() != BI->getParent()->getParent())
      continue;

    bool FoundCatchSwitch = false;

    BasicBlock::iterator InsertPt;
    if (Instruction *InstInput = dyn_cast<Instruction>(V)) {
      if (InvokeInst *II = dyn_cast<InvokeInst>(InstInput)) {
        InsertPt = II->getNormalDest()->begin();
      } else {
        InsertPt = ++InstInput->getIterator();
      }

      const BasicBlock *BB = InsertPt->getParent();

      // Make sure we don't move anything before PHIs or exception
      // handling pads.
      while (InsertPt != BB->end() && (isa<PHINode>(InsertPt) ||
                                       InsertPt->isEHPad())) {
        if (isa<CatchSwitchInst>(InsertPt))
          // A catchswitch cannot have anything in the block except
          // itself and PHIs. We'll bail out below.
          FoundCatchSwitch = true;
        ++InsertPt;
      }
    } else {
      InsertPt = TheNeg->getParent()->getParent()->getEntryBlock().begin();
    }

    // We found a catchswitch in the block where we want to move the
    // neg. We cannot move anything into that block. Bail and just
    // create the neg before BI, as if we hadn't found an existing
    // neg.
    if (FoundCatchSwitch)
      break;

    TheNeg->moveBefore(&*InsertPt);
    if (TheNeg->getOpcode() == Instruction::Sub) {
      TheNeg->setHasNoUnsignedWrap(false);
      TheNeg->setHasNoSignedWrap(false);
    } else {
      TheNeg->andIRFlags(BI);
    }
    ToRedo.insert(TheNeg);
    return TheNeg;
  }

  // Insert a 'neg' instruction that subtracts the value from zero to get the
  // negation.
  Instruction *NewNeg = CreateNeg(V, V->getName() + ".neg", BI, BI);
  ToRedo.insert(NewNeg);
  return NewNeg;
}

/// Return true if we should break up this subtract of X-Y into (X + -Y).
static bool ShouldBreakUpSubtract(Instruction *Sub) {
  // If this is a negation, we can't split it up!
  if (match(Sub, m_Neg(m_Value())) || match(Sub, m_FNeg(m_Value())))
    return false;

  // Don't break up X - undef.
  if (isa<UndefValue>(Sub->getOperand(1)))
    return false;

  // Don't bother to break this up unless either operand is a reassociable add
  // or subtract, or the subtract's only user is one.
  Value *V0 = Sub->getOperand(0);
  if (isReassociableOp(V0, Instruction::Add, Instruction::FAdd) ||
      isReassociableOp(V0, Instruction::Sub, Instruction::FSub))
    return true;
  Value *V1 = Sub->getOperand(1);
  if (isReassociableOp(V1, Instruction::Add, Instruction::FAdd) ||
      isReassociableOp(V1, Instruction::Sub, Instruction::FSub))
    return true;
  Value *VB = Sub->user_back();
  if (Sub->hasOneUse() &&
      (isReassociableOp(VB, Instruction::Add, Instruction::FAdd) ||
       isReassociableOp(VB, Instruction::Sub, Instruction::FSub)))
    return true;

  return false;
}

/// If we have (X-Y), and if either X is an add, or if this is only used by an
/// add, transform this into (X+(0-Y)) to promote better reassociation.
static BinaryOperator *BreakUpSubtract(Instruction *Sub,
                                       ReassociatePass::OrderedSet &ToRedo) {
  // Convert a subtract into an add and a neg instruction. This allows sub
  // instructions to be commuted with other add instructions.
  //
  // Calculate the negative value of Operand 1 of the sub instruction,
  // and set it as the RHS of the add instruction we just made.
  Value *NegVal = NegateValue(Sub->getOperand(1), Sub, ToRedo);
  BinaryOperator *New = CreateAdd(Sub->getOperand(0), NegVal, "", Sub, Sub);
  Sub->setOperand(0, Constant::getNullValue(Sub->getType())); // Drop use of op.
  Sub->setOperand(1, Constant::getNullValue(Sub->getType())); // Drop use of op.
  New->takeName(Sub);

  // Everyone now refers to the add instruction.
  Sub->replaceAllUsesWith(New);
  New->setDebugLoc(Sub->getDebugLoc());

  LLVM_DEBUG(dbgs() << "Negated: " << *New << '\n');
  return New;
}

/// If this is a shift of a reassociable multiply or is used by one, change
/// this into a multiply by a constant to assist with further reassociation.
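/// For example (illustrative), "shl i32 %X, 4" becomes "mul i32 %X, 16".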
static BinaryOperator *ConvertShiftToMul(Instruction *Shl) {
  Constant *MulCst = ConstantInt::get(Shl->getType(), 1);
  auto *SA = cast<ConstantInt>(Shl->getOperand(1));
  MulCst = ConstantExpr::getShl(MulCst, SA);

  BinaryOperator *Mul =
      BinaryOperator::CreateMul(Shl->getOperand(0), MulCst, "", Shl);
  Shl->setOperand(0, UndefValue::get(Shl->getType())); // Drop use of op.
  Mul->takeName(Shl);

  // Everyone now refers to the mul instruction.
  Shl->replaceAllUsesWith(Mul);
  Mul->setDebugLoc(Shl->getDebugLoc());

  // We can safely preserve the nuw flag in all cases. It's also safe to turn a
  // nuw nsw shl into a nuw nsw mul. However, nsw in isolation requires special
  // handling. It can be preserved as long as we're not left shifting by
  // bitwidth - 1.
  bool NSW = cast<BinaryOperator>(Shl)->hasNoSignedWrap();
  bool NUW = cast<BinaryOperator>(Shl)->hasNoUnsignedWrap();
  unsigned BitWidth = Shl->getType()->getIntegerBitWidth();
  if (NSW && (NUW || SA->getValue().ult(BitWidth - 1)))
    Mul->setHasNoSignedWrap(true);
  Mul->setHasNoUnsignedWrap(NUW);
  return Mul;
}

/// Scan backwards and forwards among values with the same rank as element i
/// to see if X exists. If X does not exist, return i. This is useful when
/// scanning for 'x' when we see '-x' because they both get the same rank.
static unsigned FindInOperandList(const SmallVectorImpl<ValueEntry> &Ops,
                                  unsigned i, Value *X) {
  unsigned XRank = Ops[i].Rank;
  unsigned e = Ops.size();
  for (unsigned j = i+1; j != e && Ops[j].Rank == XRank; ++j) {
    if (Ops[j].Op == X)
      return j;
    if (Instruction *I1 = dyn_cast<Instruction>(Ops[j].Op))
      if (Instruction *I2 = dyn_cast<Instruction>(X))
        if (I1->isIdenticalTo(I2))
          return j;
  }
  // Scan backwards.
  for (unsigned j = i-1; j != ~0U && Ops[j].Rank == XRank; --j) {
    if (Ops[j].Op == X)
      return j;
    if (Instruction *I1 = dyn_cast<Instruction>(Ops[j].Op))
      if (Instruction *I2 = dyn_cast<Instruction>(X))
        if (I1->isIdenticalTo(I2))
          return j;
  }
  return i;
}

/// Emit a tree of add instructions, summing Ops together
/// and returning the result. Insert the tree before I.
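/// For example (illustrative), Ops = {a, b, c} emits "(a + b) + c", with each
/// add named "reass.add".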
static Value *EmitAddTreeOfValues(Instruction *I,
                                  SmallVectorImpl<WeakTrackingVH> &Ops) {
  if (Ops.size() == 1) return Ops.back();

  Value *V1 = Ops.back();
  Ops.pop_back();
  Value *V2 = EmitAddTreeOfValues(I, Ops);
  return CreateAdd(V2, V1, "reass.add", I, I);
}

/// If V is an expression tree that is a multiplication sequence,
/// and if this sequence contains a multiply by Factor,
/// remove Factor from the tree and return the new tree.
Value *ReassociatePass::RemoveFactorFromExpression(Value *V, Value *Factor) {
  BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
  if (!BO)
    return nullptr;

  SmallVector<RepeatedValue, 8> Tree;
  MadeChange |= LinearizeExprTree(BO, Tree);
  SmallVector<ValueEntry, 8> Factors;
  Factors.reserve(Tree.size());
  for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
    RepeatedValue E = Tree[i];
    Factors.append(E.second.getZExtValue(),
                   ValueEntry(getRank(E.first), E.first));
  }

  bool FoundFactor = false;
  bool NeedsNegate = false;
  for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
    if (Factors[i].Op == Factor) {
      FoundFactor = true;
      Factors.erase(Factors.begin()+i);
      break;
    }

    // If this is a negative version of this factor, remove it.
    if (ConstantInt *FC1 = dyn_cast<ConstantInt>(Factor)) {
      if (ConstantInt *FC2 = dyn_cast<ConstantInt>(Factors[i].Op))
        if (FC1->getValue() == -FC2->getValue()) {
          FoundFactor = NeedsNegate = true;
          Factors.erase(Factors.begin()+i);
          break;
        }
    } else if (ConstantFP *FC1 = dyn_cast<ConstantFP>(Factor)) {
      if (ConstantFP *FC2 = dyn_cast<ConstantFP>(Factors[i].Op)) {
        const APFloat &F1 = FC1->getValueAPF();
        APFloat F2(FC2->getValueAPF());
        F2.changeSign();
        if (F1 == F2) {
          FoundFactor = NeedsNegate = true;
          Factors.erase(Factors.begin() + i);
          break;
        }
      }
    }
  }

  if (!FoundFactor) {
    // Make sure to restore the operands to the expression tree.
    RewriteExprTree(BO, Factors);
    return nullptr;
  }

  BasicBlock::iterator InsertPt = ++BO->getIterator();

  // If this was just a single multiply, remove the multiply and return the only
  // remaining operand.
  if (Factors.size() == 1) {
    RedoInsts.insert(BO);
    V = Factors[0].Op;
  } else {
    RewriteExprTree(BO, Factors);
    V = BO;
  }

  if (NeedsNegate)
    V = CreateNeg(V, "neg", &*InsertPt, BO);

  return V;
}

/// If V is a single-use multiply, recursively add its operands as factors,
/// otherwise add V to the list of factors.
///
/// Ops is the top-level list of add operands we're trying to factor.
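/// For example (illustrative), for V = (a*b)*(c*d) where every multiply is a
/// reassociable single-use multiply, the factors collected are d, c, b, a
/// (RHS operands are visited before LHS operands).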
static void FindSingleUseMultiplyFactors(Value *V,
                                         SmallVectorImpl<Value*> &Factors) {
  BinaryOperator *BO = isReassociableOp(V, Instruction::Mul, Instruction::FMul);
  if (!BO) {
    Factors.push_back(V);
    return;
  }

  // Otherwise, add the LHS and RHS to the list of factors.
  FindSingleUseMultiplyFactors(BO->getOperand(1), Factors);
  FindSingleUseMultiplyFactors(BO->getOperand(0), Factors);
}

/// Optimize a series of operands to an 'and', 'or', or 'xor' instruction.
/// This optimizes based on identities. If it can be reduced to a single Value,
/// it is returned, otherwise the Ops list is mutated as necessary.
static Value *OptimizeAndOrXor(unsigned Opcode,
                               SmallVectorImpl<ValueEntry> &Ops) {
  // Scan the operand lists looking for X and ~X pairs, along with X,X pairs.
  // If we find any, we can simplify the expression. X&~X == 0, X|~X == -1.
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    // First, check for X and ~X in the operand list.
    assert(i < Ops.size());
    Value *X;
    if (match(Ops[i].Op, m_Not(m_Value(X)))) { // Cannot occur for ^.
      unsigned FoundX = FindInOperandList(Ops, i, X);
      if (FoundX != i) {
        if (Opcode == Instruction::And) // ...&X&~X = 0
          return Constant::getNullValue(X->getType());

        if (Opcode == Instruction::Or) // ...|X|~X = -1
          return Constant::getAllOnesValue(X->getType());
      }
    }

    // Next, check for duplicate pairs of values, which we assume are next to
    // each other, due to our sorting criteria.
    assert(i < Ops.size());
    if (i+1 != Ops.size() && Ops[i+1].Op == Ops[i].Op) {
      if (Opcode == Instruction::And || Opcode == Instruction::Or) {
        // Drop duplicate values for And and Or.
        Ops.erase(Ops.begin()+i);
        --i; --e;
        ++NumAnnihil;
        continue;
      }

      // Drop pairs of values for Xor.
      assert(Opcode == Instruction::Xor);
      if (e == 2)
        return Constant::getNullValue(Ops[0].Op->getType());

      // Y ^ X^X -> Y
      Ops.erase(Ops.begin()+i, Ops.begin()+i+2);
      i -= 1; e -= 2;
      ++NumAnnihil;
    }
  }
  return nullptr;
}

/// Helper function of CombineXorOpnd(). It creates a bitwise-and
/// instruction with the given two operands, and returns the resulting
/// instruction. There are two special cases: 1) if the constant operand is 0,
/// it will return NULL. 2) if the constant is ~0, the symbolic operand will
/// be returned.
static Value *createAndInstr(Instruction *InsertBefore, Value *Opnd,
                             const APInt &ConstOpnd) {
  if (ConstOpnd.isNullValue())
    return nullptr;

  if (ConstOpnd.isAllOnesValue())
    return Opnd;

  Instruction *I = BinaryOperator::CreateAnd(
      Opnd, ConstantInt::get(Opnd->getType(), ConstOpnd), "and.ra",
      InsertBefore);
  I->setDebugLoc(InsertBefore->getDebugLoc());
  return I;
}

// Helper function of OptimizeXor(). It tries to simplify "Opnd1 ^ ConstOpnd"
// into "R ^ C", where C would be 0, and R is a symbolic value.
//
// If it was successful, true is returned, and the "R" and "C" are returned
// via "Res" and "ConstOpnd", respectively; otherwise, false is returned,
// and both "Res" and "ConstOpnd" remain unchanged.
bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
                                     APInt &ConstOpnd, Value *&Res) {
  // Xor-Rule 1: (x | c1) ^ c2 = (x | c1) ^ (c1 ^ c1) ^ c2
  //                           = ((x | c1) ^ c1) ^ (c1 ^ c2)
  //                           = (x & ~c1) ^ (c1 ^ c2)
  // It is useful only when c1 == c2.
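  // For example (illustrative): with c1 == c2 == 0xF on i8, "(x | 15) ^ 15"
  // becomes "x & 0xF0" and the constant part drops to 0.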
  if (!Opnd1->isOrExpr() || Opnd1->getConstPart().isNullValue())
    return false;

  if (!Opnd1->getValue()->hasOneUse())
    return false;

  const APInt &C1 = Opnd1->getConstPart();
  if (C1 != ConstOpnd)
    return false;

  Value *X = Opnd1->getSymbolicPart();
  Res = createAndInstr(I, X, ~C1);
  // ConstOpnd was C2, now C1 ^ C2.
  ConstOpnd ^= C1;

  if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue()))
    RedoInsts.insert(T);
  return true;
}

// Helper function of OptimizeXor(). It tries to simplify
// "Opnd1 ^ Opnd2 ^ ConstOpnd" into "R ^ C", where C would be 0, and R is a
// symbolic value.
//
// If it was successful, true is returned, and the "R" and "C" are returned
// via "Res" and "ConstOpnd", respectively (if the entire expression evaluates
// to a constant, Res is set to NULL); otherwise, false is returned, and both
// "Res" and "ConstOpnd" remain unchanged.
bool ReassociatePass::CombineXorOpnd(Instruction *I, XorOpnd *Opnd1,
                                     XorOpnd *Opnd2, APInt &ConstOpnd,
                                     Value *&Res) {
  Value *X = Opnd1->getSymbolicPart();
  if (X != Opnd2->getSymbolicPart())
    return false;

  // This many instructions become dead. (At least "Opnd1 ^ Opnd2" will die.)
  int DeadInstNum = 1;
  if (Opnd1->getValue()->hasOneUse())
    DeadInstNum++;
  if (Opnd2->getValue()->hasOneUse())
    DeadInstNum++;

  // Xor-Rule 2:
  //  (x | c1) ^ (x & c2)
  //   = (x|c1) ^ (x&c2) ^ (c1 ^ c1) = ((x|c1) ^ c1) ^ (x & c2) ^ c1
  //   = (x & ~c1) ^ (x & c2) ^ c1               // Xor-Rule 1
  //   = (x & c3) ^ c1, where c3 = ~c1 ^ c2      // Xor-rule 3
  //
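  // For example (illustrative): with 4-bit constants c1 = 0b1100 and
  // c2 = 0b1010, c3 = ~c1 ^ c2 = 0b1001, so (x | 0b1100) ^ (x & 0b1010)
  // simplifies to (x & 0b1001) ^ 0b1100.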
  if (Opnd1->isOrExpr() != Opnd2->isOrExpr()) {
    if (Opnd2->isOrExpr())
      std::swap(Opnd1, Opnd2);

    const APInt &C1 = Opnd1->getConstPart();
    const APInt &C2 = Opnd2->getConstPart();
    APInt C3((~C1) ^ C2);

    // Do not increase code size!
    if (!C3.isNullValue() && !C3.isAllOnesValue()) {
      int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
      if (NewInstNum > DeadInstNum)
        return false;
    }

    Res = createAndInstr(I, X, C3);
    ConstOpnd ^= C1;
  } else if (Opnd1->isOrExpr()) {
    // Xor-Rule 3: (x | c1) ^ (x | c2) = (x & c3) ^ c3 where c3 = c1 ^ c2
    //
    const APInt &C1 = Opnd1->getConstPart();
    const APInt &C2 = Opnd2->getConstPart();
    APInt C3 = C1 ^ C2;

    // Do not increase code size
    if (!C3.isNullValue() && !C3.isAllOnesValue()) {
      int NewInstNum = ConstOpnd.getBoolValue() ? 1 : 2;
      if (NewInstNum > DeadInstNum)
        return false;
    }

    Res = createAndInstr(I, X, C3);
    ConstOpnd ^= C3;
  } else {
    // Xor-Rule 4: (x & c1) ^ (x & c2) = (x & (c1^c2))
    //
    const APInt &C1 = Opnd1->getConstPart();
    const APInt &C2 = Opnd2->getConstPart();
    APInt C3 = C1 ^ C2;
    Res = createAndInstr(I, X, C3);
  }

  // Put the original operands in the Redo list; hope they will be deleted
  // as dead code.
  if (Instruction *T = dyn_cast<Instruction>(Opnd1->getValue()))
    RedoInsts.insert(T);
  if (Instruction *T = dyn_cast<Instruction>(Opnd2->getValue()))
    RedoInsts.insert(T);

  return true;
}

/// Optimize a series of operands to an 'xor' instruction. If it can be reduced
/// to a single Value, it is returned, otherwise the Ops list is mutated as
/// necessary.
Value *ReassociatePass::OptimizeXor(Instruction *I,
                                    SmallVectorImpl<ValueEntry> &Ops) {
  if (Value *V = OptimizeAndOrXor(Instruction::Xor, Ops))
    return V;

  if (Ops.size() == 1)
    return nullptr;

  SmallVector<XorOpnd, 8> Opnds;
  SmallVector<XorOpnd*, 8> OpndPtrs;
  Type *Ty = Ops[0].Op->getType();
  APInt ConstOpnd(Ty->getScalarSizeInBits(), 0);

  // Step 1: Convert ValueEntry to XorOpnd
  for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
    Value *V = Ops[i].Op;
    const APInt *C;
    // TODO: Support non-splat vectors.
    if (match(V, m_APInt(C))) {
      ConstOpnd ^= *C;
    } else {
      XorOpnd O(V);
      O.setSymbolicRank(getRank(O.getSymbolicPart()));
      Opnds.push_back(O);
    }
  }

  // NOTE: From this point on, do *NOT* add/delete element to/from "Opnds".
  // It would otherwise invalidate the "Opnds"'s iterator, and hence invalidate
  // the "OpndPtrs" as well. For the similar reason, do not fuse this loop
  // with the previous loop --- the iterator of the "Opnds" may be invalidated
  // when new elements are added to the vector.
  for (unsigned i = 0, e = Opnds.size(); i != e; ++i)
    OpndPtrs.push_back(&Opnds[i]);

1351 // Step 2: Sort the Xor-Operands in a way such that the operands containing
1352 // the same symbolic value cluster together. For instance, the input operand
1353 // sequence ("x | 123", "y & 456", "x & 789") will be sorted into:
1354 // ("x | 123", "x & 789", "y & 456").
1355 //
1356 // The purpose is twofold:
1357 // 1) Cluster together the operands sharing the same symbolic-value.
1358 // 2) An operand with a smaller symbolic-value rank is permuted earlier,
1359 //    which can potentially shorten the critical path and expose more
1360 //    loop invariants. Note that values' ranks are essentially defined in
1361 //    RPO order (FIXME). So, if Rank(X) < Rank(Y) < Rank(Z), X is defined
1362 //    earlier than Y, which is defined earlier than Z. Permuting "x | 1",
1363 //    "y & 2", "z" into the order X-Y-Z is better than any other order.
1364 llvm::stable_sort(OpndPtrs, [](XorOpnd *LHS, XorOpnd *RHS) {
1365 return LHS->getSymbolicRank() < RHS->getSymbolicRank();
1366 });
1367
1368 // Step 3: Combine adjacent operands
1369 XorOpnd *PrevOpnd = nullptr;
1370 bool Changed = false;
1371 for (unsigned i = 0, e = Opnds.size(); i < e; i++) {
1372 XorOpnd *CurrOpnd = OpndPtrs[i];
1373 // The combined value
1374 Value *CV;
1375
1376 // Step 3.1: Try simplifying "CurrOpnd ^ ConstOpnd"
1377 if (!ConstOpnd.isNullValue() &&
1378 CombineXorOpnd(I, CurrOpnd, ConstOpnd, CV)) {
1379 Changed = true;
1380 if (CV)
1381 *CurrOpnd = XorOpnd(CV);
1382 else {
1383 CurrOpnd->Invalidate();
1384 continue;
1385 }
1386 }
1387
1388 if (!PrevOpnd || CurrOpnd->getSymbolicPart() != PrevOpnd->getSymbolicPart()) {
1389 PrevOpnd = CurrOpnd;
1390 continue;
1391 }
1392
1393 // Step 3.2: When previous and current operands share the same symbolic
1394 // value, try to simplify "PrevOpnd ^ CurrOpnd ^ ConstOpnd"
1395 if (CombineXorOpnd(I, CurrOpnd, PrevOpnd, ConstOpnd, CV)) {
1396 // Remove previous operand
1397 PrevOpnd->Invalidate();
1398 if (CV) {
1399 *CurrOpnd = XorOpnd(CV);
1400 PrevOpnd = CurrOpnd;
1401 } else {
1402 CurrOpnd->Invalidate();
1403 PrevOpnd = nullptr;
1404 }
1405 Changed = true;
1406 }
1407 }
1408
1409 // Step 4: Reassemble the Ops
1410 if (Changed) {
1411 Ops.clear();
1412 for (unsigned int i = 0, e = Opnds.size(); i < e; i++) {
1413 XorOpnd &O = Opnds[i];
1414 if (O.isInvalid())
1415 continue;
1416 ValueEntry VE(getRank(O.getValue()), O.getValue());
1417 Ops.push_back(VE);
1418 }
1419 if (!ConstOpnd.isNullValue()) {
1420 Value *C = ConstantInt::get(Ty, ConstOpnd);
1421 ValueEntry VE(getRank(C), C);
1422 Ops.push_back(VE);
1423 }
1424 unsigned Sz = Ops.size();
1425 if (Sz == 1)
1426 return Ops.back().Op;
1427 if (Sz == 0) {
1428 assert(ConstOpnd.isNullValue());
1429 return ConstantInt::get(Ty, ConstOpnd);
1430 }
1431 }
1432
1433 return nullptr;
1434 }
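
// A minimal worked example of OptimizeXor (hypothetical input): for
// "(x & 12) ^ (x & 10) ^ 3", Step 1 folds 3 into ConstOpnd, Step 2 sorts the
// two operands sharing the symbolic value "x" next to each other, and
// Step 3.2 applies Xor-Rule 4: (x & 12) ^ (x & 10) = x & (12 ^ 10) = x & 6.
// Step 4 then rebuilds Ops as [x & 6, 3].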
1435
1436 /// Optimize a series of operands to an 'add' instruction. This
1437 /// optimizes based on identities. If it can be reduced to a single Value, it
1438 /// is returned, otherwise the Ops list is mutated as necessary.
1439 Value *ReassociatePass::OptimizeAdd(Instruction *I,
1440 SmallVectorImpl<ValueEntry> &Ops) {
1441   // Scan the operand lists looking for X and -X pairs. If we find any, we
1442   // can simplify expressions like X+-X == 0 and X+~X == -1.
1443   // While we're at it, scan for any duplicates: we want to canonicalize
1444   // Y+Y+Y+Z -> 3*Y+Z.
1445
1446 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1447 Value *TheOp = Ops[i].Op;
1448 // Check to see if we've seen this operand before. If so, we factor all
1449 // instances of the operand together. Due to our sorting criteria, we know
1450 // that these need to be next to each other in the vector.
1451 if (i+1 != Ops.size() && Ops[i+1].Op == TheOp) {
1452 // Rescan the list, remove all instances of this operand from the expr.
1453 unsigned NumFound = 0;
1454 do {
1455 Ops.erase(Ops.begin()+i);
1456 ++NumFound;
1457 } while (i != Ops.size() && Ops[i].Op == TheOp);
1458
1459 LLVM_DEBUG(dbgs() << "\nFACTORING [" << NumFound << "]: " << *TheOp
1460 << '\n');
1461 ++NumFactor;
1462
1463 // Insert a new multiply.
1464 Type *Ty = TheOp->getType();
1465 Constant *C = Ty->isIntOrIntVectorTy() ?
1466 ConstantInt::get(Ty, NumFound) : ConstantFP::get(Ty, NumFound);
1467 Instruction *Mul = CreateMul(TheOp, C, "factor", I, I);
1468
1469 // Now that we have inserted a multiply, optimize it. This allows us to
1470 // handle cases that require multiple factoring steps, such as this:
1471 // (X*2) + (X*2) + (X*2) -> (X*2)*3 -> X*6
1472 RedoInsts.insert(Mul);
1473
1474 // If every add operand was a duplicate, return the multiply.
1475 if (Ops.empty())
1476 return Mul;
1477
1478 // Otherwise, we had some input that didn't have the dupe, such as
1479 // "A + A + B" -> "A*2 + B". Add the new multiply to the list of
1480 // things being added by this operation.
1481 Ops.insert(Ops.begin(), ValueEntry(getRank(Mul), Mul));
1482
1483 --i;
1484 e = Ops.size();
1485 continue;
1486 }
1487
1488 // Check for X and -X or X and ~X in the operand list.
1489 Value *X;
1490 if (!match(TheOp, m_Neg(m_Value(X))) && !match(TheOp, m_Not(m_Value(X))) &&
1491 !match(TheOp, m_FNeg(m_Value(X))))
1492 continue;
1493
1494 unsigned FoundX = FindInOperandList(Ops, i, X);
1495 if (FoundX == i)
1496 continue;
1497
1498 // Remove X and -X from the operand list.
1499 if (Ops.size() == 2 &&
1500 (match(TheOp, m_Neg(m_Value())) || match(TheOp, m_FNeg(m_Value()))))
1501 return Constant::getNullValue(X->getType());
1502
1503 // Remove X and ~X from the operand list.
1504 if (Ops.size() == 2 && match(TheOp, m_Not(m_Value())))
1505 return Constant::getAllOnesValue(X->getType());
1506
1507 Ops.erase(Ops.begin()+i);
1508 if (i < FoundX)
1509 --FoundX;
1510 else
1511 --i; // Need to back up an extra one.
1512 Ops.erase(Ops.begin()+FoundX);
1513 ++NumAnnihil;
1514 --i; // Revisit element.
1515 e -= 2; // Removed two elements.
1516
1517     // If the pair was X and ~X, append -1 (all ones) to the operand list.
1518 if (match(TheOp, m_Not(m_Value()))) {
1519 Value *V = Constant::getAllOnesValue(X->getType());
1520 Ops.insert(Ops.end(), ValueEntry(getRank(V), V));
1521 e += 1;
1522 }
1523 }
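
// For illustration (hypothetical i32 input): in "a + x + ~x", the scan above
// finds the X/~X pair, erases both operands, and appends -1, leaving
// "a + (-1)". This is sound because ~x == -x - 1, so x + ~x == -1.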
1524
1525 // Scan the operand list, checking to see if there are any common factors
1526 // between operands. Consider something like A*A+A*B*C+D. We would like to
1527 // reassociate this to A*(A+B*C)+D, which reduces the number of multiplies.
1528 // To efficiently find this, we count the number of times a factor occurs
1529 // for any ADD operands that are MULs.
1530 DenseMap<Value*, unsigned> FactorOccurrences;
1531
1532 // Keep track of each multiply we see, to avoid triggering on (X*4)+(X*4)
1533 // where they are actually the same multiply.
1534 unsigned MaxOcc = 0;
1535 Value *MaxOccVal = nullptr;
1536 for (unsigned i = 0, e = Ops.size(); i != e; ++i) {
1537 BinaryOperator *BOp =
1538 isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul);
1539 if (!BOp)
1540 continue;
1541
1542 // Compute all of the factors of this added value.
1543 SmallVector<Value*, 8> Factors;
1544 FindSingleUseMultiplyFactors(BOp, Factors);
1545 assert(Factors.size() > 1 && "Bad linearize!");
1546
1547 // Add one to FactorOccurrences for each unique factor in this op.
1548 SmallPtrSet<Value*, 8> Duplicates;
1549 for (unsigned i = 0, e = Factors.size(); i != e; ++i) {
1550 Value *Factor = Factors[i];
1551 if (!Duplicates.insert(Factor).second)
1552 continue;
1553
1554 unsigned Occ = ++FactorOccurrences[Factor];
1555 if (Occ > MaxOcc) {
1556 MaxOcc = Occ;
1557 MaxOccVal = Factor;
1558 }
1559
1560       // If Factor is a negative constant, add the negated value as a factor
1561       // because we can percolate the negate out. Watch for the minimum signed
1562       // value, which cannot be negated without overflow.
1563 if (ConstantInt *CI = dyn_cast<ConstantInt>(Factor)) {
1564 if (CI->isNegative() && !CI->isMinValue(true)) {
1565 Factor = ConstantInt::get(CI->getContext(), -CI->getValue());
1566 if (!Duplicates.insert(Factor).second)
1567 continue;
1568 unsigned Occ = ++FactorOccurrences[Factor];
1569 if (Occ > MaxOcc) {
1570 MaxOcc = Occ;
1571 MaxOccVal = Factor;
1572 }
1573 }
1574 } else if (ConstantFP *CF = dyn_cast<ConstantFP>(Factor)) {
1575 if (CF->isNegative()) {
1576 APFloat F(CF->getValueAPF());
1577 F.changeSign();
1578 Factor = ConstantFP::get(CF->getContext(), F);
1579 if (!Duplicates.insert(Factor).second)
1580 continue;
1581 unsigned Occ = ++FactorOccurrences[Factor];
1582 if (Occ > MaxOcc) {
1583 MaxOcc = Occ;
1584 MaxOccVal = Factor;
1585 }
1586 }
1587 }
1588 }
1589 }
1590
1591 // If any factor occurred more than one time, we can pull it out.
1592 if (MaxOcc > 1) {
1593 LLVM_DEBUG(dbgs() << "\nFACTORING [" << MaxOcc << "]: " << *MaxOccVal
1594 << '\n');
1595 ++NumFactor;
1596
1597 // Create a new instruction that uses the MaxOccVal twice. If we don't do
1598 // this, we could otherwise run into situations where removing a factor
1599 // from an expression will drop a use of maxocc, and this can cause
1600 // RemoveFactorFromExpression on successive values to behave differently.
1601 Instruction *DummyInst =
1602 I->getType()->isIntOrIntVectorTy()
1603 ? BinaryOperator::CreateAdd(MaxOccVal, MaxOccVal)
1604 : BinaryOperator::CreateFAdd(MaxOccVal, MaxOccVal);
1605
1606 SmallVector<WeakTrackingVH, 4> NewMulOps;
1607 for (unsigned i = 0; i != Ops.size(); ++i) {
1608 // Only try to remove factors from expressions we're allowed to.
1609 BinaryOperator *BOp =
1610 isReassociableOp(Ops[i].Op, Instruction::Mul, Instruction::FMul);
1611 if (!BOp)
1612 continue;
1613
1614 if (Value *V = RemoveFactorFromExpression(Ops[i].Op, MaxOccVal)) {
1615 // The factorized operand may occur several times. Convert them all in
1616 // one fell swoop.
1617 for (unsigned j = Ops.size(); j != i;) {
1618 --j;
1619 if (Ops[j].Op == Ops[i].Op) {
1620 NewMulOps.push_back(V);
1621 Ops.erase(Ops.begin()+j);
1622 }
1623 }
1624 --i;
1625 }
1626 }
1627
1628 // No need for extra uses anymore.
1629 DummyInst->deleteValue();
1630
1631 unsigned NumAddedValues = NewMulOps.size();
1632 Value *V = EmitAddTreeOfValues(I, NewMulOps);
1633
1634 // Now that we have inserted the add tree, optimize it. This allows us to
1635 // handle cases that require multiple factoring steps, such as this:
1636 // A*A*B + A*A*C --> A*(A*B+A*C) --> A*(A*(B+C))
1637 assert(NumAddedValues > 1 && "Each occurrence should contribute a value");
1638 (void)NumAddedValues;
1639 if (Instruction *VI = dyn_cast<Instruction>(V))
1640 RedoInsts.insert(VI);
1641
1642 // Create the multiply.
1643 Instruction *V2 = CreateMul(V, MaxOccVal, "reass.mul", I, I);
1644
1645 // Rerun associate on the multiply in case the inner expression turned into
1646 // a multiply. We want to make sure that we keep things in canonical form.
1647 RedoInsts.insert(V2);
1648
1649 // If every add operand included the factor (e.g. "A*B + A*C"), then the
1650 // entire result expression is just the multiply "A*(B+C)".
1651 if (Ops.empty())
1652 return V2;
1653
1654 // Otherwise, we had some input that didn't have the factor, such as
1655 // "A*B + A*C + D" -> "A*(B+C) + D". Add the new multiply to the list of
1656 // things being added by this operation.
1657 Ops.insert(Ops.begin(), ValueEntry(getRank(V2), V2));
1658 }
1659
1660 return nullptr;
1661 }
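
// A compact trace of the common-factor logic above (hypothetical input): for
// "A*A + A*B*C + D", the factor counts are A:2 (each add operand counts a
// factor at most once), B:1, C:1, so MaxOccVal = A with MaxOcc = 2.
// RemoveFactorFromExpression strips A out of both multiplies,
// EmitAddTreeOfValues builds "A + B*C", and CreateMul forms "(A + B*C)*A",
// which is prepended to Ops alongside the remaining D.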
1662
1663 /// Build up a vector of value/power pairs factoring a product.
1664 ///
1665 /// Given a series of multiplication operands, build a vector of factors and
1666 /// the powers each is raised to when forming the final product. Sort them in
1667 /// the order of descending power.
1668 ///
1669 /// (x*x) -> [(x, 2)]
1670 /// ((x*x)*x) -> [(x, 3)]
1671 /// ((((x*y)*x)*y)*x) -> [(x, 3), (y, 2)]
1672 ///
1673 /// \returns Whether any factors have a power greater than one.
1674 static bool collectMultiplyFactors(SmallVectorImpl<ValueEntry> &Ops,
1675 SmallVectorImpl<Factor> &Factors) {
1676 // FIXME: Have Ops be (ValueEntry, Multiplicity) pairs, simplifying this.
1677 // Compute the sum of powers of simplifiable factors.
1678 unsigned FactorPowerSum = 0;
1679 for (unsigned Idx = 1, Size = Ops.size(); Idx < Size; ++Idx) {
1680 Value *Op = Ops[Idx-1].Op;
1681
1682 // Count the number of occurrences of this value.
1683 unsigned Count = 1;
1684 for (; Idx < Size && Ops[Idx].Op == Op; ++Idx)
1685 ++Count;
1686 // Track for simplification all factors which occur 2 or more times.
1687 if (Count > 1)
1688 FactorPowerSum += Count;
1689 }
1690
1691 // We can only simplify factors if the sum of the powers of our simplifiable
1692 // factors is 4 or higher. When that is the case, we will *always* have
1693 // a simplification. This is an important invariant to prevent cyclically
1694 // trying to simplify already-minimal expressions.
1695 if (FactorPowerSum < 4)
1696 return false;
1697
1698 // Now gather the simplifiable factors, removing them from Ops.
1699 FactorPowerSum = 0;
1700 for (unsigned Idx = 1; Idx < Ops.size(); ++Idx) {
1701 Value *Op = Ops[Idx-1].Op;
1702
1703 // Count the number of occurrences of this value.
1704 unsigned Count = 1;
1705 for (; Idx < Ops.size() && Ops[Idx].Op == Op; ++Idx)
1706 ++Count;
1707 if (Count == 1)
1708 continue;
1709 // Move an even number of occurrences to Factors.
1710 Count &= ~1U;
1711 Idx -= Count;
1712 FactorPowerSum += Count;
1713 Factors.push_back(Factor(Op, Count));
1714 Ops.erase(Ops.begin()+Idx, Ops.begin()+Idx+Count);
1715 }
1716
1717 // None of the adjustments above should have reduced the sum of factor powers
1718 // below our minimum of '4'.
1719 assert(FactorPowerSum >= 4);
1720
1721 llvm::stable_sort(Factors, [](const Factor &LHS, const Factor &RHS) {
1722 return LHS.Power > RHS.Power;
1723 });
1724 return true;
1725 }
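
// A short worked trace (hypothetical input): for the sorted operand list
// [x, x, x, y, y], the first pass computes FactorPowerSum = 3 + 2 = 5 >= 4,
// so simplification proceeds. The second pass moves only even counts: two of
// the three x's become Factor(x, 2), both y's become Factor(y, 2), and the
// lone leftover x stays behind in Ops.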
1726
1727 /// Build a tree of multiplies, computing the product of Ops.
1728 static Value *buildMultiplyTree(IRBuilderBase &Builder,
1729 SmallVectorImpl<Value*> &Ops) {
1730 if (Ops.size() == 1)
1731 return Ops.back();
1732
1733 Value *LHS = Ops.pop_back_val();
1734 do {
1735 if (LHS->getType()->isIntOrIntVectorTy())
1736 LHS = Builder.CreateMul(LHS, Ops.pop_back_val());
1737 else
1738 LHS = Builder.CreateFMul(LHS, Ops.pop_back_val());
1739 } while (!Ops.empty());
1740
1741 return LHS;
1742 }
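
// E.g., for a hypothetical Ops = [a, b, c], this pops from the back and emits
// "c * b" followed by "(c * b) * a", i.e. a simple left-leaning chain.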
1743
1744 /// Build a minimal multiplication DAG for (a^x)*(b^y)*(c^z)*...
1745 ///
1746 /// Given a vector of values raised to various powers, where no two values are
1747 /// equal and the powers are sorted in decreasing order, compute the minimal
1748 /// DAG of multiplies to compute the final product, and return that product
1749 /// value.
1750 Value *
1751 ReassociatePass::buildMinimalMultiplyDAG(IRBuilderBase &Builder,
1752 SmallVectorImpl<Factor> &Factors) {
1753 assert(Factors[0].Power);
1754 SmallVector<Value *, 4> OuterProduct;
1755 for (unsigned LastIdx = 0, Idx = 1, Size = Factors.size();
1756 Idx < Size && Factors[Idx].Power > 0; ++Idx) {
1757 if (Factors[Idx].Power != Factors[LastIdx].Power) {
1758 LastIdx = Idx;
1759 continue;
1760 }
1761
1762 // We want to multiply across all the factors with the same power so that
1763 // we can raise them to that power as a single entity. Build a mini tree
1764 // for that.
1765 SmallVector<Value *, 4> InnerProduct;
1766 InnerProduct.push_back(Factors[LastIdx].Base);
1767 do {
1768 InnerProduct.push_back(Factors[Idx].Base);
1769 ++Idx;
1770 } while (Idx < Size && Factors[Idx].Power == Factors[LastIdx].Power);
1771
1772 // Reset the base value of the first factor to the new expression tree.
1773 // We'll remove all the factors with the same power in a second pass.
1774 Value *M = Factors[LastIdx].Base = buildMultiplyTree(Builder, InnerProduct);
1775 if (Instruction *MI = dyn_cast<Instruction>(M))
1776 RedoInsts.insert(MI);
1777
1778 LastIdx = Idx;
1779 }
1780 // Unique factors with equal powers -- we've folded them into the first one's
1781 // base.
1782 Factors.erase(std::unique(Factors.begin(), Factors.end(),
1783 [](const Factor &LHS, const Factor &RHS) {
1784 return LHS.Power == RHS.Power;
1785 }),
1786 Factors.end());
1787
1788 // Iteratively collect the base of each factor with an odd power into the
1789 // outer product, and halve each power in preparation for squaring the
1790 // expression.
1791 for (unsigned Idx = 0, Size = Factors.size(); Idx != Size; ++Idx) {
1792 if (Factors[Idx].Power & 1)
1793 OuterProduct.push_back(Factors[Idx].Base);
1794 Factors[Idx].Power >>= 1;
1795 }
1796 if (Factors[0].Power) {
1797 Value *SquareRoot = buildMinimalMultiplyDAG(Builder, Factors);
1798 OuterProduct.push_back(SquareRoot);
1799 OuterProduct.push_back(SquareRoot);
1800 }
1801 if (OuterProduct.size() == 1)
1802 return OuterProduct.front();
1803
1804 Value *V = buildMultiplyTree(Builder, OuterProduct);
1805 return V;
1806 }
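
// Continuing the collectMultiplyFactors sketch (hypothetical input): for
// Factors = [(x, 2), (y, 2)], the equal-power run is folded into the single
// base "x * y", still raised to the power 2. Halving the powers and recursing
// yields SquareRoot = x * y, which is pushed twice, so the final product is
// "(x * y) * (x * y)" -- x^2 * y^2 in two multiplies instead of three.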
1807
1808 Value *ReassociatePass::OptimizeMul(BinaryOperator *I,
1809 SmallVectorImpl<ValueEntry> &Ops) {
1810   // We can only optimize the multiplies when there is a chain of more than
1811   // three operands, such that a balanced tree might require fewer multiplies.
1812 if (Ops.size() < 4)
1813 return nullptr;
1814
1815 // Try to turn linear trees of multiplies without other uses of the
1816 // intermediate stages into minimal multiply DAGs with perfect sub-expression
1817 // re-use.
1818 SmallVector<Factor, 4> Factors;
1819 if (!collectMultiplyFactors(Ops, Factors))
1820 return nullptr; // All distinct factors, so nothing left for us to do.
1821
1822 IRBuilder<> Builder(I);
1823 // The reassociate transformation for FP operations is performed only
1824 // if unsafe algebra is permitted by FastMathFlags. Propagate those flags
1825 // to the newly generated operations.
1826 if (auto FPI = dyn_cast<FPMathOperator>(I))
1827 Builder.setFastMathFlags(FPI->getFastMathFlags());
1828
1829 Value *V = buildMinimalMultiplyDAG(Builder, Factors);
1830 if (Ops.empty())
1831 return V;
1832
1833 ValueEntry NewEntry = ValueEntry(getRank(V), V);
1834 Ops.insert(llvm::lower_bound(Ops, NewEntry), NewEntry);
1835 return nullptr;
1836 }
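
// Putting the pieces together (hypothetical input): a linear chain computing
// x*x*x*y*y with four multiplies is rewritten as t = x*y; V = t*t; with the
// leftover x re-inserted into Ops, so the final tree computes x * (x*y)^2
// using three multiplies.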
1837
1838 Value *ReassociatePass::OptimizeExpression(BinaryOperator *I,
1839 SmallVectorImpl<ValueEntry> &Ops) {
1840 // Now that we have the linearized expression tree, try to optimize it.
1841 // Start by folding any constants that we found.
1842 Constant *Cst = nullptr;
1843 unsigned Opcode = I->getOpcode();
1844 while (!Ops.empty() && isa<Constant>(Ops.back().Op)) {
1845 Constant *C = cast<Constant>(Ops.pop_back_val().Op);
1846 Cst = Cst ? ConstantExpr::get(Opcode, C, Cst) : C;
1847 }
1848 // If there was nothing but constants then we are done.
1849 if (Ops.empty())
1850 return Cst;
1851
1852 // Put the combined constant back at the end of the operand list, except if
1853 // there is no point. For example, an add of 0 gets dropped here, while a
1854 // multiplication by zero turns the whole expression into zero.
1855 if (Cst && Cst != ConstantExpr::getBinOpIdentity(Opcode, I->getType())) {
1856 if (Cst == ConstantExpr::getBinOpAbsorber(Opcode, I->getType()))
1857 return Cst;
1858 Ops.push_back(ValueEntry(0, Cst));
1859 }
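
// For example (hypothetical inputs): in "x + y + 0" the folded constant 0 is
// the identity for 'add' and is simply dropped, while in "x * y * 0" the
// folded constant 0 is the absorber for 'mul' and the whole expression
// collapses to 0.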
1860
1861 if (Ops.size() == 1) return Ops[0].Op;
1862
1863 // Handle destructive annihilation due to identities between elements in the
1864 // argument list here.
1865 unsigned NumOps = Ops.size();
1866 switch (Opcode) {
1867 default: break;
1868 case Instruction::And:
1869 case Instruction::Or:
1870 if (Value *Result = OptimizeAndOrXor(Opcode, Ops))
1871 return Result;
1872 break;
1873
1874 case Instruction::Xor:
1875 if (Value *Result = OptimizeXor(I, Ops))
1876 return Result;
1877 break;
1878
1879 case Instruction::Add:
1880 case Instruction::FAdd:
1881 if (Value *Result = OptimizeAdd(I, Ops))
1882 return Result;
1883 break;
1884
1885 case Instruction::Mul:
1886 case Instruction::FMul:
1887 if (Value *Result = OptimizeMul(I, Ops))
1888 return Result;
1889 break;
1890 }
1891
1892 if (Ops.size() != NumOps)
1893 return OptimizeExpression(I, Ops);
1894 return nullptr;
1895 }
1896
1897 // Remove dead instructions, and if any operands become trivially dead, add
1898 // them to Insts so they will be removed as well.
1899 void ReassociatePass::RecursivelyEraseDeadInsts(Instruction *I,
1900 OrderedSet &Insts) {
1901 assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
1902 SmallVector<Value *, 4> Ops(I->op_begin(), I->op_end());
1903 ValueRankMap.erase(I);
1904 Insts.remove(I);
1905 RedoInsts.remove(I);
1906 llvm::salvageDebugInfo(*I);
1907 I->eraseFromParent();
1908 for (auto Op : Ops)
1909 if (Instruction *OpInst = dyn_cast<Instruction>(Op))
1910 if (OpInst->use_empty())
1911 Insts.insert(OpInst);
1912 }
1913
1914 /// Zap the given instruction, adding interesting operands to the work list.
1915 void ReassociatePass::EraseInst(Instruction *I) {
1916 assert(isInstructionTriviallyDead(I) && "Trivially dead instructions only!");
1917 LLVM_DEBUG(dbgs() << "Erasing dead inst: "; I->dump());
1918
1919 SmallVector<Value*, 8> Ops(I->op_begin(), I->op_end());
1920 // Erase the dead instruction.
1921 ValueRankMap.erase(I);
1922 RedoInsts.remove(I);
1923 llvm::salvageDebugInfo(*I);
1924 I->eraseFromParent();
1925 // Optimize its operands.
1926 SmallPtrSet<Instruction *, 8> Visited; // Detect self-referential nodes.
1927 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
1928 if (Instruction *Op = dyn_cast<Instruction>(Ops[i])) {
1929 // If this is a node in an expression tree, climb to the expression root
1930 // and add that since that's where optimization actually happens.
1931 unsigned Opcode = Op->getOpcode();
1932 while (Op->hasOneUse() && Op->user_back()->getOpcode() == Opcode &&
1933 Visited.insert(Op).second)
1934 Op = Op->user_back();
1935
1936 // The instruction we're going to push may be coming from a
1937 // dead block, and Reassociate skips the processing of unreachable
1938 // blocks because it's a waste of time and also because it can
1939       // lead to an infinite loop due to LLVM's non-standard definition
1940 // of dominance.
1941 if (ValueRankMap.find(Op) != ValueRankMap.end())
1942 RedoInsts.insert(Op);
1943 }
1944
1945 MadeChange = true;
1946 }
1947
1948 /// Recursively analyze an expression to build a list of instructions that have
1949 /// negative floating-point constant operands. The caller can then transform
1950 /// the list to create positive constants for better reassociation and CSE.
1951 static void getNegatibleInsts(Value *V,
1952 SmallVectorImpl<Instruction *> &Candidates) {
1953 // Handle only one-use instructions. Combining negations does not justify
1954 // replicating instructions.
1955 Instruction *I;
1956 if (!match(V, m_OneUse(m_Instruction(I))))
1957 return;
1958
1959 // Handle expressions of multiplications and divisions.
1960 // TODO: This could look through floating-point casts.
1961 const APFloat *C;
1962 switch (I->getOpcode()) {
1963 case Instruction::FMul:
1964 // Not expecting non-canonical code here. Bail out and wait.
1965 if (match(I->getOperand(0), m_Constant()))
1966 break;
1967
1968 if (match(I->getOperand(1), m_APFloat(C)) && C->isNegative()) {
1969 Candidates.push_back(I);
1970 LLVM_DEBUG(dbgs() << "FMul with negative constant: " << *I << '\n');
1971 }
1972 getNegatibleInsts(I->getOperand(0), Candidates);
1973 getNegatibleInsts(I->getOperand(1), Candidates);
1974 break;
1975 case Instruction::FDiv:
1976 // Not expecting non-canonical code here. Bail out and wait.
1977 if (match(I->getOperand(0), m_Constant()) &&
1978 match(I->getOperand(1), m_Constant()))
1979 break;
1980
1981 if ((match(I->getOperand(0), m_APFloat(C)) && C->isNegative()) ||
1982 (match(I->getOperand(1), m_APFloat(C)) && C->isNegative())) {
1983 Candidates.push_back(I);
1984 LLVM_DEBUG(dbgs() << "FDiv with negative constant: " << *I << '\n');
1985 }
1986 getNegatibleInsts(I->getOperand(0), Candidates);
1987 getNegatibleInsts(I->getOperand(1), Candidates);
1988 break;
1989 default:
1990 break;
1991 }
1992 }
1993
1994 /// Given an fadd/fsub with an operand that is a one-use instruction (whose
1995 /// only use is the fadd/fsub), try to change negative floating-point
1996 /// constants into positive constants for better reassociation and CSE.
1997 Instruction *ReassociatePass::canonicalizeNegFPConstantsForOp(Instruction *I,
1998 Instruction *Op,
1999 Value *OtherOp) {
2000 assert((I->getOpcode() == Instruction::FAdd ||
2001 I->getOpcode() == Instruction::FSub) && "Expected fadd/fsub");
2002
2003 // Collect instructions with negative FP constants from the subtree that ends
2004 // in Op.
2005 SmallVector<Instruction *, 4> Candidates;
2006 getNegatibleInsts(Op, Candidates);
2007 if (Candidates.empty())
2008 return nullptr;
2009
2010 // Don't canonicalize x + (-Constant * y) -> x - (Constant * y), if the
2011 // resulting subtract will be broken up later. This can get us into an
2012 // infinite loop during reassociation.
2013 bool IsFSub = I->getOpcode() == Instruction::FSub;
2014 bool NeedsSubtract = !IsFSub && Candidates.size() % 2 == 1;
2015 if (NeedsSubtract && ShouldBreakUpSubtract(I))
2016 return nullptr;
2017
2018 for (Instruction *Negatible : Candidates) {
2019 const APFloat *C;
2020 if (match(Negatible->getOperand(0), m_APFloat(C))) {
2021 assert(!match(Negatible->getOperand(1), m_Constant()) &&
2022 "Expecting only 1 constant operand");
2023 assert(C->isNegative() && "Expected negative FP constant");
2024 Negatible->setOperand(0, ConstantFP::get(Negatible->getType(), abs(*C)));
2025 MadeChange = true;
2026 }
2027 if (match(Negatible->getOperand(1), m_APFloat(C))) {
2028 assert(!match(Negatible->getOperand(0), m_Constant()) &&
2029 "Expecting only 1 constant operand");
2030 assert(C->isNegative() && "Expected negative FP constant");
2031 Negatible->setOperand(1, ConstantFP::get(Negatible->getType(), abs(*C)));
2032 MadeChange = true;
2033 }
2034 }
2035   assert(MadeChange && "Negative constant candidate was not changed");
2036
2037 // Negations cancelled out.
2038 if (Candidates.size() % 2 == 0)
2039 return I;
2040
2041 // Negate the final operand in the expression by flipping the opcode of this
2042 // fadd/fsub.
2043 assert(Candidates.size() % 2 == 1 && "Expected odd number");
2044 IRBuilder<> Builder(I);
2045 Value *NewInst = IsFSub ? Builder.CreateFAddFMF(OtherOp, Op, I)
2046 : Builder.CreateFSubFMF(OtherOp, Op, I);
2047 I->replaceAllUsesWith(NewInst);
2048 RedoInsts.insert(I);
2049 return dyn_cast<Instruction>(NewInst);
2050 }
2051
2052 /// Canonicalize expressions that contain a negative floating-point constant
2053 /// of the following form:
2054 /// OtherOp + (subtree) -> OtherOp {+/-} (canonical subtree)
2055 /// (subtree) + OtherOp -> OtherOp {+/-} (canonical subtree)
2056 /// OtherOp - (subtree) -> OtherOp {+/-} (canonical subtree)
2057 ///
2058 /// The fadd/fsub opcode may be switched to allow folding a negation into the
2059 /// input instruction.
2060 Instruction *ReassociatePass::canonicalizeNegFPConstants(Instruction *I) {
2061 LLVM_DEBUG(dbgs() << "Combine negations for: " << *I << '\n');
2062 Value *X;
2063 Instruction *Op;
2064 if (match(I, m_FAdd(m_Value(X), m_OneUse(m_Instruction(Op)))))
2065 if (Instruction *R = canonicalizeNegFPConstantsForOp(I, Op, X))
2066 I = R;
2067 if (match(I, m_FAdd(m_OneUse(m_Instruction(Op)), m_Value(X))))
2068 if (Instruction *R = canonicalizeNegFPConstantsForOp(I, Op, X))
2069 I = R;
2070 if (match(I, m_FSub(m_Value(X), m_OneUse(m_Instruction(Op)))))
2071 if (Instruction *R = canonicalizeNegFPConstantsForOp(I, Op, X))
2072 I = R;
2073 return I;
2074 }
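
// A minimal sketch of the net effect (hypothetical input): in
// "x - (y * -3.0)", the subtree holds one negatible constant, so it is
// flipped to 3.0; the odd negation count then flips the fsub to an fadd,
// producing "x + (y * 3.0)".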
2075
2076 /// Inspect and optimize the given instruction. Note that erasing
2077 /// instructions is not allowed.
2078 void ReassociatePass::OptimizeInst(Instruction *I) {
2079 // Only consider operations that we understand.
2080 if (!isa<UnaryOperator>(I) && !isa<BinaryOperator>(I))
2081 return;
2082
2083 if (I->getOpcode() == Instruction::Shl && isa<ConstantInt>(I->getOperand(1)))
2084 // If an operand of this shift is a reassociable multiply, or if the shift
2085 // is used by a reassociable multiply or add, turn into a multiply.
2086 if (isReassociableOp(I->getOperand(0), Instruction::Mul) ||
2087 (I->hasOneUse() &&
2088 (isReassociableOp(I->user_back(), Instruction::Mul) ||
2089 isReassociableOp(I->user_back(), Instruction::Add)))) {
2090 Instruction *NI = ConvertShiftToMul(I);
2091 RedoInsts.insert(I);
2092 MadeChange = true;
2093 I = NI;
2094 }
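
// E.g. (hypothetical input): in "(x << 4) * y", the shift feeds a
// reassociable multiply, so it is rewritten to "x * 16" and the constant can
// participate in the multiply tree.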
2095
2096 // Commute binary operators, to canonicalize the order of their operands.
2097 // This can potentially expose more CSE opportunities, and makes writing other
2098 // transformations simpler.
2099 if (I->isCommutative())
2100 canonicalizeOperands(I);
2101
2102 // Canonicalize negative constants out of expressions.
2103 if (Instruction *Res = canonicalizeNegFPConstants(I))
2104 I = Res;
2105
2106 // Don't optimize floating-point instructions unless they are 'fast'.
2107 if (I->getType()->isFPOrFPVectorTy() && !I->isFast())
2108 return;
2109
2110 // Do not reassociate boolean (i1) expressions. We want to preserve the
2111 // original order of evaluation for short-circuited comparisons that
2112 // SimplifyCFG has folded to AND/OR expressions. If the expression
2113 // is not further optimized, it is likely to be transformed back to a
2114 // short-circuited form for code gen, and the source order may have been
2115 // optimized for the most likely conditions.
2116 if (I->getType()->isIntegerTy(1))
2117 return;
2118
2119 // If this is a subtract instruction which is not already in negate form,
2120 // see if we can convert it to X+-Y.
2121 if (I->getOpcode() == Instruction::Sub) {
2122 if (ShouldBreakUpSubtract(I)) {
2123 Instruction *NI = BreakUpSubtract(I, RedoInsts);
2124 RedoInsts.insert(I);
2125 MadeChange = true;
2126 I = NI;
2127 } else if (match(I, m_Neg(m_Value()))) {
2128 // Otherwise, this is a negation. See if the operand is a multiply tree
2129 // and if this is not an inner node of a multiply tree.
2130 if (isReassociableOp(I->getOperand(1), Instruction::Mul) &&
2131 (!I->hasOneUse() ||
2132 !isReassociableOp(I->user_back(), Instruction::Mul))) {
2133 Instruction *NI = LowerNegateToMultiply(I);
2134 // If the negate was simplified, revisit the users to see if we can
2135 // reassociate further.
2136 for (User *U : NI->users()) {
2137 if (BinaryOperator *Tmp = dyn_cast<BinaryOperator>(U))
2138 RedoInsts.insert(Tmp);
2139 }
2140 RedoInsts.insert(I);
2141 MadeChange = true;
2142 I = NI;
2143 }
2144 }
2145 } else if (I->getOpcode() == Instruction::FNeg ||
2146 I->getOpcode() == Instruction::FSub) {
2147 if (ShouldBreakUpSubtract(I)) {
2148 Instruction *NI = BreakUpSubtract(I, RedoInsts);
2149 RedoInsts.insert(I);
2150 MadeChange = true;
2151 I = NI;
2152 } else if (match(I, m_FNeg(m_Value()))) {
2153 // Otherwise, this is a negation. See if the operand is a multiply tree
2154 // and if this is not an inner node of a multiply tree.
2155 Value *Op = isa<BinaryOperator>(I) ? I->getOperand(1) :
2156 I->getOperand(0);
2157 if (isReassociableOp(Op, Instruction::FMul) &&
2158 (!I->hasOneUse() ||
2159 !isReassociableOp(I->user_back(), Instruction::FMul))) {
2160 // If the negate was simplified, revisit the users to see if we can
2161 // reassociate further.
2162 Instruction *NI = LowerNegateToMultiply(I);
2163 for (User *U : NI->users()) {
2164 if (BinaryOperator *Tmp = dyn_cast<BinaryOperator>(U))
2165 RedoInsts.insert(Tmp);
2166 }
2167 RedoInsts.insert(I);
2168 MadeChange = true;
2169 I = NI;
2170 }
2171 }
2172 }
2173
2174 // If this instruction is an associative binary operator, process it.
2175 if (!I->isAssociative()) return;
2176 BinaryOperator *BO = cast<BinaryOperator>(I);
2177
2178 // If this is an interior node of a reassociable tree, ignore it until we
2179 // get to the root of the tree, to avoid N^2 analysis.
2180 unsigned Opcode = BO->getOpcode();
2181 if (BO->hasOneUse() && BO->user_back()->getOpcode() == Opcode) {
2182     // During the initial run we will get to the root of the tree.
2183     // But if we get here while we are redoing instructions, there is no
2184     // guarantee that the root will be visited. So redo it later.
2185 if (BO->user_back() != BO &&
2186 BO->getParent() == BO->user_back()->getParent())
2187 RedoInsts.insert(BO->user_back());
2188 return;
2189 }
2190
2191 // If this is an add tree that is used by a sub instruction, ignore it
2192 // until we process the subtract.
2193 if (BO->hasOneUse() && BO->getOpcode() == Instruction::Add &&
2194 cast<Instruction>(BO->user_back())->getOpcode() == Instruction::Sub)
2195 return;
2196 if (BO->hasOneUse() && BO->getOpcode() == Instruction::FAdd &&
2197 cast<Instruction>(BO->user_back())->getOpcode() == Instruction::FSub)
2198 return;
2199
2200 ReassociateExpression(BO);
2201 }
2202
2203 void ReassociatePass::ReassociateExpression(BinaryOperator *I) {
2204 // First, walk the expression tree, linearizing the tree, collecting the
2205 // operand information.
2206 SmallVector<RepeatedValue, 8> Tree;
2207 MadeChange |= LinearizeExprTree(I, Tree);
2208 SmallVector<ValueEntry, 8> Ops;
2209 Ops.reserve(Tree.size());
2210 for (unsigned i = 0, e = Tree.size(); i != e; ++i) {
2211 RepeatedValue E = Tree[i];
2212 Ops.append(E.second.getZExtValue(),
2213 ValueEntry(getRank(E.first), E.first));
2214 }
2215
2216 LLVM_DEBUG(dbgs() << "RAIn:\t"; PrintOps(I, Ops); dbgs() << '\n');
2217
2218 // Now that we have linearized the tree to a list and have gathered all of
2219 // the operands and their ranks, sort the operands by their rank. Use a
2220 // stable_sort so that values with equal ranks will have their relative
2221 // positions maintained (and so the compiler is deterministic). Note that
2222 // this sorts so that the highest ranking values end up at the beginning of
2223 // the vector.
2224 llvm::stable_sort(Ops);
2225
2226 // Now that we have the expression tree in a convenient
2227 // sorted form, optimize it globally if possible.
2228 if (Value *V = OptimizeExpression(I, Ops)) {
2229 if (V == I)
2230 // Self-referential expression in unreachable code.
2231 return;
2232 // This expression tree simplified to something that isn't a tree,
2233 // eliminate it.
2234 LLVM_DEBUG(dbgs() << "Reassoc to scalar: " << *V << '\n');
2235 I->replaceAllUsesWith(V);
2236 if (Instruction *VI = dyn_cast<Instruction>(V))
2237 if (I->getDebugLoc())
2238 VI->setDebugLoc(I->getDebugLoc());
2239 RedoInsts.insert(I);
2240 ++NumAnnihil;
2241 return;
2242 }
2243
2244 // We want to sink immediates as deeply as possible except in the case where
2245 // this is a multiply tree used only by an add, and the immediate is a -1.
2246 // In this case we reassociate to put the negation on the outside so that we
2247 // can fold the negation into the add: (-X)*Y + Z -> Z-X*Y
2248 if (I->hasOneUse()) {
2249 if (I->getOpcode() == Instruction::Mul &&
2250 cast<Instruction>(I->user_back())->getOpcode() == Instruction::Add &&
2251 isa<ConstantInt>(Ops.back().Op) &&
2252 cast<ConstantInt>(Ops.back().Op)->isMinusOne()) {
2253 ValueEntry Tmp = Ops.pop_back_val();
2254 Ops.insert(Ops.begin(), Tmp);
2255 } else if (I->getOpcode() == Instruction::FMul &&
2256 cast<Instruction>(I->user_back())->getOpcode() ==
2257 Instruction::FAdd &&
2258 isa<ConstantFP>(Ops.back().Op) &&
2259 cast<ConstantFP>(Ops.back().Op)->isExactlyValue(-1.0)) {
2260 ValueEntry Tmp = Ops.pop_back_val();
2261 Ops.insert(Ops.begin(), Tmp);
2262 }
2263 }
2264
2265 LLVM_DEBUG(dbgs() << "RAOut:\t"; PrintOps(I, Ops); dbgs() << '\n');
2266
2267 if (Ops.size() == 1) {
2268 if (Ops[0].Op == I)
2269 // Self-referential expression in unreachable code.
2270 return;
2271
2272 // This expression tree simplified to something that isn't a tree,
2273 // eliminate it.
2274 I->replaceAllUsesWith(Ops[0].Op);
2275 if (Instruction *OI = dyn_cast<Instruction>(Ops[0].Op))
2276 OI->setDebugLoc(I->getDebugLoc());
2277 RedoInsts.insert(I);
2278 return;
2279 }
2280
2281 if (Ops.size() > 2 && Ops.size() <= GlobalReassociateLimit) {
2282 // Find the pair with the highest count in the pairmap and move it to the
2283 // back of the list so that it can later be CSE'd.
2284 // example:
2285 // a*b*c*d*e
2286 // if c*e is the most "popular" pair, we can express this as
2287 // (((c*e)*d)*b)*a
2288 unsigned Max = 1;
2289 unsigned BestRank = 0;
2290 std::pair<unsigned, unsigned> BestPair;
2291 unsigned Idx = I->getOpcode() - Instruction::BinaryOpsBegin;
2292 for (unsigned i = 0; i < Ops.size() - 1; ++i)
2293 for (unsigned j = i + 1; j < Ops.size(); ++j) {
2294 unsigned Score = 0;
2295 Value *Op0 = Ops[i].Op;
2296 Value *Op1 = Ops[j].Op;
2297 if (std::less<Value *>()(Op1, Op0))
2298 std::swap(Op0, Op1);
2299 auto it = PairMap[Idx].find({Op0, Op1});
2300 if (it != PairMap[Idx].end()) {
2301 // Functions like BreakUpSubtract() can erase the Values we're using
2302 // as keys and create new Values after we built the PairMap. There's a
2303 // small chance that the new nodes can have the same address as
2304 // something already in the table. We shouldn't accumulate the stored
2305 // score in that case as it refers to the wrong Value.
2306 if (it->second.isValid())
2307 Score += it->second.Score;
2308 }
2309
2310 unsigned MaxRank = std::max(Ops[i].Rank, Ops[j].Rank);
2311 if (Score > Max || (Score == Max && MaxRank < BestRank)) {
2312 BestPair = {i, j};
2313 Max = Score;
2314 BestRank = MaxRank;
2315 }
2316 }
2317 if (Max > 1) {
2318 auto Op0 = Ops[BestPair.first];
2319 auto Op1 = Ops[BestPair.second];
2320 Ops.erase(&Ops[BestPair.second]);
2321 Ops.erase(&Ops[BestPair.first]);
2322 Ops.push_back(Op0);
2323 Ops.push_back(Op1);
2324 }
2325 }
2326 // Now that we ordered and optimized the expressions, splat them back into
2327 // the expression tree, removing any unneeded nodes.
2328 RewriteExprTree(I, Ops);
2329 }
2330
2331 void
2332 ReassociatePass::BuildPairMap(ReversePostOrderTraversal<Function *> &RPOT) {
2333 // Make a "pairmap" of how often each operand pair occurs.
2334 for (BasicBlock *BI : RPOT) {
2335 for (Instruction &I : *BI) {
2336 if (!I.isAssociative())
2337 continue;
2338
2339 // Ignore nodes that aren't at the root of trees.
2340 if (I.hasOneUse() && I.user_back()->getOpcode() == I.getOpcode())
2341 continue;
2342
2343 // Collect all operands in a single reassociable expression.
2344 // Since Reassociate has already been run once, we can assume things
2345 // are already canonical according to Reassociation's regime.
2346 SmallVector<Value *, 8> Worklist = { I.getOperand(0), I.getOperand(1) };
2347 SmallVector<Value *, 8> Ops;
2348 while (!Worklist.empty() && Ops.size() <= GlobalReassociateLimit) {
2349 Value *Op = Worklist.pop_back_val();
2350 Instruction *OpI = dyn_cast<Instruction>(Op);
2351 if (!OpI || OpI->getOpcode() != I.getOpcode() || !OpI->hasOneUse()) {
2352 Ops.push_back(Op);
2353 continue;
2354 }
2355 // Be paranoid about self-referencing expressions in unreachable code.
2356 if (OpI->getOperand(0) != OpI)
2357 Worklist.push_back(OpI->getOperand(0));
2358 if (OpI->getOperand(1) != OpI)
2359 Worklist.push_back(OpI->getOperand(1));
2360 }
2361 // Skip extremely long expressions.
2362 if (Ops.size() > GlobalReassociateLimit)
2363 continue;
2364
2365 // Add all pairwise combinations of operands to the pair map.
2366 unsigned BinaryIdx = I.getOpcode() - Instruction::BinaryOpsBegin;
2367 SmallSet<std::pair<Value *, Value*>, 32> Visited;
2368 for (unsigned i = 0; i < Ops.size() - 1; ++i) {
2369 for (unsigned j = i + 1; j < Ops.size(); ++j) {
2370 // Canonicalize operand orderings.
2371 Value *Op0 = Ops[i];
2372 Value *Op1 = Ops[j];
2373 if (std::less<Value *>()(Op1, Op0))
2374 std::swap(Op0, Op1);
2375 if (!Visited.insert({Op0, Op1}).second)
2376 continue;
2377 auto res = PairMap[BinaryIdx].insert({{Op0, Op1}, {Op0, Op1, 1}});
2378 if (!res.second) {
2379 // If either key value has been erased then we've got the same
2380 // address by coincidence. That can't happen here because nothing is
2381 // erasing values but it can happen by the time we're querying the
2382 // map.
2383 assert(res.first->second.isValid() && "WeakVH invalidated");
2384 ++res.first->second.Score;
2385 }
2386 }
2387 }
2388 }
2389 }
2390 }
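
// For instance (hypothetical input): given two independent expression trees
// computing a + b + c and a + b + d (each add a distinct single-use
// instruction), the operand pair {a, b} scores 2 while every other pair
// scores 1, so ReassociateExpression will later keep a and b adjacent in both
// trees, exposing the common subexpression to CSE.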
2391
2392 PreservedAnalyses ReassociatePass::run(Function &F, FunctionAnalysisManager &) {
2393   // Get the function's basic blocks in Reverse Post Order. This order is
2394   // used by BuildRankMap to precompute ranks correctly. It also excludes
2395   // dead basic blocks (it has been seen that the analysis in this pass could
2396   // hang when analysing dead basic blocks).
2397 ReversePostOrderTraversal<Function *> RPOT(&F);
2398
2399 // Calculate the rank map for F.
2400 BuildRankMap(F, RPOT);
2401
2402 // Build the pair map before running reassociate.
2403 // Technically this would be more accurate if we did it after one round
2404 // of reassociation, but in practice it doesn't seem to help much on
2405 // real-world code, so don't waste the compile time running reassociate
2406 // twice.
2407   // If a user wants, they could explicitly run reassociate twice in their
2408   // pass pipeline for further potential gains.
2409   // It might also be possible to update the pair map during runtime, but the
2410   // overhead of that may be large if there are many reassociable chains.
2411 BuildPairMap(RPOT);
2412
2413 MadeChange = false;
2414
2415 // Traverse the same blocks that were analysed by BuildRankMap.
2416 for (BasicBlock *BI : RPOT) {
2417 assert(RankMap.count(&*BI) && "BB should be ranked.");
2418 // Optimize every instruction in the basic block.
2419 for (BasicBlock::iterator II = BI->begin(), IE = BI->end(); II != IE;)
2420 if (isInstructionTriviallyDead(&*II)) {
2421 EraseInst(&*II++);
2422 } else {
2423 OptimizeInst(&*II);
2424 assert(II->getParent() == &*BI && "Moved to a different block!");
2425 ++II;
2426 }
2427
2428 // Make a copy of all the instructions to be redone so we can remove dead
2429 // instructions.
2430 OrderedSet ToRedo(RedoInsts);
2431 // Iterate over all instructions to be reevaluated and remove trivially dead
2432     // instructions. If any operand of the trivially dead instruction becomes
2433     // dead, mark it for deletion as well. Continue this process until all
2434 // trivially dead instructions have been removed.
2435 while (!ToRedo.empty()) {
2436 Instruction *I = ToRedo.pop_back_val();
2437 if (isInstructionTriviallyDead(I)) {
2438 RecursivelyEraseDeadInsts(I, ToRedo);
2439 MadeChange = true;
2440 }
2441 }
2442
2443 // Now that we have removed dead instructions, we can reoptimize the
2444 // remaining instructions.
2445 while (!RedoInsts.empty()) {
2446 Instruction *I = RedoInsts.front();
2447 RedoInsts.erase(RedoInsts.begin());
2448 if (isInstructionTriviallyDead(I))
2449 EraseInst(I);
2450 else
2451 OptimizeInst(I);
2452 }
2453 }
2454
2455 // We are done with the rank map and pair map.
2456 RankMap.clear();
2457 ValueRankMap.clear();
2458 for (auto &Entry : PairMap)
2459 Entry.clear();
2460
2461 if (MadeChange) {
2462 PreservedAnalyses PA;
2463 PA.preserveSet<CFGAnalyses>();
2464 PA.preserve<AAManager>();
2465 PA.preserve<BasicAA>();
2466 PA.preserve<GlobalsAA>();
2467 return PA;
2468 }
2469
2470 return PreservedAnalyses::all();
2471 }
2472
2473 namespace {
2474
2475 class ReassociateLegacyPass : public FunctionPass {
2476 ReassociatePass Impl;
2477
2478 public:
2479 static char ID; // Pass identification, replacement for typeid
2480
2481   ReassociateLegacyPass() : FunctionPass(ID) {
2482 initializeReassociateLegacyPassPass(*PassRegistry::getPassRegistry());
2483 }
2484
2485   bool runOnFunction(Function &F) override {
2486 if (skipFunction(F))
2487 return false;
2488
2489 FunctionAnalysisManager DummyFAM;
2490 auto PA = Impl.run(F, DummyFAM);
2491 return !PA.areAllPreserved();
2492 }
2493
2494   void getAnalysisUsage(AnalysisUsage &AU) const override {
2495 AU.setPreservesCFG();
2496 AU.addPreserved<AAResultsWrapperPass>();
2497 AU.addPreserved<BasicAAWrapperPass>();
2498 AU.addPreserved<GlobalsAAWrapperPass>();
2499 }
2500 };
2501
2502 } // end anonymous namespace
2503
2504 char ReassociateLegacyPass::ID = 0;
2505
2506 INITIALIZE_PASS(ReassociateLegacyPass, "reassociate",
2507 "Reassociate expressions", false, false)
2508
2509 // Public interface to the Reassociate pass
2510 FunctionPass *llvm::createReassociatePass() {
2511 return new ReassociateLegacyPass();
2512 }
2513