1 //===- InstCombineMulDivRem.cpp -------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
10 // srem, urem, frem.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "InstCombineInternal.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/SmallVector.h"
17 #include "llvm/Analysis/InstructionSimplify.h"
18 #include "llvm/IR/BasicBlock.h"
19 #include "llvm/IR/Constant.h"
20 #include "llvm/IR/Constants.h"
21 #include "llvm/IR/InstrTypes.h"
22 #include "llvm/IR/Instruction.h"
23 #include "llvm/IR/Instructions.h"
24 #include "llvm/IR/IntrinsicInst.h"
25 #include "llvm/IR/Intrinsics.h"
26 #include "llvm/IR/Operator.h"
27 #include "llvm/IR/PatternMatch.h"
28 #include "llvm/IR/Type.h"
29 #include "llvm/IR/Value.h"
30 #include "llvm/Support/Casting.h"
31 #include "llvm/Support/ErrorHandling.h"
32 #include "llvm/Transforms/InstCombine/InstCombiner.h"
33 #include "llvm/Transforms/Utils/BuildLibCalls.h"
34 #include <cassert>
35 
36 #define DEBUG_TYPE "instcombine"
37 #include "llvm/Transforms/Utils/InstructionWorklist.h"
38 
39 using namespace llvm;
40 using namespace PatternMatch;
41 
42 /// The specific integer value is used in a context where it is known to be
43 /// non-zero.  If this allows us to simplify the computation, do so and return
44 /// the new operand, otherwise return null.
45 static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
46                                         Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to determine
  // if this is safe.  For example, the use could be in dynamically unreachable
  // code.
50   if (!V->hasOneUse()) return nullptr;
51 
52   bool MadeChange = false;
53 
54   // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is no larger than A.
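  // e.g. (illustrative values): if (1 << %a) >>u %b is known non-zero with
  // %a == 5 and %b == 2, then (32 >>u 2) == 8 == 1 << (5 - 2).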
56   Value *A = nullptr, *B = nullptr, *One = nullptr;
57   if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
58       match(One, m_One())) {
59     A = IC.Builder.CreateSub(A, B);
60     return IC.Builder.CreateShl(One, A);
61   }
62 
  // (PowerOfTwo >>u B) --> the lshr must be exact: since the result is known
  // non-zero, the single set bit cannot have been shifted out.  Similarly, a
  // shl of a power of two whose result is non-zero cannot wrap (nuw).
65   BinaryOperator *I = dyn_cast<BinaryOperator>(V);
66   if (I && I->isLogicalShift() &&
67       IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know this shift is exact/nuw, and its input is also used in a
    // context where it must be non-zero, so try to simplify the input too.
70     if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
71       IC.replaceOperand(*I, 0, V2);
72       MadeChange = true;
73     }
74 
75     if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
76       I->setIsExact();
77       MadeChange = true;
78     }
79 
80     if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
81       I->setHasNoUnsignedWrap();
82       MadeChange = true;
83     }
84   }
85 
86   // TODO: Lots more we could do here:
87   //    If V is a phi node, we can call this on each of its operands.
88   //    "select cond, X, 0" can simplify to "X".
89 
90   return MadeChange ? V : nullptr;
91 }
92 
93 // TODO: This is a specific form of a much more general pattern.
94 //       We could detect a select with any binop identity constant, or we
95 //       could use SimplifyBinOp to see if either arm of the select reduces.
96 //       But that needs to be done carefully and/or while removing potential
97 //       reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
98 static Value *foldMulSelectToNegate(BinaryOperator &I,
99                                     InstCombiner::BuilderTy &Builder) {
100   Value *Cond, *OtherOp;
101 
102   // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
103   // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
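  // Illustrative IR for the first form (i32 chosen only for concreteness):
  //   %s = select i1 %c, i32 1, i32 -1
  //   %m = mul i32 %s, %x
  // becomes
  //   %n = sub i32 0, %x
  //   %m = select i1 %c, i32 %x, i32 %n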
104   if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
105                         m_Value(OtherOp)))) {
106     bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
107     Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
108     return Builder.CreateSelect(Cond, OtherOp, Neg);
109   }
110   // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
111   // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
112   if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
113                         m_Value(OtherOp)))) {
114     bool HasAnyNoWrap = I.hasNoSignedWrap() || I.hasNoUnsignedWrap();
115     Value *Neg = Builder.CreateNeg(OtherOp, "", false, HasAnyNoWrap);
116     return Builder.CreateSelect(Cond, Neg, OtherOp);
117   }
118 
119   // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
120   // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
121   if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
122                                            m_SpecificFP(-1.0))),
123                          m_Value(OtherOp)))) {
124     IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
125     Builder.setFastMathFlags(I.getFastMathFlags());
126     return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
127   }
128 
129   // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
130   // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
131   if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
132                                            m_SpecificFP(1.0))),
133                          m_Value(OtherOp)))) {
134     IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
135     Builder.setFastMathFlags(I.getFastMathFlags());
136     return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
137   }
138 
139   return nullptr;
140 }
141 
142 Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
143   if (Value *V = simplifyMulInst(I.getOperand(0), I.getOperand(1),
144                                  SQ.getWithInstruction(&I)))
145     return replaceInstUsesWith(I, V);
146 
147   if (SimplifyAssociativeOrCommutative(I))
148     return &I;
149 
150   if (Instruction *X = foldVectorBinop(I))
151     return X;
152 
153   if (Instruction *Phi = foldBinopWithPhiOperands(I))
154     return Phi;
155 
156   if (Value *V = SimplifyUsingDistributiveLaws(I))
157     return replaceInstUsesWith(I, V);
158 
159   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
160   unsigned BitWidth = I.getType()->getScalarSizeInBits();
161 
162   // X * -1 == 0 - X
163   if (match(Op1, m_AllOnes())) {
164     BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
165     if (I.hasNoSignedWrap())
166       BO->setHasNoSignedWrap();
167     return BO;
168   }
169 
170   // Also allow combining multiply instructions on vectors.
171   {
172     Value *NewOp;
173     Constant *C1, *C2;
174     const APInt *IVal;
175     if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
176                         m_Constant(C1))) &&
177         match(C1, m_APInt(IVal))) {
178       // ((X << C2)*C1) == (X * (C1 << C2))
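      // e.g. (X << 3) * 5 == X * (5 << 3) == X * 40 (illustrative constants).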
179       Constant *Shl = ConstantExpr::getShl(C1, C2);
180       BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
181       BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
182       if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
183         BO->setHasNoUnsignedWrap();
184       if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
185           Shl->isNotMinSignedValue())
186         BO->setHasNoSignedWrap();
187       return BO;
188     }
189 
190     if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
191       // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
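      // e.g. X * 8 --> X << 3 (illustrative).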
192       if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
193         BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);
194 
195         if (I.hasNoUnsignedWrap())
196           Shl->setHasNoUnsignedWrap();
197         if (I.hasNoSignedWrap()) {
198           const APInt *V;
199           if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
200             Shl->setHasNoSignedWrap();
201         }
202 
203         return Shl;
204       }
205     }
206   }
207 
208   if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
209     // Interpret  X * (-1<<C)  as  (-X) * (1<<C)  and try to sink the negation.
210     // The "* (1<<C)" thus becomes a potential shifting opportunity.
211     if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
212       return BinaryOperator::CreateMul(
213           NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
214   }
215 
216   if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
217     return FoldedMul;
218 
219   if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
220     return replaceInstUsesWith(I, FoldedMul);
221 
222   // Simplify mul instructions with a constant RHS.
223   if (isa<Constant>(Op1)) {
224     // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
225     Value *X;
226     Constant *C1;
227     if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
228       Value *Mul = Builder.CreateMul(C1, Op1);
229       // Only go forward with the transform if C1*CI simplifies to a tidier
230       // constant.
231       if (!match(Mul, m_Mul(m_Value(), m_Value())))
232         return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
233     }
234   }
235 
236   // abs(X) * abs(X) -> X * X
237   // nabs(X) * nabs(X) -> X * X
238   if (Op0 == Op1) {
239     Value *X, *Y;
240     SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
241     if (SPF == SPF_ABS || SPF == SPF_NABS)
242       return BinaryOperator::CreateMul(X, X);
243 
244     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
245       return BinaryOperator::CreateMul(X, X);
246   }
247 
248   // -X * C --> X * -C
249   Value *X, *Y;
250   Constant *Op1C;
251   if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
252     return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));
253 
254   // -X * -Y --> X * Y
255   if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
256     auto *NewMul = BinaryOperator::CreateMul(X, Y);
257     if (I.hasNoSignedWrap() &&
258         cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
259         cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
260       NewMul->setHasNoSignedWrap();
261     return NewMul;
262   }
263 
264   // -X * Y --> -(X * Y)
265   // X * -Y --> -(X * Y)
266   if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
267     return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));
268 
269   // (X / Y) *  Y = X - (X % Y)
270   // (X / Y) * -Y = (X % Y) - X
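  // Illustrative check with X = 7, Y = 3 (udiv/urem): (X / Y) * Y = 2 * 3 = 6,
  // and X - (X % Y) = 7 - 1 = 6.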
271   {
272     Value *Y = Op1;
273     BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
274     if (!Div || (Div->getOpcode() != Instruction::UDiv &&
275                  Div->getOpcode() != Instruction::SDiv)) {
276       Y = Op0;
277       Div = dyn_cast<BinaryOperator>(Op1);
278     }
279     Value *Neg = dyn_castNegVal(Y);
280     if (Div && Div->hasOneUse() &&
281         (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
282         (Div->getOpcode() == Instruction::UDiv ||
283          Div->getOpcode() == Instruction::SDiv)) {
284       Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);
285 
286       // If the division is exact, X % Y is zero, so we end up with X or -X.
287       if (Div->isExact()) {
288         if (DivOp1 == Y)
289           return replaceInstUsesWith(I, X);
290         return BinaryOperator::CreateNeg(X);
291       }
292 
293       auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
294                                                           : Instruction::SRem;
295       // X must be frozen because we are increasing its number of uses.
296       Value *XFreeze = Builder.CreateFreeze(X, X->getName() + ".fr");
297       Value *Rem = Builder.CreateBinOp(RemOpc, XFreeze, DivOp1);
298       if (DivOp1 == Y)
299         return BinaryOperator::CreateSub(XFreeze, Rem);
300       return BinaryOperator::CreateSub(Rem, XFreeze);
301     }
302   }
303 
304   // Fold the following two scenarios:
305   //   1) i1 mul -> i1 and.
306   //   2) X * Y --> X & Y, iff X, Y can be only {0,1}.
307   // Note: We could use known bits to generalize this and related patterns with
308   // shifts/truncs
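  // For i1 values this is just a truth-table check (illustrative): 1 * 1 == 1
  // and any product involving 0 is 0, which matches 'and' exactly.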
309   Type *Ty = I.getType();
310   if (Ty->isIntOrIntVectorTy(1) ||
311       (match(Op0, m_And(m_Value(), m_One())) &&
312        match(Op1, m_And(m_Value(), m_One()))))
313     return BinaryOperator::CreateAnd(Op0, Op1);
314 
315   // X*(1 << Y) --> X << Y
316   // (1 << Y)*X --> X << Y
317   {
318     Value *Y;
319     BinaryOperator *BO = nullptr;
320     bool ShlNSW = false;
321     if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
322       BO = BinaryOperator::CreateShl(Op1, Y);
323       ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
324     } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
325       BO = BinaryOperator::CreateShl(Op0, Y);
326       ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
327     }
328     if (BO) {
329       if (I.hasNoUnsignedWrap())
330         BO->setHasNoUnsignedWrap();
331       if (I.hasNoSignedWrap() && ShlNSW)
332         BO->setHasNoSignedWrap();
333       return BO;
334     }
335   }
336 
337   // (zext bool X) * (zext bool Y) --> zext (and X, Y)
338   // (sext bool X) * (sext bool Y) --> zext (and X, Y)
339   // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
340   if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
341        (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
342       X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
343       (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
344     Value *And = Builder.CreateAnd(X, Y, "mulbool");
345     return CastInst::Create(Instruction::ZExt, And, Ty);
346   }
347   // (sext bool X) * (zext bool Y) --> sext (and X, Y)
348   // (zext bool X) * (sext bool Y) --> sext (and X, Y)
349   // Note: -1 * 1 == 1 * -1  == -1
350   if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
351        (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
352       X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
353       (Op0->hasOneUse() || Op1->hasOneUse())) {
354     Value *And = Builder.CreateAnd(X, Y, "mulbool");
355     return CastInst::Create(Instruction::SExt, And, Ty);
356   }
357 
358   // (zext bool X) * Y --> X ? Y : 0
359   // Y * (zext bool X) --> X ? Y : 0
360   if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
361     return SelectInst::Create(X, Op1, ConstantInt::getNullValue(Ty));
362   if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
363     return SelectInst::Create(X, Op0, ConstantInt::getNullValue(Ty));
364 
365   Constant *ImmC;
366   if (match(Op1, m_ImmConstant(ImmC))) {
367     // (sext bool X) * C --> X ? -C : 0
368     if (match(Op0, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
369       Constant *NegC = ConstantExpr::getNeg(ImmC);
370       return SelectInst::Create(X, NegC, ConstantInt::getNullValue(Ty));
371     }
372 
373     // (ashr i32 X, 31) * C --> (X < 0) ? -C : 0
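    // (The ashr broadcasts the sign bit, so the multiplier is 0 or -1; e.g.
    // X = -7 yields -1 and the product is -C. Illustrative only.)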
374     const APInt *C;
375     if (match(Op0, m_OneUse(m_AShr(m_Value(X), m_APInt(C)))) &&
376         *C == C->getBitWidth() - 1) {
377       Constant *NegC = ConstantExpr::getNeg(ImmC);
378       Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
379       return SelectInst::Create(IsNeg, NegC, ConstantInt::getNullValue(Ty));
380     }
381   }
382 
383   // (lshr X, 31) * Y --> (X < 0) ? Y : 0
384   // TODO: We are not checking one-use because the elimination of the multiply
385   //       is better for analysis?
386   const APInt *C;
387   if (match(&I, m_c_BinOp(m_LShr(m_Value(X), m_APInt(C)), m_Value(Y))) &&
388       *C == C->getBitWidth() - 1) {
389     Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
390     return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
391   }
392 
393   // (and X, 1) * Y --> (trunc X) ? Y : 0
394   if (match(&I, m_c_BinOp(m_OneUse(m_And(m_Value(X), m_One())), m_Value(Y)))) {
395     Value *Tr = Builder.CreateTrunc(X, CmpInst::makeCmpResultType(Ty));
396     return SelectInst::Create(Tr, Y, ConstantInt::getNullValue(Ty));
397   }
398 
399   // ((ashr X, 31) | 1) * X --> abs(X)
400   // X * ((ashr X, 31) | 1) --> abs(X)
401   if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
402                                       m_SpecificIntAllowUndef(BitWidth - 1)),
403                                m_One()),
404                           m_Deferred(X)))) {
405     Value *Abs = Builder.CreateBinaryIntrinsic(
406         Intrinsic::abs, X,
407         ConstantInt::getBool(I.getContext(), I.hasNoSignedWrap()));
408     Abs->takeName(&I);
409     return replaceInstUsesWith(I, Abs);
410   }
411 
412   if (Instruction *Ext = narrowMathIfNoOverflow(I))
413     return Ext;
414 
415   bool Changed = false;
416   if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
417     Changed = true;
418     I.setHasNoSignedWrap(true);
419   }
420 
421   if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
422     Changed = true;
423     I.setHasNoUnsignedWrap(true);
424   }
425 
426   return Changed ? &I : nullptr;
427 }
428 
429 Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
430   BinaryOperator::BinaryOps Opcode = I.getOpcode();
431   assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
432          "Expected fmul or fdiv");
433 
434   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
435   Value *X, *Y;
436 
437   // -X * -Y --> X * Y
438   // -X / -Y --> X / Y
439   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
440     return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);
441 
442   // fabs(X) * fabs(X) -> X * X
443   // fabs(X) / fabs(X) -> X / X
444   if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
445     return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);
446 
447   // fabs(X) * fabs(Y) --> fabs(X * Y)
448   // fabs(X) / fabs(Y) --> fabs(X / Y)
449   if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
450       (Op0->hasOneUse() || Op1->hasOneUse())) {
451     IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
452     Builder.setFastMathFlags(I.getFastMathFlags());
453     Value *XY = Builder.CreateBinOp(Opcode, X, Y);
454     Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
455     Fabs->takeName(&I);
456     return replaceInstUsesWith(I, Fabs);
457   }
458 
459   return nullptr;
460 }
461 
462 Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
463   if (Value *V = simplifyFMulInst(I.getOperand(0), I.getOperand(1),
464                                   I.getFastMathFlags(),
465                                   SQ.getWithInstruction(&I)))
466     return replaceInstUsesWith(I, V);
467 
468   if (SimplifyAssociativeOrCommutative(I))
469     return &I;
470 
471   if (Instruction *X = foldVectorBinop(I))
472     return X;
473 
474   if (Instruction *Phi = foldBinopWithPhiOperands(I))
475     return Phi;
476 
477   if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
478     return FoldedMul;
479 
480   if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
481     return replaceInstUsesWith(I, FoldedMul);
482 
483   if (Instruction *R = foldFPSignBitOps(I))
484     return R;
485 
486   // X * -1.0 --> -X
487   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
488   if (match(Op1, m_SpecificFP(-1.0)))
489     return UnaryOperator::CreateFNegFMF(Op0, &I);
490 
491   // -X * C --> X * -C
492   Value *X, *Y;
493   Constant *C;
494   if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
495     return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);
496 
497   // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
498   if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
499     return replaceInstUsesWith(I, V);
500 
501   if (I.hasAllowReassoc()) {
502     // Reassociate constant RHS with another constant to form constant
503     // expression.
504     if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
505       Constant *C1;
506       if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
507         // (C1 / X) * C --> (C * C1) / X
508         Constant *CC1 =
509             ConstantFoldBinaryOpOperands(Instruction::FMul, C, C1, DL);
510         if (CC1 && CC1->isNormalFP())
511           return BinaryOperator::CreateFDivFMF(CC1, X, &I);
512       }
513       if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
514         // (X / C1) * C --> X * (C / C1)
515         Constant *CDivC1 =
516             ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C1, DL);
517         if (CDivC1 && CDivC1->isNormalFP())
518           return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);
519 
520         // If the constant was a denormal, try reassociating differently.
521         // (X / C1) * C --> X / (C1 / C)
522         Constant *C1DivC =
523             ConstantFoldBinaryOpOperands(Instruction::FDiv, C1, C, DL);
524         if (C1DivC && Op0->hasOneUse() && C1DivC->isNormalFP())
525           return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
526       }
527 
528       // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
529       // canonicalized to 'fadd X, C'. Distributing the multiply may allow
530       // further folds and (X * C) + C2 is 'fma'.
531       if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
532         // (X + C1) * C --> (X * C) + (C * C1)
533         if (Constant *CC1 = ConstantFoldBinaryOpOperands(
534                 Instruction::FMul, C, C1, DL)) {
535           Value *XC = Builder.CreateFMulFMF(X, C, &I);
536           return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
537         }
538       }
539       if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
540         // (C1 - X) * C --> (C * C1) - (X * C)
541         if (Constant *CC1 = ConstantFoldBinaryOpOperands(
542                 Instruction::FMul, C, C1, DL)) {
543           Value *XC = Builder.CreateFMulFMF(X, C, &I);
544           return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
545         }
546       }
547     }
548 
549     Value *Z;
550     if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
551                            m_Value(Z)))) {
552       // Sink division: (X / Y) * Z --> (X * Z) / Y
553       Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
554       return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
555     }
556 
557     // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan is required: if both operands were negative, each sqrt would return
    // NaN (so the product is NaN), but sqrt(X * Y) would return a real number.
560     if (I.hasNoNaNs() && match(Op0, m_OneUse(m_Sqrt(m_Value(X)))) &&
561         match(Op1, m_OneUse(m_Sqrt(m_Value(Y))))) {
562       Value *XY = Builder.CreateFMulFMF(X, Y, &I);
563       Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
564       return replaceInstUsesWith(I, Sqrt);
565     }
566 
567     // The following transforms are done irrespective of the number of uses
568     // for the expression "1.0/sqrt(X)".
569     //  1) 1.0/sqrt(X) * X -> X/sqrt(X)
570     //  2) X * 1.0/sqrt(X) -> X/sqrt(X)
571     // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
572     // has the necessary (reassoc) fast-math-flags.
573     if (I.hasNoSignedZeros() &&
574         match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
575         match(Y, m_Sqrt(m_Value(X))) && Op1 == X)
576       return BinaryOperator::CreateFDivFMF(X, Y, &I);
577     if (I.hasNoSignedZeros() &&
578         match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
579         match(Y, m_Sqrt(m_Value(X))) && Op0 == X)
580       return BinaryOperator::CreateFDivFMF(X, Y, &I);
581 
582     // Like the similar transform in instsimplify, this requires 'nsz' because
583     // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
584     if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
585         Op0->hasNUses(2)) {
586       // Peek through fdiv to find squaring of square root:
587       // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
588       if (match(Op0, m_FDiv(m_Value(X), m_Sqrt(m_Value(Y))))) {
589         Value *XX = Builder.CreateFMulFMF(X, X, &I);
590         return BinaryOperator::CreateFDivFMF(XX, Y, &I);
591       }
592       // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
593       if (match(Op0, m_FDiv(m_Sqrt(m_Value(Y)), m_Value(X)))) {
594         Value *XX = Builder.CreateFMulFMF(X, X, &I);
595         return BinaryOperator::CreateFDivFMF(Y, XX, &I);
596       }
597     }
598 
599     if (I.isOnlyUserOfAnyOperand()) {
600       // pow(x, y) * pow(x, z) -> pow(x, y + z)
601       if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
602           match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
603         auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
604         auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
605         return replaceInstUsesWith(I, NewPow);
606       }
607 
608       // powi(x, y) * powi(x, z) -> powi(x, y + z)
609       if (match(Op0, m_Intrinsic<Intrinsic::powi>(m_Value(X), m_Value(Y))) &&
610           match(Op1, m_Intrinsic<Intrinsic::powi>(m_Specific(X), m_Value(Z))) &&
611           Y->getType() == Z->getType()) {
612         auto *YZ = Builder.CreateAdd(Y, Z);
613         auto *NewPow = Builder.CreateIntrinsic(
614             Intrinsic::powi, {X->getType(), YZ->getType()}, {X, YZ}, &I);
615         return replaceInstUsesWith(I, NewPow);
616       }
617 
618       // exp(X) * exp(Y) -> exp(X + Y)
619       if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
620           match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
621         Value *XY = Builder.CreateFAddFMF(X, Y, &I);
622         Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
623         return replaceInstUsesWith(I, Exp);
624       }
625 
626       // exp2(X) * exp2(Y) -> exp2(X + Y)
627       if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
628           match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
629         Value *XY = Builder.CreateFAddFMF(X, Y, &I);
630         Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
631         return replaceInstUsesWith(I, Exp2);
632       }
633     }
634 
    // (X*Y) * X => (X*X) * Y where Y != X
    //  The purpose is two-fold:
    //   1) to form a power expression (of X).
    //   2) to potentially shorten the critical path: after the transformation,
    //  the latency of computing Y is overlapped with the evaluation of X*X, so
    //  Y ends up in a "less critical" position than it held before the
    //  transformation.
642     if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
643         Op1 != Y) {
644       Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
645       return BinaryOperator::CreateFMulFMF(XX, Y, &I);
646     }
647     if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
648         Op0 != Y) {
649       Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
650       return BinaryOperator::CreateFMulFMF(XX, Y, &I);
651     }
652   }
653 
654   // log2(X * 0.5) * Y = log2(X) * Y - Y
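  // (Derivation, for reference: log2(X * 0.5) == log2(X) - 1, so multiplying
  // both sides by Y gives log2(X) * Y - Y.)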
655   if (I.isFast()) {
656     IntrinsicInst *Log2 = nullptr;
657     if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
658             m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
659       Log2 = cast<IntrinsicInst>(Op0);
660       Y = Op1;
661     }
662     if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
663             m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
664       Log2 = cast<IntrinsicInst>(Op1);
665       Y = Op0;
666     }
    if (Log2) {
      Value *Log2X = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2X, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
672   }
673 
674   return nullptr;
675 }
676 
677 /// Fold a divide or remainder with a select instruction divisor when one of the
678 /// select operands is zero. In that case, we can use the other select operand
679 /// because div/rem by zero is undefined.
680 bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
681   SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
682   if (!SI)
683     return false;
684 
685   int NonNullOperand;
686   if (match(SI->getTrueValue(), m_Zero()))
687     // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
688     NonNullOperand = 2;
689   else if (match(SI->getFalseValue(), m_Zero()))
690     // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
691     NonNullOperand = 1;
692   else
693     return false;
694 
695   // Change the div/rem to use 'Y' instead of the select.
696   replaceOperand(I, 1, SI->getOperand(NonNullOperand));
697 
  // Okay, we know we can replace the operand of the div/rem with 'Y' without a
  // problem.  However, the select, or the condition of the select, may have
  // multiple uses.  Based on our knowledge that the operand must be non-zero,
  // propagate the known value of the select into its other uses, and propagate
  // the known value of the condition into its other users.

  // If the select and its condition each have only a single use, don't bother
  // with this; exit early.
706   Value *SelectCond = SI->getCondition();
707   if (SI->use_empty() && SelectCond->hasOneUse())
708     return true;
709 
710   // Scan the current block backward, looking for other uses of SI.
711   BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
712   Type *CondTy = SelectCond->getType();
713   while (BBI != BBFront) {
714     --BBI;
    // If we found an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
717     if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
718       break;
719 
720     // Replace uses of the select or its condition with the known values.
721     for (Use &Op : BBI->operands()) {
722       if (Op == SI) {
723         replaceUse(Op, SI->getOperand(NonNullOperand));
724         Worklist.push(&*BBI);
725       } else if (Op == SelectCond) {
726         replaceUse(Op, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
727                                            : ConstantInt::getFalse(CondTy));
728         Worklist.push(&*BBI);
729       }
730     }
731 
    // If we walked past the instruction, quit looking for it.
733     if (&*BBI == SI)
734       SI = nullptr;
735     if (&*BBI == SelectCond)
736       SelectCond = nullptr;
737 
738     // If we ran out of things to eliminate, break out of the loop.
739     if (!SelectCond && !SI)
740       break;
742   }
743   return true;
744 }
745 
/// True if the multiply cannot be expressed in an int of this size.
747 static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
748                               bool IsSigned) {
749   bool Overflow;
750   Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
751   return Overflow;
752 }
753 
754 /// True if C1 is a multiple of C2. Quotient contains C1/C2.
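/// For example (illustrative): isMultiple(12, 4, Q, /*IsSigned=*/false)
/// returns true and sets Q to 3.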
755 static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
756                        bool IsSigned) {
757   assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");
758 
759   // Bail if we will divide by zero.
760   if (C2.isZero())
761     return false;
762 
763   // Bail if we would divide INT_MIN by -1.
764   if (IsSigned && C1.isMinSignedValue() && C2.isAllOnes())
765     return false;
766 
767   APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
768   if (IsSigned)
769     APInt::sdivrem(C1, C2, Quotient, Remainder);
770   else
771     APInt::udivrem(C1, C2, Quotient, Remainder);
772 
773   return Remainder.isMinValue();
774 }
775 
776 /// This function implements the transforms common to both integer division
777 /// instructions (udiv and sdiv). It is called by the visitors to those integer
778 /// division instructions.
780 Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
781   if (Instruction *Phi = foldBinopWithPhiOperands(I))
782     return Phi;
783 
784   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
785   bool IsSigned = I.getOpcode() == Instruction::SDiv;
786   Type *Ty = I.getType();
787 
788   // The RHS is known non-zero.
789   if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
790     return replaceOperand(I, 1, V);
791 
792   // Handle cases involving: [su]div X, (select Cond, Y, Z)
793   // This does not apply for fdiv.
794   if (simplifyDivRemOfSelectWithZeroOp(I))
795     return &I;
796 
797   // If the divisor is a select-of-constants, try to constant fold all div ops:
798   // C / (select Cond, TrueC, FalseC) --> select Cond, (C / TrueC), (C / FalseC)
799   // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
800   if (match(Op0, m_ImmConstant()) &&
801       match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
802     if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
803                                           /*FoldWithMultiUse*/ true))
804       return R;
805   }
806 
807   const APInt *C2;
808   if (match(Op1, m_APInt(C2))) {
809     Value *X;
810     const APInt *C1;
811 
812     // (X / C1) / C2  -> X / (C1*C2)
813     if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
814         (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
815       APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
816       if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
817         return BinaryOperator::Create(I.getOpcode(), X,
818                                       ConstantInt::get(Ty, Product));
819     }
820 
821     if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
822         (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
823       APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
824 
825       // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
826       if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
827         auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
828                                               ConstantInt::get(Ty, Quotient));
829         NewDiv->setIsExact(I.isExact());
830         return NewDiv;
831       }
832 
833       // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
834       if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
835         auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
836                                            ConstantInt::get(Ty, Quotient));
837         auto *OBO = cast<OverflowingBinaryOperator>(Op0);
838         Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
839         Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
840         return Mul;
841       }
842     }
843 
844     if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
845          C1->ult(C1->getBitWidth() - 1)) ||
846         (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))) &&
847          C1->ult(C1->getBitWidth()))) {
848       APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
849       APInt C1Shifted = APInt::getOneBitSet(
850           C1->getBitWidth(), static_cast<unsigned>(C1->getZExtValue()));
851 
852       // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
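      // e.g. with C1 == 2 and C2 == 12: (X << 2) / 12 --> X / 3, because 12 is
      // a multiple of 1 << 2 == 4 (illustrative constants).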
853       if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
854         auto *BO = BinaryOperator::Create(I.getOpcode(), X,
855                                           ConstantInt::get(Ty, Quotient));
856         BO->setIsExact(I.isExact());
857         return BO;
858       }
859 
860       // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
861       if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
862         auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
863                                            ConstantInt::get(Ty, Quotient));
864         auto *OBO = cast<OverflowingBinaryOperator>(Op0);
865         Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
866         Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
867         return Mul;
868       }
869     }
870 
871     if (!C2->isZero()) // avoid X udiv 0
872       if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
873         return FoldedDiv;
874   }
875 
876   if (match(Op0, m_One())) {
877     assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
878     if (IsSigned) {
879       // 1 / 0 --> undef ; 1 / 1 --> 1 ; 1 / -1 --> -1 ; 1 / anything else --> 0
880       // (Op1 + 1) u< 3 ? Op1 : 0
881       // Op1 must be frozen because we are increasing its number of uses.
882       Value *F1 = Builder.CreateFreeze(Op1, Op1->getName() + ".fr");
883       Value *Inc = Builder.CreateAdd(F1, Op0);
884       Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
885       return SelectInst::Create(Cmp, F1, ConstantInt::get(Ty, 0));
886     } else {
887       // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
888       // result is one, otherwise it's zero.
889       return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
890     }
891   }
892 
893   // See if we can fold away this div instruction.
894   if (SimplifyDemandedInstructionBits(I))
895     return &I;
896 
897   // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
898   Value *X, *Z;
899   if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
900     if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
901         (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
902       return BinaryOperator::Create(I.getOpcode(), X, Op1);
903 
904   // (X << Y) / X -> 1 << Y
905   Value *Y;
906   if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
907     return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
908   if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
909     return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);
910 
911   // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
912   if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
913     bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
914     bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
915     if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
916       replaceOperand(I, 0, ConstantInt::get(Ty, 1));
917       replaceOperand(I, 1, Y);
918       return &I;
919     }
920   }
921 
922   return nullptr;
923 }
924 
925 static const unsigned MaxDepth = 6;
926 
927 // Take the exact integer log2 of the value. If DoFold is true, create the
928 // actual instructions, otherwise return a non-null dummy value. Return nullptr
929 // on failure.
930 static Value *takeLog2(IRBuilderBase &Builder, Value *Op, unsigned Depth,
931                        bool DoFold) {
932   auto IfFold = [DoFold](function_ref<Value *()> Fn) {
933     if (!DoFold)
934       return reinterpret_cast<Value *>(-1);
935     return Fn();
936   };
937 
938   // FIXME: assert that Op1 isn't/doesn't contain undef.
939 
940   // log2(2^C) -> C
941   if (match(Op, m_Power2()))
942     return IfFold([&]() {
943       Constant *C = ConstantExpr::getExactLogBase2(cast<Constant>(Op));
944       if (!C)
945         llvm_unreachable("Failed to constant fold udiv -> logbase2");
946       return C;
947     });
948 
949   // The remaining tests are all recursive, so bail out if we hit the limit.
950   if (Depth++ == MaxDepth)
951     return nullptr;
952 
953   // log2(zext X) -> zext log2(X)
954   // FIXME: Require one use?
955   Value *X, *Y;
956   if (match(Op, m_ZExt(m_Value(X))))
957     if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
958       return IfFold([&]() { return Builder.CreateZExt(LogX, Op->getType()); });
959 
960   // log2(X << Y) -> log2(X) + Y
961   // FIXME: Require one use unless X is 1?
962   if (match(Op, m_Shl(m_Value(X), m_Value(Y))))
963     if (Value *LogX = takeLog2(Builder, X, Depth, DoFold))
964       return IfFold([&]() { return Builder.CreateAdd(LogX, Y); });
965 
966   // log2(Cond ? X : Y) -> Cond ? log2(X) : log2(Y)
967   // FIXME: missed optimization: if one of the hands of select is/contains
968   //        undef, just directly pick the other one.
969   // FIXME: can both hands contain undef?
970   // FIXME: Require one use?
971   if (SelectInst *SI = dyn_cast<SelectInst>(Op))
972     if (Value *LogX = takeLog2(Builder, SI->getOperand(1), Depth, DoFold))
973       if (Value *LogY = takeLog2(Builder, SI->getOperand(2), Depth, DoFold))
974         return IfFold([&]() {
975           return Builder.CreateSelect(SI->getOperand(0), LogX, LogY);
976         });
977 
978   // log2(umin(X, Y)) -> umin(log2(X), log2(Y))
979   // log2(umax(X, Y)) -> umax(log2(X), log2(Y))
980   auto *MinMax = dyn_cast<MinMaxIntrinsic>(Op);
981   if (MinMax && MinMax->hasOneUse() && !MinMax->isSigned())
982     if (Value *LogX = takeLog2(Builder, MinMax->getLHS(), Depth, DoFold))
983       if (Value *LogY = takeLog2(Builder, MinMax->getRHS(), Depth, DoFold))
984         return IfFold([&]() {
985           return Builder.CreateBinaryIntrinsic(
986               MinMax->getIntrinsicID(), LogX, LogY);
987         });
988 
989   return nullptr;
990 }
991 
992 /// If we have zero-extended operands of an unsigned div or rem, we may be able
993 /// to narrow the operation (sink the zext below the math).
994 static Instruction *narrowUDivURem(BinaryOperator &I,
995                                    InstCombiner::BuilderTy &Builder) {
996   Instruction::BinaryOps Opcode = I.getOpcode();
997   Value *N = I.getOperand(0);
998   Value *D = I.getOperand(1);
999   Type *Ty = I.getType();
1000   Value *X, *Y;
1001   if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
1002       X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
1003     // udiv (zext X), (zext Y) --> zext (udiv X, Y)
1004     // urem (zext X), (zext Y) --> zext (urem X, Y)
1005     Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
1006     return new ZExtInst(NarrowOp, Ty);
1007   }
1008 
1009   Constant *C;
1010   if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
1011       (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
1012     // If the constant is the same in the smaller type, use the narrow version.
1013     Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
1014     if (ConstantExpr::getZExt(TruncC, Ty) != C)
1015       return nullptr;
1016 
1017     // udiv (zext X), C --> zext (udiv X, C')
1018     // urem (zext X), C --> zext (urem X, C')
1019     // udiv C, (zext X) --> zext (udiv C', X)
1020     // urem C, (zext X) --> zext (urem C', X)
1021     Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
1022                                        : Builder.CreateBinOp(Opcode, TruncC, X);
1023     return new ZExtInst(NarrowOp, Ty);
1024   }
1025 
1026   return nullptr;
1027 }
1028 
1029 Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
1030   if (Value *V = simplifyUDivInst(I.getOperand(0), I.getOperand(1),
1031                                   SQ.getWithInstruction(&I)))
1032     return replaceInstUsesWith(I, V);
1033 
1034   if (Instruction *X = foldVectorBinop(I))
1035     return X;
1036 
1037   // Handle the integer div common cases
1038   if (Instruction *Common = commonIDivTransforms(I))
1039     return Common;
1040 
1041   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1042   Value *X;
1043   const APInt *C1, *C2;
1044   if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
1045     // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
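    // e.g. (X u>> 2) udiv 3 --> X udiv 12, valid as long as 3 << 2 does not
    // overflow (illustrative constants).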
1046     bool Overflow;
1047     APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
1048     if (!Overflow) {
1049       bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
1050       BinaryOperator *BO = BinaryOperator::CreateUDiv(
1051           X, ConstantInt::get(X->getType(), C2ShlC1));
1052       if (IsExact)
1053         BO->setIsExact();
1054       return BO;
1055     }
1056   }
1057 
1058   // Op0 / C where C is large (negative) --> zext (Op0 >= C)
1059   // TODO: Could use isKnownNegative() to handle non-constant values.
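  // (Reasoning sketch: a constant with the sign bit set is more than half of
  // the unsigned range, so the quotient can only be 0 or 1, and it is 1
  // exactly when Op0 u>= C.)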
1060   Type *Ty = I.getType();
1061   if (match(Op1, m_Negative())) {
1062     Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
1063     return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1064   }
1065   // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
1066   if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1067     Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1068     return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1069   }
1070 
1071   if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
1072     return NarrowDiv;
1073 
1074   // If the udiv operands are non-overflowing multiplies with a common operand,
1075   // then eliminate the common factor:
1076   // (A * B) / (A * X) --> B / X (and commuted variants)
1077   // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
1078   // TODO: If -reassociation handled this generally, we could remove this.
1079   Value *A, *B;
1080   if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
1081     if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
1082         match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
1083       return BinaryOperator::CreateUDiv(B, X);
1084     if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
1085         match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
1086       return BinaryOperator::CreateUDiv(A, X);
1087   }
1088 
  // Op0 udiv Op1 -> Op0 lshr log2(Op1), if log2() folds away.
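  // e.g. if Op1 is (1 << %y), its log2 folds to %y and the udiv becomes a
  // lshr of Op0 by %y (illustrative).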
1090   if (takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/false)) {
1091     Value *Res = takeLog2(Builder, Op1, /*Depth*/0, /*DoFold*/true);
1092     return replaceInstUsesWith(
1093         I, Builder.CreateLShr(Op0, Res, I.getName(), I.isExact()));
1094   }
1095 
1096   return nullptr;
1097 }
1098 
1099 Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
1100   if (Value *V = simplifySDivInst(I.getOperand(0), I.getOperand(1),
1101                                   SQ.getWithInstruction(&I)))
1102     return replaceInstUsesWith(I, V);
1103 
1104   if (Instruction *X = foldVectorBinop(I))
1105     return X;
1106 
1107   // Handle the integer div common cases
1108   if (Instruction *Common = commonIDivTransforms(I))
1109     return Common;
1110 
1111   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1112   Type *Ty = I.getType();
1113   Value *X;
1114   // sdiv Op0, -1 --> -Op0
1115   // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
1116   if (match(Op1, m_AllOnes()) ||
1117       (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1118     return BinaryOperator::CreateNeg(Op0);
1119 
1120   // X / INT_MIN --> X == INT_MIN
1121   if (match(Op1, m_SignMask()))
1122     return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);
1123 
1124   // sdiv exact X,  1<<C  -->    ashr exact X, C   iff  1<<C  is non-negative
1125   // sdiv exact X, -1<<C  -->  -(ashr exact X, C)
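  // e.g. sdiv exact X, 8 --> ashr exact X, 3, and sdiv exact X, -8 -->
  // -(ashr exact X, 3) (illustrative constants).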
1126   if (I.isExact() && ((match(Op1, m_Power2()) && match(Op1, m_NonNegative())) ||
1127                       match(Op1, m_NegatedPower2()))) {
1128     bool DivisorWasNegative = match(Op1, m_NegatedPower2());
1129     if (DivisorWasNegative)
1130       Op1 = ConstantExpr::getNeg(cast<Constant>(Op1));
1131     auto *AShr = BinaryOperator::CreateExactAShr(
1132         Op0, ConstantExpr::getExactLogBase2(cast<Constant>(Op1)), I.getName());
1133     if (!DivisorWasNegative)
1134       return AShr;
1135     Builder.Insert(AShr);
1136     AShr->setName(I.getName() + ".neg");
1137     return BinaryOperator::CreateNeg(AShr, I.getName());
1138   }
1139 
1140   const APInt *Op1C;
1141   if (match(Op1, m_APInt(Op1C))) {
1142     // If the dividend is sign-extended and the constant divisor is small enough
1143     // to fit in the source type, shrink the division to the narrower type:
1144     // (sext X) sdiv C --> sext (X sdiv C)
1145     Value *Op0Src;
1146     if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
1147         Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {
1148 
1149       // In the general case, we need to make sure that the dividend is not the
1150       // minimum signed value because dividing that by -1 is UB. But here, we
1151       // know that the -1 divisor case is already handled above.
1152 
1153       Constant *NarrowDivisor =
1154           ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
1155       Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
1156       return new SExtInst(NarrowOp, Ty);
1157     }
1158 
1159     // -X / C --> X / -C (if the negation doesn't overflow).
1160     // TODO: This could be enhanced to handle arbitrary vector constants by
1161     //       checking if all elements are not the min-signed-val.
1162     if (!Op1C->isMinSignedValue() &&
1163         match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
1164       Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
1165       Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
1166       BO->setIsExact(I.isExact());
1167       return BO;
1168     }
1169   }
1170 
1171   // -X / Y --> -(X / Y)
1172   Value *Y;
1173   if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1174     return BinaryOperator::CreateNSWNeg(
1175         Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));
1176 
1177   // abs(X) / X --> X > -1 ? 1 : -1
1178   // X / abs(X) --> X > -1 ? 1 : -1
1179   if (match(&I, m_c_BinOp(
1180                     m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
1181                     m_Deferred(X)))) {
1182     Value *Cond = Builder.CreateIsNotNeg(X);
1183     return SelectInst::Create(Cond, ConstantInt::get(Ty, 1),
1184                               ConstantInt::getAllOnesValue(Ty));
1185   }
1186 
1187   // If the sign bits of both operands are zero (i.e. we can prove they are
1188   // unsigned inputs), turn this into a udiv.
1189   APInt Mask(APInt::getSignMask(Ty->getScalarSizeInBits()));
1190   if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
1191     if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
1192       // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
1193       auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1194       BO->setIsExact(I.isExact());
1195       return BO;
1196     }
1197 
1198     if (match(Op1, m_NegatedPower2())) {
1199       // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
1200       //                    -> -(X udiv (1 << C)) -> -(X u>> C)
1201       Constant *CNegLog2 = ConstantExpr::getExactLogBase2(
1202           ConstantExpr::getNeg(cast<Constant>(Op1)));
1203       Value *Shr = Builder.CreateLShr(Op0, CNegLog2, I.getName(), I.isExact());
1204       return BinaryOperator::CreateNeg(Shr);
1205     }
1206 
1207     if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
1208       // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
1209       // Safe because the only negative value (1 << Y) can take on is
1210       // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
1211       // the sign bit set.
1212       auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
1213       BO->setIsExact(I.isExact());
1214       return BO;
1215     }
1216   }
1217 
1218   return nullptr;
1219 }
1220 
1221 /// Remove negation and try to convert division into multiplication.
1222 static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
1223   Constant *C;
1224   if (!match(I.getOperand(1), m_Constant(C)))
1225     return nullptr;
1226 
1227   // -X / C --> X / -C
1228   Value *X;
1229   if (match(I.getOperand(0), m_FNeg(m_Value(X))))
1230     return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);
1231 
1232   // If the constant divisor has an exact inverse, this is always safe. If not,
1233   // then we can still create a reciprocal if fast-math-flags allow it and the
1234   // constant is a regular number (not zero, infinite, or denormal).
1235   if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
1236     return nullptr;
1237 
1238   // Disallow denormal constants because we don't know what would happen
1239   // on all targets.
1240   // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1241   // denorms are flushed?
1242   const DataLayout &DL = I.getModule()->getDataLayout();
1243   auto *RecipC = ConstantFoldBinaryOpOperands(
1244       Instruction::FDiv, ConstantFP::get(I.getType(), 1.0), C, DL);
1245   if (!RecipC || !RecipC->isNormalFP())
1246     return nullptr;
1247 
1248   // X / C --> X * (1 / C)
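  // e.g. X / 4.0 --> X * 0.25 (illustrative; 4.0 has an exact reciprocal, so
  // this particular constant needs no fast-math flags).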
1249   return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
1250 }
1251 
1252 /// Remove negation and try to reassociate constant math.
1253 static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
1254   Constant *C;
1255   if (!match(I.getOperand(0), m_Constant(C)))
1256     return nullptr;
1257 
1258   // C / -X --> -C / X
1259   Value *X;
1260   if (match(I.getOperand(1), m_FNeg(m_Value(X))))
1261     return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);
1262 
1263   if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
1264     return nullptr;
1265 
1266   // Try to reassociate C / X expressions where X includes another constant.
1267   Constant *C2, *NewC = nullptr;
1268   const DataLayout &DL = I.getModule()->getDataLayout();
1269   if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
1270     // C / (X * C2) --> (C / C2) / X
1271     NewC = ConstantFoldBinaryOpOperands(Instruction::FDiv, C, C2, DL);
1272   } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
1273     // C / (X / C2) --> (C * C2) / X
1274     NewC = ConstantFoldBinaryOpOperands(Instruction::FMul, C, C2, DL);
1275   }
1276   // Disallow denormal constants because we don't know what would happen
1277   // on all targets.
1278   // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
1279   // denorms are flushed?
1280   if (!NewC || !NewC->isNormalFP())
1281     return nullptr;
1282 
1283   return BinaryOperator::CreateFDivFMF(NewC, X, &I);
1284 }
1285 
1286 /// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
1287 static Instruction *foldFDivPowDivisor(BinaryOperator &I,
1288                                        InstCombiner::BuilderTy &Builder) {
1289   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1290   auto *II = dyn_cast<IntrinsicInst>(Op1);
1291   if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
1292       !I.hasAllowReciprocal())
1293     return nullptr;
1294 
1295   // Z / pow(X, Y) --> Z * pow(X, -Y)
1296   // Z / exp{2}(Y) --> Z * exp{2}(-Y)
1297   // In the general case, this creates an extra instruction, but fmul allows
1298   // for better canonicalization and optimization than fdiv.
1299   Intrinsic::ID IID = II->getIntrinsicID();
1300   SmallVector<Value *> Args;
1301   switch (IID) {
1302   case Intrinsic::pow:
1303     Args.push_back(II->getArgOperand(0));
1304     Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
1305     break;
1306   case Intrinsic::powi: {
1307     // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
1308     // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
1309     // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
1310     // non-standard results, so this corner case should be acceptable if the
1311     // code rules out INF values.
1312     if (!I.hasNoInfs())
1313       return nullptr;
1314     Args.push_back(II->getArgOperand(0));
1315     Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
1316     Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
1317     Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
1318     return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
1319   }
1320   case Intrinsic::exp:
1321   case Intrinsic::exp2:
1322     Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
1323     break;
1324   default:
1325     return nullptr;
1326   }
1327   Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
1328   return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
1329 }
1330 
1331 Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
1332   Module *M = I.getModule();
1333 
1334   if (Value *V = simplifyFDivInst(I.getOperand(0), I.getOperand(1),
1335                                   I.getFastMathFlags(),
1336                                   SQ.getWithInstruction(&I)))
1337     return replaceInstUsesWith(I, V);
1338 
1339   if (Instruction *X = foldVectorBinop(I))
1340     return X;
1341 
1342   if (Instruction *Phi = foldBinopWithPhiOperands(I))
1343     return Phi;
1344 
1345   if (Instruction *R = foldFDivConstantDivisor(I))
1346     return R;
1347 
1348   if (Instruction *R = foldFDivConstantDividend(I))
1349     return R;
1350 
1351   if (Instruction *R = foldFPSignBitOps(I))
1352     return R;
1353 
1354   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1355   if (isa<Constant>(Op0))
1356     if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
1357       if (Instruction *R = FoldOpIntoSelect(I, SI))
1358         return R;
1359 
1360   if (isa<Constant>(Op1))
1361     if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
1362       if (Instruction *R = FoldOpIntoSelect(I, SI))
1363         return R;
1364 
1365   if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
1366     Value *X, *Y;
1367     if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1368         (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
1369       // (X / Y) / Z => X / (Y * Z)
1370       Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
1371       return BinaryOperator::CreateFDivFMF(X, YZ, &I);
1372     }
1373     if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
1374         (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
1375       // Z / (X / Y) => (Y * Z) / X
1376       Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
1377       return BinaryOperator::CreateFDivFMF(YZ, X, &I);
1378     }
1379     // Z / (1.0 / Y) => (Y * Z)
1380     //
1381     // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even when 1.0/Y has multiple uses, the
    // instruction count remains the same and a division is replaced by a
    // multiplication.
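    // For example, even when "%r = fdiv float 1.0, %y" has other uses:
    //   fdiv reassoc arcp float %z, %r --> fmul reassoc arcp float %y, %z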
1385     if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
1386       return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
1387   }
1388 
1389   if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
1390     // sin(X) / cos(X) -> tan(X)
1391     // cos(X) / sin(X) -> 1/tan(X) (cotangent)
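    // For example, for float (when the target provides tanf):
    //   fdiv reassoc float (sin %x), (cos %x) --> call float @tanf(float %x)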
1392     Value *X;
1393     bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
1394                  match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
1395     bool IsCot =
1396         !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
1397                   match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));
1398 
1399     if ((IsTan || IsCot) && hasFloatFn(M, &TLI, I.getType(), LibFunc_tan,
1400                                        LibFunc_tanf, LibFunc_tanl)) {
1401       IRBuilder<> B(&I);
1402       IRBuilder<>::FastMathFlagGuard FMFGuard(B);
1403       B.setFastMathFlags(I.getFastMathFlags());
1404       AttributeList Attrs =
1405           cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
1406       Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
1407                                         LibFunc_tanl, B, Attrs);
1408       if (IsCot)
1409         Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
1410       return replaceInstUsesWith(I, Res);
1411     }
1412   }
1413 
1414   // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
1416   // We can ignore the possibility that X is infinity because INF/INF is NaN.
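  // A zero X would make the original 0.0/0.0 = NaN, which 'nnan' already rules
  // out. For example:
  //   fdiv nnan reassoc float %x, (fmul float %x, %y)
  //     --> fdiv nnan reassoc float 1.0, %y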
1417   Value *X, *Y;
1418   if (I.hasNoNaNs() && I.hasAllowReassoc() &&
1419       match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
1420     replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
1421     replaceOperand(I, 1, Y);
1422     return &I;
1423   }
1424 
1425   // X / fabs(X) -> copysign(1.0, X)
1426   // fabs(X) / X -> copysign(1.0, X)
1427   if (I.hasNoNaNs() && I.hasNoInfs() &&
1428       (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
1429        match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
1430     Value *V = Builder.CreateBinaryIntrinsic(
1431         Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
1432     return replaceInstUsesWith(I, V);
1433   }
1434 
1435   if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
1436     return Mul;
1437 
1438   return nullptr;
1439 }
1440 
/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
1445 Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
1446   if (Instruction *Phi = foldBinopWithPhiOperands(I))
1447     return Phi;
1448 
1449   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1450 
1451   // The RHS is known non-zero.
1452   if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
1453     return replaceOperand(I, 1, V);
1454 
1455   // Handle cases involving: rem X, (select Cond, Y, Z)
1456   if (simplifyDivRemOfSelectWithZeroOp(I))
1457     return &I;
1458 
1459   // If the divisor is a select-of-constants, try to constant fold all rem ops:
1460   // C % (select Cond, TrueC, FalseC) --> select Cond, (C % TrueC), (C % FalseC)
1461   // TODO: Adapt simplifyDivRemOfSelectWithZeroOp to allow this and other folds.
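  // For example: "urem i32 7, (select i1 %c, i32 4, i32 3)" constant-folds to
  // "select i1 %c, i32 3, i32 1".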
1462   if (match(Op0, m_ImmConstant()) &&
1463       match(Op1, m_Select(m_Value(), m_ImmConstant(), m_ImmConstant()))) {
1464     if (Instruction *R = FoldOpIntoSelect(I, cast<SelectInst>(Op1),
1465                                           /*FoldWithMultiUse*/ true))
1466       return R;
1467   }
1468 
1469   if (isa<Constant>(Op1)) {
1470     if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
1471       if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
1472         if (Instruction *R = FoldOpIntoSelect(I, SI))
1473           return R;
1474       } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
1475         const APInt *Op1Int;
1476         if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
1477             (I.getOpcode() == Instruction::URem ||
1478              !Op1Int->isMinSignedValue())) {
1479           // foldOpIntoPhi will speculate instructions to the end of the PHI's
1480           // predecessor blocks, so do this only if we know the srem or urem
1481           // will not fault.
1482           if (Instruction *NV = foldOpIntoPhi(I, PN))
1483             return NV;
1484         }
1485       }
1486 
1487       // See if we can fold away this rem instruction.
1488       if (SimplifyDemandedInstructionBits(I))
1489         return &I;
1490     }
1491   }
1492 
1493   return nullptr;
1494 }
1495 
1496 Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
1497   if (Value *V = simplifyURemInst(I.getOperand(0), I.getOperand(1),
1498                                   SQ.getWithInstruction(&I)))
1499     return replaceInstUsesWith(I, V);
1500 
1501   if (Instruction *X = foldVectorBinop(I))
1502     return X;
1503 
1504   if (Instruction *common = commonIRemTransforms(I))
1505     return common;
1506 
1507   if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
1508     return NarrowRem;
1509 
  // X urem Y -> X & (Y - 1), where Y is a power of 2.
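  // For example: "urem i32 %x, 8" -> "and i32 %x, 7"; for a non-constant power
  // of two: "urem i32 %x, %y" -> "and i32 %x, (add i32 %y, -1)".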
1511   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1512   Type *Ty = I.getType();
1513   if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase the instruction count; we don't enforce that Y is a
    // constant.
1516     Constant *N1 = Constant::getAllOnesValue(Ty);
1517     Value *Add = Builder.CreateAdd(Op1, N1);
1518     return BinaryOperator::CreateAnd(Op0, Add);
1519   }
1520 
1521   // 1 urem X -> zext(X != 1)
1522   if (match(Op0, m_One())) {
1523     Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
1524     return CastInst::CreateZExtOrBitCast(Cmp, Ty);
1525   }
1526 
1527   // Op0 urem C -> Op0 < C ? Op0 : Op0 - C, where C >= signbit.
1528   // Op0 must be frozen because we are increasing its number of uses.
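  // Since C has the sign bit set, C >= 2^(BitWidth-1), so Op0 < 2*C and one
  // conditional subtraction suffices. For example, with i8 and C = 192:
  //   200 urem 192 == 8 == 200 - 192, while 100 urem 192 == 100.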
1529   if (match(Op1, m_Negative())) {
1530     Value *F0 = Builder.CreateFreeze(Op0, Op0->getName() + ".fr");
1531     Value *Cmp = Builder.CreateICmpULT(F0, Op1);
1532     Value *Sub = Builder.CreateSub(F0, Op1);
1533     return SelectInst::Create(Cmp, F0, Sub);
1534   }
1535 
1536   // If the divisor is a sext of a boolean, then the divisor must be max
1537   // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
1538   // max unsigned value. In that case, the remainder is 0:
1539   // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
1540   Value *X;
1541   if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
1542     Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
1543     return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
1544   }
1545 
1546   return nullptr;
1547 }
1548 
1549 Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
1550   if (Value *V = simplifySRemInst(I.getOperand(0), I.getOperand(1),
1551                                   SQ.getWithInstruction(&I)))
1552     return replaceInstUsesWith(I, V);
1553 
1554   if (Instruction *X = foldVectorBinop(I))
1555     return X;
1556 
1557   // Handle the integer rem common cases
1558   if (Instruction *Common = commonIRemTransforms(I))
1559     return Common;
1560 
1561   Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1562   {
1563     const APInt *Y;
1564     // X % -Y -> X % Y
1565     if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
1566       return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
1567   }
1568 
1569   // -X srem Y --> -(X srem Y)
1570   Value *X, *Y;
1571   if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
1572     return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));
1573 
1574   // If the sign bits of both operands are zero (i.e. we can prove they are
1575   // unsigned inputs), turn this into a urem.
1576   APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
1577   if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
1578       MaskedValueIsZero(Op0, Mask, 0, &I)) {
1579     // X srem Y -> X urem Y, iff X and Y don't have sign bit set
1580     return BinaryOperator::CreateURem(Op0, Op1, I.getName());
1581   }
1582 
1583   // If it's a constant vector, flip any negative values positive.
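  // For example: "srem <2 x i32> %x, <i32 -3, i32 5>"
  //          --> "srem <2 x i32> %x, <i32 3, i32 5>"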
1584   if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
1585     Constant *C = cast<Constant>(Op1);
1586     unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();
1587 
1588     bool hasNegative = false;
1589     bool hasMissing = false;
1590     for (unsigned i = 0; i != VWidth; ++i) {
1591       Constant *Elt = C->getAggregateElement(i);
1592       if (!Elt) {
1593         hasMissing = true;
1594         break;
1595       }
1596 
1597       if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
1598         if (RHS->isNegative())
1599           hasNegative = true;
1600     }
1601 
1602     if (hasNegative && !hasMissing) {
1603       SmallVector<Constant *, 16> Elts(VWidth);
1604       for (unsigned i = 0; i != VWidth; ++i) {
1605         Elts[i] = C->getAggregateElement(i);  // Handle undef, etc.
1606         if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
1607           if (RHS->isNegative())
1608             Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
1609         }
1610       }
1611 
1612       Constant *NewRHSV = ConstantVector::get(Elts);
1613       if (NewRHSV != C)  // Don't loop on -MININT
1614         return replaceOperand(I, 1, NewRHSV);
1615     }
1616   }
1617 
1618   return nullptr;
1619 }
1620 
1621 Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
1622   if (Value *V = simplifyFRemInst(I.getOperand(0), I.getOperand(1),
1623                                   I.getFastMathFlags(),
1624                                   SQ.getWithInstruction(&I)))
1625     return replaceInstUsesWith(I, V);
1626 
1627   if (Instruction *X = foldVectorBinop(I))
1628     return X;
1629 
1630   if (Instruction *Phi = foldBinopWithPhiOperands(I))
1631     return Phi;
1632 
1633   return nullptr;
1634 }
1635