//===- InstCombineMulDivRem.cpp -------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for mul, fmul, sdiv, udiv, fdiv,
// srem, urem, frem.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/BuildLibCalls.h"
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// The specific integer value is used in a context where it is known to be
/// non-zero. If this allows us to simplify the computation, do so and return
/// the new operand, otherwise return null.
static Value *simplifyValueKnownNonZero(Value *V, InstCombinerImpl &IC,
                                        Instruction &CxtI) {
  // If V has multiple uses, then we would have to do more analysis to
  // determine if this is safe. For example, the use could be in dynamically
  // unreachable code.
  if (!V->hasOneUse()) return nullptr;

  bool MadeChange = false;

  // ((1 << A) >>u B) --> (1 << (A-B))
  // Because V cannot be zero, we know that B is less than A.
  Value *A = nullptr, *B = nullptr, *One = nullptr;
  if (match(V, m_LShr(m_OneUse(m_Shl(m_Value(One), m_Value(A))), m_Value(B))) &&
      match(One, m_One())) {
    A = IC.Builder.CreateSub(A, B);
    return IC.Builder.CreateShl(One, A);
  }

  // (PowerOfTwo >>u B) --> isExact since shifting out the result would make it
  // inexact. Similarly for <<.
  BinaryOperator *I = dyn_cast<BinaryOperator>(V);
  if (I && I->isLogicalShift() &&
      IC.isKnownToBeAPowerOfTwo(I->getOperand(0), false, 0, &CxtI)) {
    // We know that this is an exact/nuw shift and that the input is known
    // non-zero in this context as well.
    if (Value *V2 = simplifyValueKnownNonZero(I->getOperand(0), IC, CxtI)) {
      IC.replaceOperand(*I, 0, V2);
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::LShr && !I->isExact()) {
      I->setIsExact();
      MadeChange = true;
    }

    if (I->getOpcode() == Instruction::Shl && !I->hasNoUnsignedWrap()) {
      I->setHasNoUnsignedWrap();
      MadeChange = true;
    }
  }

  // TODO: Lots more we could do here:
  //    If V is a phi node, we can call this on each of its operands.
  //    "select cond, X, 0" can simplify to "X".

  return MadeChange ? V : nullptr;
}

// TODO: This is a specific form of a much more general pattern.
//       We could detect a select with any binop identity constant, or we
//       could use SimplifyBinOp to see if either arm of the select reduces.
//       But that needs to be done carefully and/or while removing potential
//       reverse canonicalizations as in InstCombiner::foldSelectIntoOp().
static Value *foldMulSelectToNegate(BinaryOperator &I,
                                    InstCombiner::BuilderTy &Builder) {
  Value *Cond, *OtherOp;

  // mul (select Cond, 1, -1), OtherOp --> select Cond, OtherOp, -OtherOp
  // mul OtherOp, (select Cond, 1, -1) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_One(), m_AllOnes())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateNeg(OtherOp));

  // mul (select Cond, -1, 1), OtherOp --> select Cond, -OtherOp, OtherOp
  // mul OtherOp, (select Cond, -1, 1) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_Mul(m_OneUse(m_Select(m_Value(Cond), m_AllOnes(), m_One())),
                        m_Value(OtherOp))))
    return Builder.CreateSelect(Cond, Builder.CreateNeg(OtherOp), OtherOp);

  // fmul (select Cond, 1.0, -1.0), OtherOp --> select Cond, OtherOp, -OtherOp
  // fmul OtherOp, (select Cond, 1.0, -1.0) --> select Cond, OtherOp, -OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(1.0),
                                           m_SpecificFP(-1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, OtherOp, Builder.CreateFNeg(OtherOp));
  }

  // fmul (select Cond, -1.0, 1.0), OtherOp --> select Cond, -OtherOp, OtherOp
  // fmul OtherOp, (select Cond, -1.0, 1.0) --> select Cond, -OtherOp, OtherOp
  if (match(&I, m_c_FMul(m_OneUse(m_Select(m_Value(Cond), m_SpecificFP(-1.0),
                                           m_SpecificFP(1.0))),
                         m_Value(OtherOp)))) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    return Builder.CreateSelect(Cond, Builder.CreateFNeg(OtherOp), OtherOp);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitMul(BinaryOperator &I) {
  if (Value *V = SimplifyMulInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  unsigned BitWidth = I.getType()->getScalarSizeInBits();

  // X * -1 == 0 - X
  if (match(Op1, m_AllOnes())) {
    BinaryOperator *BO = BinaryOperator::CreateNeg(Op0, I.getName());
    if (I.hasNoSignedWrap())
      BO->setHasNoSignedWrap();
    return BO;
  }

  // Also allow combining multiply instructions on vectors.
  {
    Value *NewOp;
    Constant *C1, *C2;
    const APInt *IVal;
    if (match(&I, m_Mul(m_Shl(m_Value(NewOp), m_Constant(C2)),
                        m_Constant(C1))) &&
        match(C1, m_APInt(IVal))) {
      // ((X << C2)*C1) == (X * (C1 << C2))
      Constant *Shl = ConstantExpr::getShl(C1, C2);
      BinaryOperator *Mul = cast<BinaryOperator>(I.getOperand(0));
      BinaryOperator *BO = BinaryOperator::CreateMul(NewOp, Shl);
      if (I.hasNoUnsignedWrap() && Mul->hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && Mul->hasNoSignedWrap() &&
          Shl->isNotMinSignedValue())
        BO->setHasNoSignedWrap();
      return BO;
    }

    if (match(&I, m_Mul(m_Value(NewOp), m_Constant(C1)))) {
      // Replace X*(2^C) with X << C, where C is either a scalar or a vector.
      if (Constant *NewCst = ConstantExpr::getExactLogBase2(C1)) {
        BinaryOperator *Shl = BinaryOperator::CreateShl(NewOp, NewCst);

        if (I.hasNoUnsignedWrap())
          Shl->setHasNoUnsignedWrap();
        if (I.hasNoSignedWrap()) {
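          // A shift amount of BitWidth - 1 multiplies by INT_MIN, where nsw
          // cannot be preserved (e.g. 1 * INT_MIN does not overflow, but
          // shl nsw 1, BitWidth - 1 would be poison), so skip that case.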
          const APInt *V;
          if (match(NewCst, m_APInt(V)) && *V != V->getBitWidth() - 1)
            Shl->setHasNoSignedWrap();
        }

        return Shl;
      }
    }
  }

  if (Op0->hasOneUse() && match(Op1, m_NegatedPower2())) {
    // Interpret X * (-1<<C) as (-X) * (1<<C) and try to sink the negation.
    // The "* (1<<C)" thus becomes a potential shifting opportunity.
    if (Value *NegOp0 = Negator::Negate(/*IsNegation*/ true, Op0, *this))
      return BinaryOperator::CreateMul(
          NegOp0, ConstantExpr::getNeg(cast<Constant>(Op1)), I.getName());
  }

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  // Simplify mul instructions with a constant RHS.
  if (isa<Constant>(Op1)) {
    // Canonicalize (X+C1)*CI -> X*CI+C1*CI.
    Value *X;
    Constant *C1;
    if (match(Op0, m_OneUse(m_Add(m_Value(X), m_Constant(C1))))) {
      Value *Mul = Builder.CreateMul(C1, Op1);
      // Only go forward with the transform if C1*CI simplifies to a tidier
      // constant.
      if (!match(Mul, m_Mul(m_Value(), m_Value())))
        return BinaryOperator::CreateAdd(Builder.CreateMul(X, Op1), Mul);
    }
  }

  // abs(X) * abs(X) -> X * X
  // nabs(X) * nabs(X) -> X * X
  if (Op0 == Op1) {
    Value *X, *Y;
    SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
    if (SPF == SPF_ABS || SPF == SPF_NABS)
      return BinaryOperator::CreateMul(X, X);

    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
      return BinaryOperator::CreateMul(X, X);
  }

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *Op1C;
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Constant(Op1C)))
    return BinaryOperator::CreateMul(X, ConstantExpr::getNeg(Op1C));

  // -X * -Y --> X * Y
  if (match(Op0, m_Neg(m_Value(X))) && match(Op1, m_Neg(m_Value(Y)))) {
    auto *NewMul = BinaryOperator::CreateMul(X, Y);
    if (I.hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op0)->hasNoSignedWrap() &&
        cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap())
      NewMul->setHasNoSignedWrap();
    return NewMul;
  }

  // -X * Y --> -(X * Y)
  // X * -Y --> -(X * Y)
  if (match(&I, m_c_Mul(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNeg(Builder.CreateMul(X, Y));

  // (X / Y) * Y = X - (X % Y)
  // (X / Y) * -Y = (X % Y) - X
  {
    Value *Y = Op1;
    BinaryOperator *Div = dyn_cast<BinaryOperator>(Op0);
    if (!Div || (Div->getOpcode() != Instruction::UDiv &&
                 Div->getOpcode() != Instruction::SDiv)) {
      Y = Op0;
      Div = dyn_cast<BinaryOperator>(Op1);
    }
    Value *Neg = dyn_castNegVal(Y);
    if (Div && Div->hasOneUse() &&
        (Div->getOperand(1) == Y || Div->getOperand(1) == Neg) &&
        (Div->getOpcode() == Instruction::UDiv ||
         Div->getOpcode() == Instruction::SDiv)) {
      Value *X = Div->getOperand(0), *DivOp1 = Div->getOperand(1);

      // If the division is exact, X % Y is zero, so we end up with X or -X.
      if (Div->isExact()) {
        if (DivOp1 == Y)
          return replaceInstUsesWith(I, X);
        return BinaryOperator::CreateNeg(X);
      }

      auto RemOpc = Div->getOpcode() == Instruction::UDiv ? Instruction::URem
                                                          : Instruction::SRem;
      Value *Rem = Builder.CreateBinOp(RemOpc, X, DivOp1);
      if (DivOp1 == Y)
        return BinaryOperator::CreateSub(X, Rem);
      return BinaryOperator::CreateSub(Rem, X);
    }
  }

  /// i1 mul -> i1 and.
  if (I.getType()->isIntOrIntVectorTy(1))
    return BinaryOperator::CreateAnd(Op0, Op1);

  // X*(1 << Y) --> X << Y
  // (1 << Y)*X --> X << Y
  {
    Value *Y;
    BinaryOperator *BO = nullptr;
    bool ShlNSW = false;
    if (match(Op0, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op1, Y);
      ShlNSW = cast<ShlOperator>(Op0)->hasNoSignedWrap();
    } else if (match(Op1, m_Shl(m_One(), m_Value(Y)))) {
      BO = BinaryOperator::CreateShl(Op0, Y);
      ShlNSW = cast<ShlOperator>(Op1)->hasNoSignedWrap();
    }
    if (BO) {
      if (I.hasNoUnsignedWrap())
        BO->setHasNoUnsignedWrap();
      if (I.hasNoSignedWrap() && ShlNSW)
        BO->setHasNoSignedWrap();
      return BO;
    }
  }

  // (zext bool X) * (zext bool Y) --> zext (and X, Y)
  // (sext bool X) * (sext bool Y) --> zext (and X, Y)
  // Note: -1 * -1 == 1 * 1 == 1 (if the extends match, the result is the same)
  if (((match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_SExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse() || X == Y)) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::ZExt, And, I.getType());
  }
  // (sext bool X) * (zext bool Y) --> sext (and X, Y)
  // (zext bool X) * (sext bool Y) --> sext (and X, Y)
  // Note: -1 * 1 == 1 * -1 == -1
  if (((match(Op0, m_SExt(m_Value(X))) && match(Op1, m_ZExt(m_Value(Y)))) ||
       (match(Op0, m_ZExt(m_Value(X))) && match(Op1, m_SExt(m_Value(Y))))) &&
      X->getType()->isIntOrIntVectorTy(1) && X->getType() == Y->getType() &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    Value *And = Builder.CreateAnd(X, Y, "mulbool");
    return CastInst::Create(Instruction::SExt, And, I.getType());
  }

  // (bool X) * Y --> X ? Y : 0
  // Y * (bool X) --> X ? Y : 0
  if (match(Op0, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op1, ConstantInt::get(I.getType(), 0));
  if (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return SelectInst::Create(X, Op0, ConstantInt::get(I.getType(), 0));

  // (lshr X, 31) * Y --> (ashr X, 31) & Y
  // Y * (lshr X, 31) --> (ashr X, 31) & Y
  // TODO: We are not checking one-use because the elimination of the multiply
  //       is better for analysis?
  // TODO: Should we canonicalize to '(X < 0) ? Y : 0' instead? That would be
  //       more similar to what we're doing above.
  const APInt *C;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op1);
  if (match(Op1, m_LShr(m_Value(X), m_APInt(C))) && *C == C->getBitWidth() - 1)
    return BinaryOperator::CreateAnd(Builder.CreateAShr(X, *C), Op0);

  // ((ashr X, 31) | 1) * X --> abs(X)
  // X * ((ashr X, 31) | 1) --> abs(X)
  if (match(&I, m_c_BinOp(m_Or(m_AShr(m_Value(X),
                                      m_SpecificIntAllowUndef(BitWidth - 1)),
                               m_One()),
                          m_Deferred(X)))) {
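    // The second operand of llvm.abs is the INT_MIN-is-poison flag. It is
    // safe to set it from the mul's nsw: with nsw, an INT_MIN input would
    // already make the multiply by -1 poison.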
    Value *Abs = Builder.CreateBinaryIntrinsic(
        Intrinsic::abs, X,
        ConstantInt::getBool(I.getContext(), I.hasNoSignedWrap()));
    Abs->takeName(&I);
    return replaceInstUsesWith(I, Abs);
  }

  if (Instruction *Ext = narrowMathIfNoOverflow(I))
    return Ext;

  bool Changed = false;
  if (!I.hasNoSignedWrap() && willNotOverflowSignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }

  if (!I.hasNoUnsignedWrap() && willNotOverflowUnsignedMul(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombinerImpl::foldFPSignBitOps(BinaryOperator &I) {
  BinaryOperator::BinaryOps Opcode = I.getOpcode();
  assert((Opcode == Instruction::FMul || Opcode == Instruction::FDiv) &&
         "Expected fmul or fdiv");

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X, *Y;

  // -X * -Y --> X * Y
  // -X / -Y --> X / Y
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_FNeg(m_Value(Y))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, Y, &I);

  // fabs(X) * fabs(X) -> X * X
  // fabs(X) / fabs(X) -> X / X
  if (Op0 == Op1 && match(Op0, m_FAbs(m_Value(X))))
    return BinaryOperator::CreateWithCopiedFlags(Opcode, X, X, &I);

  // fabs(X) * fabs(Y) --> fabs(X * Y)
  // fabs(X) / fabs(Y) --> fabs(X / Y)
  if (match(Op0, m_FAbs(m_Value(X))) && match(Op1, m_FAbs(m_Value(Y))) &&
      (Op0->hasOneUse() || Op1->hasOneUse())) {
    IRBuilder<>::FastMathFlagGuard FMFGuard(Builder);
    Builder.setFastMathFlags(I.getFastMathFlags());
    Value *XY = Builder.CreateBinOp(Opcode, X, Y);
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, XY);
    Fabs->takeName(&I);
    return replaceInstUsesWith(I, Fabs);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFMul(BinaryOperator &I) {
  if (Value *V = SimplifyFMulInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *FoldedMul = foldBinOpIntoSelectOrPhi(I))
    return FoldedMul;

  if (Value *FoldedMul = foldMulSelectToNegate(I, Builder))
    return replaceInstUsesWith(I, FoldedMul);

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  // X * -1.0 --> -X
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (match(Op1, m_SpecificFP(-1.0)))
    return UnaryOperator::CreateFNegFMF(Op0, &I);

  // -X * C --> X * -C
  Value *X, *Y;
  Constant *C;
  if (match(Op0, m_FNeg(m_Value(X))) && match(Op1, m_Constant(C)))
    return BinaryOperator::CreateFMulFMF(X, ConstantExpr::getFNeg(C), &I);

  // (select A, B, C) * (select A, D, E) --> select A, (B*D), (C*E)
  if (Value *V = SimplifySelectsFeedingBinaryOp(I, Op0, Op1))
    return replaceInstUsesWith(I, V);

  if (I.hasAllowReassoc()) {
    // Reassociate constant RHS with another constant to form constant
    // expression.
    if (match(Op1, m_Constant(C)) && C->isFiniteNonZeroFP()) {
      Constant *C1;
      if (match(Op0, m_OneUse(m_FDiv(m_Constant(C1), m_Value(X))))) {
        // (C1 / X) * C --> (C * C1) / X
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        if (CC1->isNormalFP())
          return BinaryOperator::CreateFDivFMF(CC1, X, &I);
      }
      if (match(Op0, m_FDiv(m_Value(X), m_Constant(C1)))) {
        // (X / C1) * C --> X * (C / C1)
        Constant *CDivC1 = ConstantExpr::getFDiv(C, C1);
        if (CDivC1->isNormalFP())
          return BinaryOperator::CreateFMulFMF(X, CDivC1, &I);

        // If the constant was a denormal, try reassociating differently.
        // (X / C1) * C --> X / (C1 / C)
        Constant *C1DivC = ConstantExpr::getFDiv(C1, C);
        if (Op0->hasOneUse() && C1DivC->isNormalFP())
          return BinaryOperator::CreateFDivFMF(X, C1DivC, &I);
      }

      // We do not need to match 'fadd C, X' and 'fsub X, C' because they are
      // canonicalized to 'fadd X, C'. Distributing the multiply may allow
      // further folds and (X * C) + C2 is 'fma'.
      if (match(Op0, m_OneUse(m_FAdd(m_Value(X), m_Constant(C1))))) {
        // (X + C1) * C --> (X * C) + (C * C1)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFAddFMF(XC, CC1, &I);
      }
      if (match(Op0, m_OneUse(m_FSub(m_Constant(C1), m_Value(X))))) {
        // (C1 - X) * C --> (C * C1) - (X * C)
        Constant *CC1 = ConstantExpr::getFMul(C, C1);
        Value *XC = Builder.CreateFMulFMF(X, C, &I);
        return BinaryOperator::CreateFSubFMF(CC1, XC, &I);
      }
    }

    Value *Z;
    if (match(&I, m_c_FMul(m_OneUse(m_FDiv(m_Value(X), m_Value(Y))),
                           m_Value(Z)))) {
      // Sink division: (X / Y) * Z --> (X * Z) / Y
      Value *NewFMul = Builder.CreateFMulFMF(X, Z, &I);
      return BinaryOperator::CreateFDivFMF(NewFMul, Y, &I);
    }

    // sqrt(X) * sqrt(Y) -> sqrt(X * Y)
    // nnan disallows the possibility of returning a number if both operands are
    // negative (in that case, we should return NaN).
    if (I.hasNoNaNs() &&
        match(Op0, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(X)))) &&
        match(Op1, m_OneUse(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
      Value *XY = Builder.CreateFMulFMF(X, Y, &I);
      Value *Sqrt = Builder.CreateUnaryIntrinsic(Intrinsic::sqrt, XY, &I);
      return replaceInstUsesWith(I, Sqrt);
    }

    // The following transforms are done irrespective of the number of uses
    // for the expression "1.0/sqrt(X)".
    //  1) 1.0/sqrt(X) * X -> X/sqrt(X)
    //  2) X * 1.0/sqrt(X) -> X/sqrt(X)
    // We always expect the backend to reduce X/sqrt(X) to sqrt(X), if it
    // has the necessary (reassoc) fast-math-flags.
    if (I.hasNoSignedZeros() &&
        match(Op0, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
        match(Y, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && Op1 == X)
      return BinaryOperator::CreateFDivFMF(X, Y, &I);
    if (I.hasNoSignedZeros() &&
        match(Op1, (m_FDiv(m_SpecificFP(1.0), m_Value(Y)))) &&
        match(Y, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) && Op0 == X)
      return BinaryOperator::CreateFDivFMF(X, Y, &I);

    // Like the similar transform in instsimplify, this requires 'nsz' because
    // sqrt(-0.0) = -0.0, and -0.0 * -0.0 does not simplify to -0.0.
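    // Op0 must also have exactly two uses (both operands of this fmul), so
    // the fdiv below has no other users and dies after the transform.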
    if (I.hasNoNaNs() && I.hasNoSignedZeros() && Op0 == Op1 &&
        Op0->hasNUses(2)) {
      // Peek through fdiv to find squaring of square root:
      // (X / sqrt(Y)) * (X / sqrt(Y)) --> (X * X) / Y
      if (match(Op0, m_FDiv(m_Value(X),
                            m_Intrinsic<Intrinsic::sqrt>(m_Value(Y))))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(XX, Y, &I);
      }
      // (sqrt(Y) / X) * (sqrt(Y) / X) --> Y / (X * X)
      if (match(Op0, m_FDiv(m_Intrinsic<Intrinsic::sqrt>(m_Value(Y)),
                            m_Value(X)))) {
        Value *XX = Builder.CreateFMulFMF(X, X, &I);
        return BinaryOperator::CreateFDivFMF(Y, XX, &I);
      }
    }

    if (I.isOnlyUserOfAnyOperand()) {
      // pow(x, y) * pow(x, z) -> pow(x, y + z)
      if (match(Op0, m_Intrinsic<Intrinsic::pow>(m_Value(X), m_Value(Y))) &&
          match(Op1, m_Intrinsic<Intrinsic::pow>(m_Specific(X), m_Value(Z)))) {
        auto *YZ = Builder.CreateFAddFMF(Y, Z, &I);
        auto *NewPow = Builder.CreateBinaryIntrinsic(Intrinsic::pow, X, YZ, &I);
        return replaceInstUsesWith(I, NewPow);
      }

      // exp(X) * exp(Y) -> exp(X + Y)
      if (match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))) &&
          match(Op1, m_Intrinsic<Intrinsic::exp>(m_Value(Y)))) {
        Value *XY = Builder.CreateFAddFMF(X, Y, &I);
        Value *Exp = Builder.CreateUnaryIntrinsic(Intrinsic::exp, XY, &I);
        return replaceInstUsesWith(I, Exp);
      }

      // exp2(X) * exp2(Y) -> exp2(X + Y)
      if (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) &&
          match(Op1, m_Intrinsic<Intrinsic::exp2>(m_Value(Y)))) {
        Value *XY = Builder.CreateFAddFMF(X, Y, &I);
        Value *Exp2 = Builder.CreateUnaryIntrinsic(Intrinsic::exp2, XY, &I);
        return replaceInstUsesWith(I, Exp2);
      }
    }

    // (X*Y) * X => (X*X) * Y where Y != X
    // The purpose is two-fold:
    //  1) to form a power expression (of X).
    //  2) potentially shorten the critical path: After transformation, the
    //     latency of the instruction Y is amortized by the expression of X*X,
    //     and therefore Y is in a "less critical" position compared to what it
    //     was before the transformation.
    if (match(Op0, m_OneUse(m_c_FMul(m_Specific(Op1), m_Value(Y)))) &&
        Op1 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op1, Op1, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
    if (match(Op1, m_OneUse(m_c_FMul(m_Specific(Op0), m_Value(Y)))) &&
        Op0 != Y) {
      Value *XX = Builder.CreateFMulFMF(Op0, Op0, &I);
      return BinaryOperator::CreateFMulFMF(XX, Y, &I);
    }
  }

  // log2(X * 0.5) * Y = log2(X) * Y - Y
  if (I.isFast()) {
    IntrinsicInst *Log2 = nullptr;
    if (match(Op0, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op0);
      Y = Op1;
    }
    if (match(Op1, m_OneUse(m_Intrinsic<Intrinsic::log2>(
            m_OneUse(m_FMul(m_Value(X), m_SpecificFP(0.5))))))) {
      Log2 = cast<IntrinsicInst>(Op1);
      Y = Op0;
    }
    if (Log2) {
      Value *Log2 = Builder.CreateUnaryIntrinsic(Intrinsic::log2, X, &I);
      Value *LogXTimesY = Builder.CreateFMulFMF(Log2, Y, &I);
      return BinaryOperator::CreateFSubFMF(LogXTimesY, Y, &I);
    }
  }

  return nullptr;
}

/// Fold a divide or remainder with a select instruction divisor when one of the
/// select operands is zero. In that case, we can use the other select operand
/// because div/rem by zero is undefined.
bool InstCombinerImpl::simplifyDivRemOfSelectWithZeroOp(BinaryOperator &I) {
  SelectInst *SI = dyn_cast<SelectInst>(I.getOperand(1));
  if (!SI)
    return false;

  int NonNullOperand;
  if (match(SI->getTrueValue(), m_Zero()))
    // div/rem X, (Cond ? 0 : Y) -> div/rem X, Y
    NonNullOperand = 2;
  else if (match(SI->getFalseValue(), m_Zero()))
    // div/rem X, (Cond ? Y : 0) -> div/rem X, Y
    NonNullOperand = 1;
  else
    return false;

  // Change the div/rem to use 'Y' instead of the select.
  replaceOperand(I, 1, SI->getOperand(NonNullOperand));

  // Okay, we know we can replace the operand of the div/rem with 'Y' with no
  // problem. However, the select, or the condition of the select may have
  // multiple uses. Based on our knowledge that the operand must be non-zero,
  // propagate the known value for the select into other uses of it, and
  // propagate a known value of the condition into its other users.

  // If the select and condition only have a single use, don't bother with
  // this; exit early.
  Value *SelectCond = SI->getCondition();
  if (SI->use_empty() && SelectCond->hasOneUse())
    return true;

  // Scan the current block backward, looking for other uses of SI.
  BasicBlock::iterator BBI = I.getIterator(), BBFront = I.getParent()->begin();
  Type *CondTy = SelectCond->getType();
  while (BBI != BBFront) {
    --BBI;
    // If we found an instruction that we can't assume will return, then
    // information from below it cannot be propagated above it.
    if (!isGuaranteedToTransferExecutionToSuccessor(&*BBI))
      break;

    // Replace uses of the select or its condition with the known values.
    for (Use &Op : BBI->operands()) {
      if (Op == SI) {
        replaceUse(Op, SI->getOperand(NonNullOperand));
        Worklist.push(&*BBI);
      } else if (Op == SelectCond) {
        replaceUse(Op, NonNullOperand == 1 ? ConstantInt::getTrue(CondTy)
                                           : ConstantInt::getFalse(CondTy));
        Worklist.push(&*BBI);
      }
    }

    // If we passed the instruction, quit looking for it.
    if (&*BBI == SI)
      SI = nullptr;
    if (&*BBI == SelectCond)
      SelectCond = nullptr;

    // If we ran out of things to eliminate, break out of the loop.
    if (!SelectCond && !SI)
      break;
  }
  return true;
}

/// True if the multiply cannot be expressed in an int this size.
static bool multiplyOverflows(const APInt &C1, const APInt &C2, APInt &Product,
                              bool IsSigned) {
  bool Overflow;
  Product = IsSigned ? C1.smul_ov(C2, Overflow) : C1.umul_ov(C2, Overflow);
  return Overflow;
}

/// True if C1 is a multiple of C2. Quotient contains C1/C2.
static bool isMultiple(const APInt &C1, const APInt &C2, APInt &Quotient,
                       bool IsSigned) {
  assert(C1.getBitWidth() == C2.getBitWidth() && "Constant widths not equal");

  // Bail if we will divide by zero.
  if (C2.isNullValue())
    return false;

  // Bail if we would divide INT_MIN by -1.
  if (IsSigned && C1.isMinSignedValue() && C2.isAllOnesValue())
    return false;

  APInt Remainder(C1.getBitWidth(), /*val=*/0ULL, IsSigned);
  if (IsSigned)
    APInt::sdivrem(C1, C2, Quotient, Remainder);
  else
    APInt::udivrem(C1, C2, Quotient, Remainder);

  return Remainder.isMinValue();
}

/// This function implements the transforms common to both integer division
/// instructions (udiv and sdiv). It is called by the visitors to those integer
/// division instructions.
/// Common integer divide transforms
Instruction *InstCombinerImpl::commonIDivTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  bool IsSigned = I.getOpcode() == Instruction::SDiv;
  Type *Ty = I.getType();

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: [su]div X, (select Cond, Y, Z)
  // This does not apply for fdiv.
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  const APInt *C2;
  if (match(Op1, m_APInt(C2))) {
    Value *X;
    const APInt *C1;

    // (X / C1) / C2 -> X / (C1*C2)
    if ((IsSigned && match(Op0, m_SDiv(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_APInt(C1))))) {
      APInt Product(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      if (!multiplyOverflows(*C1, *C2, Product, IsSigned))
        return BinaryOperator::Create(I.getOpcode(), X,
                                      ConstantInt::get(Ty, Product));
    }

    if ((IsSigned && match(Op0, m_NSWMul(m_Value(X), m_APInt(C1)))) ||
        (!IsSigned && match(Op0, m_NUWMul(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);

      // (X * C1) / C2 -> X / (C2 / C1) if C2 is a multiple of C1.
      if (isMultiple(*C2, *C1, Quotient, IsSigned)) {
        auto *NewDiv = BinaryOperator::Create(I.getOpcode(), X,
                                              ConstantInt::get(Ty, Quotient));
        NewDiv->setIsExact(I.isExact());
        return NewDiv;
      }

      // (X * C1) / C2 -> X * (C1 / C2) if C1 is a multiple of C2.
      if (isMultiple(*C1, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if ((IsSigned && match(Op0, m_NSWShl(m_Value(X), m_APInt(C1))) &&
         *C1 != C1->getBitWidth() - 1) ||
        (!IsSigned && match(Op0, m_NUWShl(m_Value(X), m_APInt(C1))))) {
      APInt Quotient(C1->getBitWidth(), /*val=*/0ULL, IsSigned);
      APInt C1Shifted = APInt::getOneBitSet(
          C1->getBitWidth(), static_cast<unsigned>(C1->getLimitedValue()));

      // (X << C1) / C2 -> X / (C2 >> C1) if C2 is a multiple of 1 << C1.
      if (isMultiple(*C2, C1Shifted, Quotient, IsSigned)) {
        auto *BO = BinaryOperator::Create(I.getOpcode(), X,
                                          ConstantInt::get(Ty, Quotient));
        BO->setIsExact(I.isExact());
        return BO;
      }

      // (X << C1) / C2 -> X * ((1 << C1) / C2) if 1 << C1 is a multiple of C2.
      if (isMultiple(C1Shifted, *C2, Quotient, IsSigned)) {
        auto *Mul = BinaryOperator::Create(Instruction::Mul, X,
                                           ConstantInt::get(Ty, Quotient));
        auto *OBO = cast<OverflowingBinaryOperator>(Op0);
        Mul->setHasNoUnsignedWrap(!IsSigned && OBO->hasNoUnsignedWrap());
        Mul->setHasNoSignedWrap(OBO->hasNoSignedWrap());
        return Mul;
      }
    }

    if (!C2->isNullValue()) // avoid X udiv 0
      if (Instruction *FoldedDiv = foldBinOpIntoSelectOrPhi(I))
        return FoldedDiv;
  }

  if (match(Op0, m_One())) {
    assert(!Ty->isIntOrIntVectorTy(1) && "i1 divide not removed?");
    if (IsSigned) {
      // If Op1 is 0 then it's undefined behaviour, if Op1 is 1 then the
      // result is one, if Op1 is -1 then the result is minus one, otherwise
      // it's zero.
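      // Op1 + 1 <u 3 tests whether Op1 is -1, 0, or 1; in that range the
      // result equals Op1 itself (dividing by 0 is UB anyway), otherwise 0.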
      Value *Inc = Builder.CreateAdd(Op1, Op0);
      Value *Cmp = Builder.CreateICmpULT(Inc, ConstantInt::get(Ty, 3));
      return SelectInst::Create(Cmp, Op1, ConstantInt::get(Ty, 0));
    } else {
      // If Op1 is 0 then it's undefined behaviour. If Op1 is 1 then the
      // result is one, otherwise it's zero.
      return new ZExtInst(Builder.CreateICmpEQ(Op1, Op0), Ty);
    }
  }

  // See if we can fold away this div instruction.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  // (X - (X rem Y)) / Y -> X / Y; usually originates as ((X / Y) * Y) / Y
  Value *X, *Z;
  if (match(Op0, m_Sub(m_Value(X), m_Value(Z)))) // (X - Z) / Y; Y = Op1
    if ((IsSigned && match(Z, m_SRem(m_Specific(X), m_Specific(Op1)))) ||
        (!IsSigned && match(Z, m_URem(m_Specific(X), m_Specific(Op1)))))
      return BinaryOperator::Create(I.getOpcode(), X, Op1);

  // (X << Y) / X -> 1 << Y
  Value *Y;
  if (IsSigned && match(Op0, m_NSWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNSWShl(ConstantInt::get(Ty, 1), Y);
  if (!IsSigned && match(Op0, m_NUWShl(m_Specific(Op1), m_Value(Y))))
    return BinaryOperator::CreateNUWShl(ConstantInt::get(Ty, 1), Y);

  // X / (X * Y) -> 1 / Y if the multiplication does not overflow.
  if (match(Op1, m_c_Mul(m_Specific(Op0), m_Value(Y)))) {
    bool HasNSW = cast<OverflowingBinaryOperator>(Op1)->hasNoSignedWrap();
    bool HasNUW = cast<OverflowingBinaryOperator>(Op1)->hasNoUnsignedWrap();
    if ((IsSigned && HasNSW) || (!IsSigned && HasNUW)) {
      replaceOperand(I, 0, ConstantInt::get(Ty, 1));
      replaceOperand(I, 1, Y);
      return &I;
    }
  }

  return nullptr;
}

static const unsigned MaxDepth = 6;

namespace {

using FoldUDivOperandCb = Instruction *(*)(Value *Op0, Value *Op1,
                                           const BinaryOperator &I,
                                           InstCombinerImpl &IC);

/// Used to maintain state for visitUDivOperand().
struct UDivFoldAction {
  /// Informs visitUDiv() how to fold this operand. This can be zero if this
  /// action joins two actions together.
  FoldUDivOperandCb FoldAction;

  /// Which operand to fold.
  Value *OperandToFold;

  union {
    /// The instruction returned when FoldAction is invoked.
    Instruction *FoldResult;

    /// Stores the LHS action index if this action joins two actions together.
    size_t SelectLHSIdx;
  };

  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand)
      : FoldAction(FA), OperandToFold(InputOperand), FoldResult(nullptr) {}
  UDivFoldAction(FoldUDivOperandCb FA, Value *InputOperand, size_t SLHS)
      : FoldAction(FA), OperandToFold(InputOperand), SelectLHSIdx(SLHS) {}
};

} // end anonymous namespace

// X udiv 2^C -> X >> C
static Instruction *foldUDivPow2Cst(Value *Op0, Value *Op1,
                                    const BinaryOperator &I,
                                    InstCombinerImpl &IC) {
  Constant *C1 = ConstantExpr::getExactLogBase2(cast<Constant>(Op1));
  if (!C1)
    llvm_unreachable("Failed to constant fold udiv -> logbase2");
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, C1);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
// X udiv (zext (C1 << N)), where C1 is "1<<C2" --> X >> (N+C2)
static Instruction *foldUDivShl(Value *Op0, Value *Op1, const BinaryOperator &I,
                                InstCombinerImpl &IC) {
  Value *ShiftLeft;
  if (!match(Op1, m_ZExt(m_Value(ShiftLeft))))
    ShiftLeft = Op1;

  Constant *CI;
  Value *N;
  if (!match(ShiftLeft, m_Shl(m_Constant(CI), m_Value(N))))
    llvm_unreachable("match should never fail here!");
  Constant *Log2Base = ConstantExpr::getExactLogBase2(CI);
  if (!Log2Base)
    llvm_unreachable("getLogBase2 should never fail here!");
  N = IC.Builder.CreateAdd(N, Log2Base);
  if (Op1 != ShiftLeft)
    N = IC.Builder.CreateZExt(N, Op1->getType());
  BinaryOperator *LShr = BinaryOperator::CreateLShr(Op0, N);
  if (I.isExact())
    LShr->setIsExact();
  return LShr;
}

// Recursively visits the possible right hand operands of a udiv
// instruction, seeing through select instructions, to determine if we can
// replace the udiv with something simpler. If we find that an operand is not
// able to simplify the udiv, we abort the entire transformation.
static size_t visitUDivOperand(Value *Op0, Value *Op1, const BinaryOperator &I,
                               SmallVectorImpl<UDivFoldAction> &Actions,
                               unsigned Depth = 0) {
  // FIXME: assert that Op1 isn't/doesn't contain undef.

  // Check to see if this is an unsigned division with an exact power of 2,
  // if so, convert to a right shift.
  if (match(Op1, m_Power2())) {
    Actions.push_back(UDivFoldAction(foldUDivPow2Cst, Op1));
    return Actions.size();
  }

  // X udiv (C1 << N), where C1 is "1<<C2" --> X >> (N+C2)
  if (match(Op1, m_Shl(m_Power2(), m_Value())) ||
      match(Op1, m_ZExt(m_Shl(m_Power2(), m_Value())))) {
    Actions.push_back(UDivFoldAction(foldUDivShl, Op1));
    return Actions.size();
  }

  // The remaining tests are all recursive, so bail out if we hit the limit.
  if (Depth++ == MaxDepth)
    return 0;

  if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
    // FIXME: missed optimization: if one of the hands of select is/contains
    //        undef, just directly pick the other one.
    // FIXME: can both hands contain undef?
    if (size_t LHSIdx =
            visitUDivOperand(Op0, SI->getOperand(1), I, Actions, Depth))
      if (visitUDivOperand(Op0, SI->getOperand(2), I, Actions, Depth)) {
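        // Both select arms folded: push a "join" entry (null FoldAction) that
        // records the index of the true-arm action so visitUDiv() can rebuild
        // a select over the two folded results.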
        Actions.push_back(UDivFoldAction(nullptr, Op1, LHSIdx - 1));
        return Actions.size();
      }

  return 0;
}

/// If we have zero-extended operands of an unsigned div or rem, we may be able
/// to narrow the operation (sink the zext below the math).
static Instruction *narrowUDivURem(BinaryOperator &I,
                                   InstCombiner::BuilderTy &Builder) {
  Instruction::BinaryOps Opcode = I.getOpcode();
  Value *N = I.getOperand(0);
  Value *D = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(N, m_ZExt(m_Value(X))) && match(D, m_ZExt(m_Value(Y))) &&
      X->getType() == Y->getType() && (N->hasOneUse() || D->hasOneUse())) {
    // udiv (zext X), (zext Y) --> zext (udiv X, Y)
    // urem (zext X), (zext Y) --> zext (urem X, Y)
    Value *NarrowOp = Builder.CreateBinOp(Opcode, X, Y);
    return new ZExtInst(NarrowOp, Ty);
  }

  Constant *C;
  if ((match(N, m_OneUse(m_ZExt(m_Value(X)))) && match(D, m_Constant(C))) ||
      (match(D, m_OneUse(m_ZExt(m_Value(X)))) && match(N, m_Constant(C)))) {
    // If the constant is the same in the smaller type, use the narrow version.
    Constant *TruncC = ConstantExpr::getTrunc(C, X->getType());
    if (ConstantExpr::getZExt(TruncC, Ty) != C)
      return nullptr;

    // udiv (zext X), C --> zext (udiv X, C')
    // urem (zext X), C --> zext (urem X, C')
    // udiv C, (zext X) --> zext (udiv C', X)
    // urem C, (zext X) --> zext (urem C', X)
    Value *NarrowOp = isa<Constant>(D) ? Builder.CreateBinOp(Opcode, X, TruncC)
                                       : Builder.CreateBinOp(Opcode, TruncC, X);
    return new ZExtInst(NarrowOp, Ty);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitUDiv(BinaryOperator &I) {
  if (Value *V = SimplifyUDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *X;
  const APInt *C1, *C2;
  if (match(Op0, m_LShr(m_Value(X), m_APInt(C1))) && match(Op1, m_APInt(C2))) {
    // (X lshr C1) udiv C2 --> X udiv (C2 << C1)
    bool Overflow;
    APInt C2ShlC1 = C2->ushl_ov(*C1, Overflow);
    if (!Overflow) {
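      // The combined udiv is exact only if both the original lshr and the
      // udiv were exact.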
      bool IsExact = I.isExact() && match(Op0, m_Exact(m_Value()));
      BinaryOperator *BO = BinaryOperator::CreateUDiv(
          X, ConstantInt::get(X->getType(), C2ShlC1));
      if (IsExact)
        BO->setIsExact();
      return BO;
    }
  }

  // Op0 / C where C is large (negative) --> zext (Op0 >= C)
  // TODO: Could use isKnownNegative() to handle non-constant values.
  Type *Ty = I.getType();
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpUGE(Op0, Op1);
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }
  // Op0 / (sext i1 X) --> zext (Op0 == -1) (if X is 0, the div is undefined)
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  if (Instruction *NarrowDiv = narrowUDivURem(I, Builder))
    return NarrowDiv;

  // If the udiv operands are non-overflowing multiplies with a common operand,
  // then eliminate the common factor:
  // (A * B) / (A * X) --> B / X (and commuted variants)
  // TODO: The code would be reduced if we had m_c_NUWMul pattern matching.
  // TODO: If -reassociation handled this generally, we could remove this.
  Value *A, *B;
  if (match(Op0, m_NUWMul(m_Value(A), m_Value(B)))) {
    if (match(Op1, m_NUWMul(m_Specific(A), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(A))))
      return BinaryOperator::CreateUDiv(B, X);
    if (match(Op1, m_NUWMul(m_Specific(B), m_Value(X))) ||
        match(Op1, m_NUWMul(m_Value(X), m_Specific(B))))
      return BinaryOperator::CreateUDiv(A, X);
  }

  // (LHS udiv (select (select (...)))) -> (LHS >> (select (select (...))))
  SmallVector<UDivFoldAction, 6> UDivActions;
  if (visitUDivOperand(Op0, Op1, I, UDivActions))
    for (unsigned i = 0, e = UDivActions.size(); i != e; ++i) {
      FoldUDivOperandCb Action = UDivActions[i].FoldAction;
      Value *ActionOp1 = UDivActions[i].OperandToFold;
      Instruction *Inst;
      if (Action)
        Inst = Action(Op0, ActionOp1, I, *this);
      else {
        // This action joins two actions together. The RHS of this action is
        // simply the last action we processed; we saved the LHS action index
        // in the joining action.
        size_t SelectRHSIdx = i - 1;
        Value *SelectRHS = UDivActions[SelectRHSIdx].FoldResult;
        size_t SelectLHSIdx = UDivActions[i].SelectLHSIdx;
        Value *SelectLHS = UDivActions[SelectLHSIdx].FoldResult;
        Inst = SelectInst::Create(cast<SelectInst>(ActionOp1)->getCondition(),
                                  SelectLHS, SelectRHS);
      }

      // If this is the last action to process, return it to the InstCombiner.
      // Otherwise, we insert it before the UDiv and record it so that we may
      // use it as part of a joining action (i.e., a SelectInst).
      if (e - i != 1) {
        Inst->insertBefore(&I);
        UDivActions[i].FoldResult = Inst;
      } else
        return Inst;
    }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSDiv(BinaryOperator &I) {
  if (Value *V = SimplifySDivInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer div common cases
  if (Instruction *Common = commonIDivTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  Value *X;
  // sdiv Op0, -1 --> -Op0
  // sdiv Op0, (sext i1 X) --> -Op0 (because if X is 0, the op is undefined)
  if (match(Op1, m_AllOnes()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return BinaryOperator::CreateNeg(Op0);

  // X / INT_MIN --> X == INT_MIN
  if (match(Op1, m_SignMask()))
    return new ZExtInst(Builder.CreateICmpEQ(Op0, Op1), Ty);

  // sdiv exact X, 1<<C --> ashr exact X, C iff 1<<C is non-negative
  // sdiv exact X, -1<<C --> -(ashr exact X, C)
  if (I.isExact() && ((match(Op1, m_Power2()) && match(Op1, m_NonNegative())) ||
                      match(Op1, m_NegatedPower2()))) {
    bool DivisorWasNegative = match(Op1, m_NegatedPower2());
    if (DivisorWasNegative)
      Op1 = ConstantExpr::getNeg(cast<Constant>(Op1));
    auto *AShr = BinaryOperator::CreateExactAShr(
        Op0, ConstantExpr::getExactLogBase2(cast<Constant>(Op1)), I.getName());
    if (!DivisorWasNegative)
      return AShr;
    Builder.Insert(AShr);
    AShr->setName(I.getName() + ".neg");
    return BinaryOperator::CreateNeg(AShr, I.getName());
  }

  const APInt *Op1C;
  if (match(Op1, m_APInt(Op1C))) {
    // If the dividend is sign-extended and the constant divisor is small enough
    // to fit in the source type, shrink the division to the narrower type:
    // (sext X) sdiv C --> sext (X sdiv C)
    Value *Op0Src;
    if (match(Op0, m_OneUse(m_SExt(m_Value(Op0Src)))) &&
        Op0Src->getType()->getScalarSizeInBits() >= Op1C->getMinSignedBits()) {

      // In the general case, we need to make sure that the dividend is not the
      // minimum signed value because dividing that by -1 is UB. But here, we
      // know that the -1 divisor case is already handled above.

      Constant *NarrowDivisor =
          ConstantExpr::getTrunc(cast<Constant>(Op1), Op0Src->getType());
      Value *NarrowOp = Builder.CreateSDiv(Op0Src, NarrowDivisor);
      return new SExtInst(NarrowOp, Ty);
    }

    // -X / C --> X / -C (if the negation doesn't overflow).
    // TODO: This could be enhanced to handle arbitrary vector constants by
    //       checking if all elements are not the min-signed-val.
    if (!Op1C->isMinSignedValue() &&
        match(Op0, m_NSWSub(m_Zero(), m_Value(X)))) {
      Constant *NegC = ConstantInt::get(Ty, -(*Op1C));
      Instruction *BO = BinaryOperator::CreateSDiv(X, NegC);
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  // -X / Y --> -(X / Y)
  Value *Y;
  if (match(&I, m_SDiv(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(
        Builder.CreateSDiv(X, Y, I.getName(), I.isExact()));

  // abs(X) / X --> X > -1 ? 1 : -1
  // X / abs(X) --> X > -1 ? 1 : -1
  if (match(&I, m_c_BinOp(
                    m_OneUse(m_Intrinsic<Intrinsic::abs>(m_Value(X), m_One())),
                    m_Deferred(X)))) {
    Constant *NegOne = ConstantInt::getAllOnesValue(Ty);
    Value *Cond = Builder.CreateICmpSGT(X, NegOne);
    return SelectInst::Create(Cond, ConstantInt::get(Ty, 1), NegOne);
  }

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a udiv.
  APInt Mask(APInt::getSignMask(Ty->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op0, Mask, 0, &I)) {
    if (MaskedValueIsZero(Op1, Mask, 0, &I)) {
      // X sdiv Y -> X udiv Y, iff X and Y don't have sign bit set
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }

    if (match(Op1, m_NegatedPower2())) {
      // X sdiv (-(1 << C)) -> -(X sdiv (1 << C)) ->
      //                    -> -(X udiv (1 << C)) -> -(X u>> C)
      return BinaryOperator::CreateNeg(Builder.Insert(foldUDivPow2Cst(
          Op0, ConstantExpr::getNeg(cast<Constant>(Op1)), I, *this)));
    }

    if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
      // X sdiv (1 << Y) -> X udiv (1 << Y) ( -> X u>> Y)
      // Safe because the only negative value (1 << Y) can take on is
      // INT_MIN, and X sdiv INT_MIN == X udiv INT_MIN == 0 if X doesn't have
      // the sign bit set.
      auto *BO = BinaryOperator::CreateUDiv(Op0, Op1, I.getName());
      BO->setIsExact(I.isExact());
      return BO;
    }
  }

  return nullptr;
}

/// Remove negation and try to convert division into multiplication.
static Instruction *foldFDivConstantDivisor(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(1), m_Constant(C)))
    return nullptr;

  // -X / C --> X / -C
  Value *X;
  if (match(I.getOperand(0), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(X, ConstantExpr::getFNeg(C), &I);

  // If the constant divisor has an exact inverse, this is always safe. If not,
  // then we can still create a reciprocal if fast-math-flags allow it and the
  // constant is a regular number (not zero, infinite, or denormal).
  if (!(C->hasExactInverseFP() || (I.hasAllowReciprocal() && C->isNormalFP())))
    return nullptr;

  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  auto *RecipC = ConstantExpr::getFDiv(ConstantFP::get(I.getType(), 1.0), C);
  if (!RecipC->isNormalFP())
    return nullptr;

  // X / C --> X * (1 / C)
  return BinaryOperator::CreateFMulFMF(I.getOperand(0), RecipC, &I);
}

/// Remove negation and try to reassociate constant math.
static Instruction *foldFDivConstantDividend(BinaryOperator &I) {
  Constant *C;
  if (!match(I.getOperand(0), m_Constant(C)))
    return nullptr;

  // C / -X --> -C / X
  Value *X;
  if (match(I.getOperand(1), m_FNeg(m_Value(X))))
    return BinaryOperator::CreateFDivFMF(ConstantExpr::getFNeg(C), X, &I);

  if (!I.hasAllowReassoc() || !I.hasAllowReciprocal())
    return nullptr;

  // Try to reassociate C / X expressions where X includes another constant.
  Constant *C2, *NewC = nullptr;
  if (match(I.getOperand(1), m_FMul(m_Value(X), m_Constant(C2)))) {
    // C / (X * C2) --> (C / C2) / X
    NewC = ConstantExpr::getFDiv(C, C2);
  } else if (match(I.getOperand(1), m_FDiv(m_Value(X), m_Constant(C2)))) {
    // C / (X / C2) --> (C * C2) / X
    NewC = ConstantExpr::getFMul(C, C2);
  }
  // Disallow denormal constants because we don't know what would happen
  // on all targets.
  // TODO: Use Intrinsic::canonicalize or let function attributes tell us that
  // denorms are flushed?
  if (!NewC || !NewC->isNormalFP())
    return nullptr;

  return BinaryOperator::CreateFDivFMF(NewC, X, &I);
}

/// Negate the exponent of pow/exp to fold division-by-pow() into multiply.
static Instruction *foldFDivPowDivisor(BinaryOperator &I,
                                       InstCombiner::BuilderTy &Builder) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  auto *II = dyn_cast<IntrinsicInst>(Op1);
  if (!II || !II->hasOneUse() || !I.hasAllowReassoc() ||
      !I.hasAllowReciprocal())
    return nullptr;

  // Z / pow(X, Y) --> Z * pow(X, -Y)
  // Z / exp{2}(Y) --> Z * exp{2}(-Y)
  // In the general case, this creates an extra instruction, but fmul allows
  // for better canonicalization and optimization than fdiv.
  Intrinsic::ID IID = II->getIntrinsicID();
  SmallVector<Value *> Args;
  switch (IID) {
  case Intrinsic::pow:
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(1), &I));
    break;
  case Intrinsic::powi: {
    // Require 'ninf' assuming that makes powi(X, -INT_MIN) acceptable.
    // That is, X ** (huge negative number) is 0.0, ~1.0, or INF and so
    // dividing by that is INF, ~1.0, or 0.0. Code that uses powi allows
    // non-standard results, so this corner case should be acceptable if the
    // code rules out INF values.
    if (!I.hasNoInfs())
      return nullptr;
    Args.push_back(II->getArgOperand(0));
    Args.push_back(Builder.CreateNeg(II->getArgOperand(1)));
    Type *Tys[] = {I.getType(), II->getArgOperand(1)->getType()};
    Value *Pow = Builder.CreateIntrinsic(IID, Tys, Args, &I);
    return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
  }
  case Intrinsic::exp:
  case Intrinsic::exp2:
    Args.push_back(Builder.CreateFNegFMF(II->getArgOperand(0), &I));
    break;
  default:
    return nullptr;
  }
  Value *Pow = Builder.CreateIntrinsic(IID, I.getType(), Args, &I);
  return BinaryOperator::CreateFMulFMF(Op0, Pow, &I);
}

Instruction *InstCombinerImpl::visitFDiv(BinaryOperator &I) {
  if (Value *V = SimplifyFDivInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *R = foldFDivConstantDivisor(I))
    return R;

  if (Instruction *R = foldFDivConstantDividend(I))
    return R;

  if (Instruction *R = foldFPSignBitOps(I))
    return R;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (isa<Constant>(Op1))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op0))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

  if (I.hasAllowReassoc() && I.hasAllowReciprocal()) {
    Value *X, *Y;
    if (match(Op0, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op1))) {
      // (X / Y) / Z => X / (Y * Z)
      Value *YZ = Builder.CreateFMulFMF(Y, Op1, &I);
      return BinaryOperator::CreateFDivFMF(X, YZ, &I);
    }
    if (match(Op1, m_OneUse(m_FDiv(m_Value(X), m_Value(Y)))) &&
        (!isa<Constant>(Y) || !isa<Constant>(Op0))) {
      // Z / (X / Y) => (Y * Z) / X
      Value *YZ = Builder.CreateFMulFMF(Y, Op0, &I);
      return BinaryOperator::CreateFDivFMF(YZ, X, &I);
    }
    // Z / (1.0 / Y) => (Y * Z)
    //
    // This is a special case of Z / (X / Y) => (Y * Z) / X, with X = 1.0. The
    // m_OneUse check is avoided because even in the case of the multiple uses
    // for 1.0/Y, the number of instructions remain the same and a division is
    // replaced by a multiplication.
    if (match(Op1, m_FDiv(m_SpecificFP(1.0), m_Value(Y))))
      return BinaryOperator::CreateFMulFMF(Y, Op0, &I);
  }

  if (I.hasAllowReassoc() && Op0->hasOneUse() && Op1->hasOneUse()) {
    // sin(X) / cos(X) -> tan(X)
    // cos(X) / sin(X) -> 1/tan(X) (cotangent)
    Value *X;
    bool IsTan = match(Op0, m_Intrinsic<Intrinsic::sin>(m_Value(X))) &&
                 match(Op1, m_Intrinsic<Intrinsic::cos>(m_Specific(X)));
    bool IsCot =
        !IsTan && match(Op0, m_Intrinsic<Intrinsic::cos>(m_Value(X))) &&
        match(Op1, m_Intrinsic<Intrinsic::sin>(m_Specific(X)));

    if ((IsTan || IsCot) &&
        hasFloatFn(&TLI, I.getType(), LibFunc_tan, LibFunc_tanf, LibFunc_tanl)) {
      IRBuilder<> B(&I);
      IRBuilder<>::FastMathFlagGuard FMFGuard(B);
      B.setFastMathFlags(I.getFastMathFlags());
      AttributeList Attrs =
          cast<CallBase>(Op0)->getCalledFunction()->getAttributes();
      Value *Res = emitUnaryFloatFnCall(X, &TLI, LibFunc_tan, LibFunc_tanf,
                                        LibFunc_tanl, B, Attrs);
      if (IsCot)
        Res = B.CreateFDiv(ConstantFP::get(I.getType(), 1.0), Res);
      return replaceInstUsesWith(I, Res);
    }
  }

  // X / (X * Y) --> 1.0 / Y
  // Reassociating to (X / X -> 1.0) is legal when NaNs are not allowed.
  // We can ignore the possibility that X is infinity because INF/INF is NaN.
  Value *X, *Y;
  if (I.hasNoNaNs() && I.hasAllowReassoc() &&
      match(Op1, m_c_FMul(m_Specific(Op0), m_Value(Y)))) {
    replaceOperand(I, 0, ConstantFP::get(I.getType(), 1.0));
    replaceOperand(I, 1, Y);
    return &I;
  }

  // X / fabs(X) -> copysign(1.0, X)
  // fabs(X) / X -> copysign(1.0, X)
  if (I.hasNoNaNs() && I.hasNoInfs() &&
      (match(&I, m_FDiv(m_Value(X), m_FAbs(m_Deferred(X)))) ||
       match(&I, m_FDiv(m_FAbs(m_Value(X)), m_Deferred(X))))) {
    Value *V = Builder.CreateBinaryIntrinsic(
        Intrinsic::copysign, ConstantFP::get(I.getType(), 1.0), X, &I);
    return replaceInstUsesWith(I, V);
  }

  if (Instruction *Mul = foldFDivPowDivisor(I, Builder))
    return Mul;

  return nullptr;
}

/// This function implements the transforms common to both integer remainder
/// instructions (urem and srem). It is called by the visitors to those integer
/// remainder instructions.
/// Common integer remainder transforms
Instruction *InstCombinerImpl::commonIRemTransforms(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  // The RHS is known non-zero.
  if (Value *V = simplifyValueKnownNonZero(I.getOperand(1), *this, I))
    return replaceOperand(I, 1, V);

  // Handle cases involving: rem X, (select Cond, Y, Z)
  if (simplifyDivRemOfSelectWithZeroOp(I))
    return &I;

  if (isa<Constant>(Op1)) {
    if (Instruction *Op0I = dyn_cast<Instruction>(Op0)) {
      if (SelectInst *SI = dyn_cast<SelectInst>(Op0I)) {
        if (Instruction *R = FoldOpIntoSelect(I, SI))
          return R;
      } else if (auto *PN = dyn_cast<PHINode>(Op0I)) {
        const APInt *Op1Int;
        if (match(Op1, m_APInt(Op1Int)) && !Op1Int->isMinValue() &&
            (I.getOpcode() == Instruction::URem ||
             !Op1Int->isMinSignedValue())) {
          // foldOpIntoPhi will speculate instructions to the end of the PHI's
          // predecessor blocks, so do this only if we know the srem or urem
          // will not fault.
          if (Instruction *NV = foldOpIntoPhi(I, PN))
            return NV;
        }
      }

      // See if we can fold away this rem instruction.
      if (SimplifyDemandedInstructionBits(I))
        return &I;
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitURem(BinaryOperator &I) {
  if (Value *V = SimplifyURemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *common = commonIRemTransforms(I))
    return common;

  if (Instruction *NarrowRem = narrowUDivURem(I, Builder))
    return NarrowRem;

  // X urem Y -> X and Y-1, where Y is a power of 2.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Type *Ty = I.getType();
  if (isKnownToBeAPowerOfTwo(Op1, /*OrZero*/ true, 0, &I)) {
    // This may increase instruction count; we don't enforce that Y is a
    // constant.
    Constant *N1 = Constant::getAllOnesValue(Ty);
    Value *Add = Builder.CreateAdd(Op1, N1);
    return BinaryOperator::CreateAnd(Op0, Add);
  }

  // 1 urem X -> zext(X != 1)
  if (match(Op0, m_One())) {
    Value *Cmp = Builder.CreateICmpNE(Op1, ConstantInt::get(Ty, 1));
    return CastInst::CreateZExtOrBitCast(Cmp, Ty);
  }

  // X urem C -> X < C ? X : X - C, where C >= signbit.
  if (match(Op1, m_Negative())) {
    Value *Cmp = Builder.CreateICmpULT(Op0, Op1);
    Value *Sub = Builder.CreateSub(Op0, Op1);
    return SelectInst::Create(Cmp, Op0, Sub);
  }

  // If the divisor is a sext of a boolean, then the divisor must be max
  // unsigned value (-1). Therefore, the remainder is Op0 unless Op0 is also
  // max unsigned value. In that case, the remainder is 0:
  // urem Op0, (sext i1 X) --> (Op0 == -1) ? 0 : Op0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)) {
    Value *Cmp = Builder.CreateICmpEQ(Op0, ConstantInt::getAllOnesValue(Ty));
    return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Op0);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitSRem(BinaryOperator &I) {
  if (Value *V = SimplifySRemInst(I.getOperand(0), I.getOperand(1),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  // Handle the integer rem common cases
  if (Instruction *Common = commonIRemTransforms(I))
    return Common;

  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  {
    const APInt *Y;
    // X % -Y -> X % Y
    if (match(Op1, m_Negative(Y)) && !Y->isMinSignedValue())
      return replaceOperand(I, 1, ConstantInt::get(I.getType(), -*Y));
  }

  // -X srem Y --> -(X srem Y)
  Value *X, *Y;
  if (match(&I, m_SRem(m_OneUse(m_NSWSub(m_Zero(), m_Value(X))), m_Value(Y))))
    return BinaryOperator::CreateNSWNeg(Builder.CreateSRem(X, Y));

  // If the sign bits of both operands are zero (i.e. we can prove they are
  // unsigned inputs), turn this into a urem.
  APInt Mask(APInt::getSignMask(I.getType()->getScalarSizeInBits()));
  if (MaskedValueIsZero(Op1, Mask, 0, &I) &&
      MaskedValueIsZero(Op0, Mask, 0, &I)) {
    // X srem Y -> X urem Y, iff X and Y don't have sign bit set
    return BinaryOperator::CreateURem(Op0, Op1, I.getName());
  }

  // If it's a constant vector, flip any negative values positive.
  if (isa<ConstantVector>(Op1) || isa<ConstantDataVector>(Op1)) {
    Constant *C = cast<Constant>(Op1);
    unsigned VWidth = cast<FixedVectorType>(C->getType())->getNumElements();

    bool hasNegative = false;
    bool hasMissing = false;
    for (unsigned i = 0; i != VWidth; ++i) {
      Constant *Elt = C->getAggregateElement(i);
      if (!Elt) {
        hasMissing = true;
        break;
      }

      if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elt))
        if (RHS->isNegative())
          hasNegative = true;
    }

    if (hasNegative && !hasMissing) {
      SmallVector<Constant *, 16> Elts(VWidth);
      for (unsigned i = 0; i != VWidth; ++i) {
        Elts[i] = C->getAggregateElement(i); // Handle undef, etc.
        if (ConstantInt *RHS = dyn_cast<ConstantInt>(Elts[i])) {
          if (RHS->isNegative())
            Elts[i] = cast<ConstantInt>(ConstantExpr::getNeg(RHS));
        }
      }

      Constant *NewRHSV = ConstantVector::get(Elts);
      if (NewRHSV != C) // Don't loop on -MININT
        return replaceOperand(I, 1, NewRHSV);
    }
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitFRem(BinaryOperator &I) {
  if (Value *V = SimplifyFRemInst(I.getOperand(0), I.getOperand(1),
                                  I.getFastMathFlags(),
                                  SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (Instruction *X = foldVectorBinop(I))
    return X;

  return nullptr;
}