//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified; this is usually true, and assuming it simplifies the logic (if
// they have not been simplified then the results are correct but maybe
// suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OverflowInstAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

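// Cap on how deeply the recursive Simplify* routines below may chase through
// operands. Each recursive helper decrements this budget and bails out once
// it is exhausted, keeping the cost of a single simplification query bounded.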
enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);
static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                              unsigned);
static Value *SimplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
                                 Value *RHS, Value *Cond,
                                 const SimplifyQuery &Q, unsigned MaxRecurse,
                                 Constant *TrueOrFalse) {
  Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond), then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
                                     Value *RHS, Value *Cond,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
                                      Value *RHS, Value *Cond,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  if (match(FCmp, m_Zero()))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = SimplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getFunction())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
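/// For example, with Opcode = Mul and OpcodeToExpand = Add (the use in
/// SimplifyMulInst below), this asks whether "(B0 * OtherOp) + (B1 * OtherOp)"
/// folds to something simpler than the original expression.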
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L = SimplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(),
                           MaxRecurse);
  if (!L)
    return nullptr;
  Value *R = SimplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(),
                           MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode,
                                     Value *L, Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
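/// For example, "(X + Y) + (0 - Y)" reassociates to "X + (Y + (0 - Y))"; the
/// inner add folds to 0, so the whole expression simplifies to X.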
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
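/// For example, "urem (select i1 %c, i32 8, i32 12), 4" evaluates to 0 on
/// both branches of the select, so the whole expression folds to 0.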
static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
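/// For example, given "%p = phi i32 [ 8, %a ], [ 12, %b ]", the expression
/// "urem i32 %p, 4" evaluates to 0 for every incoming value, so it folds to 0.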
static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
      SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
      SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than it did previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
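/// For example, given "%p = phi i32 [ 1, %a ], [ 2, %b ]", the comparison
/// "icmp ult i32 %p, 4" is true for every incoming value, so it folds to true.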
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than it did previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

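/// Fold a binary operation on two constant operands, or otherwise canonicalize
/// a lone constant to the RHS when the operation is commutative, so that later
/// pattern matching only needs to inspect Op1 for constants.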
static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// V must be a pointer, and the returned constant is '0' (of the index type)
/// if there are no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());

  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // Because the strip may trace through `addrspacecast`, the accumulated
  // offset may need to be sign-extended or truncated to the new index type.
  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());

  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
  if (VectorType *VecTy = dyn_cast<VectorType>(V->getType()))
    return ConstantVector::getSplat(VecTy->getElementCount(), OffsetIntPtr);
  return OffsetIntPtr;
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
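/// For example, "getelementptr inbounds i8, i8* %p, i64 8" and %p both strip
/// down to the base %p, so their difference folds to the constant 8.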
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse-1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse-1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse-1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse-1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse-1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse-1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
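/// For example, "udiv i32 %x, 1" folds to %x and "urem i32 %x, 1" folds to 0,
/// regardless of %x.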
static Value *simplifyDivRem(Value *Op0, Value *Op1, bool IsDiv,
                             const SimplifyQuery &Q) {
  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // If the divisor is a fixed-width vector constant and any element is zero
  // or undef, the behavior is undefined and we can fold the whole op to poison.
  auto *Op1C = dyn_cast<Constant>(Op1);
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
  if (Op1C && VTy) {
    unsigned NumElts = VTy->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
        return PoisonValue::get(Ty);
    }
  }

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
  Value *X;
  if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
      (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  return nullptr;
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
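/// For example, "sdiv i32 3, %y" folds to 0 whenever value tracking can prove
/// that %y is less than -3 or greater than 3.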
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, true, Q))
    return V;

  bool IsSigned = Opcode == Instruction::SDiv;

  // (X * Y) / Y -> X if the multiplication does not overflow.
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // If the Mul does not overflow, then we are good to go.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)))
      return X;
    // If X has the form X = A / Y, then X * Y cannot overflow.
    if ((IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1)))))
      return X;
  }

  // (X rem Y) / Y -> 0
  if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflows
  ConstantInt *C1, *C2;
  if (!IsSigned && match(Op0, m_UDiv(m_Value(X), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Op0, Op1, false, Q))
    return V;

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo &&
      ((Opcode == Instruction::SRem &&
        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
       (Opcode == Instruction::URem &&
        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
    return Constant::getNullValue(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If X / Y == 0, then X % Y == X.
  if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
    return Op0;

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the two operands are negations of each other (with nsw), return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negations of each other, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is undefined.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().uge(CI->getType()->getScalarSizeInBits()))
      return true;

  // If every lane of a vector shift amount yields a poison shift, the whole
  // shift is poison.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}

/// Given operands for an Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isPoisonShift(Op1, Q))
    return PoisonValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is undefined.
  KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
    return PoisonValue::get(Op0->getType());

  // If all valid bits in the shift amount are known zero, the first operand is
  // unchanged.
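  // (A nonzero amount with that many trailing zero bits would be at least the
  // bit width, i.e. itself a poison shift, so assuming the amount is zero is
  // sound.)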
  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  // Check for nsw shl leading to a poison value.
  if (IsNSW) {
    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
    KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);

    if (KnownVal.Zero.isSignBitSet())
      KnownShl.Zero.setSignBit();
    if (KnownVal.One.isSignBitSet())
      KnownShl.One.setSignBit();

    if (KnownShl.hasConflict())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          SimplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (Q.isUndefValue(Op0))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known =
        computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}

/// Given operands for an Shl, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          SimplifyShift(Instruction::Shl, Op0, Op1, isNSW, Q, MaxRecurse))
    return V;

  // undef << X -> 0
  // undef << X -> undef (if it's NSW/NUW)
1335 if (Q.isUndefValue(Op0))
1336 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1337
1338 // (X >> A) << A -> X
1339 Value *X;
1340 if (Q.IIQ.UseInstrInfo &&
1341 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1342 return X;
1343
1344 // shl nuw i8 C, %x -> C iff C has sign bit set.
1345 if (isNUW && match(Op0, m_Negative()))
1346 return Op0;
1347 // NOTE: could use computeKnownBits() / LazyValueInfo,
1348 // but the cost-benefit analysis suggests it isn't worth it.
1349
1350 return nullptr;
1351 }
1352
SimplifyShlInst(Value * Op0,Value * Op1,bool isNSW,bool isNUW,const SimplifyQuery & Q)1353 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1354 const SimplifyQuery &Q) {
1355 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1356 }
1357
1358 /// Given operands for an LShr, see if we can fold the result.
1359 /// If not, this returns null.
1360 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1361 const SimplifyQuery &Q, unsigned MaxRecurse) {
1362 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1363 MaxRecurse))
1364 return V;
1365
1366 // (X << A) >> A -> X
1367 Value *X;
1368 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1369 return X;
1370
1371 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1372 // We can return X as we do in the above case since OR alters no bits in X.
1373 // SimplifyDemandedBits in InstCombine can do more general optimization for
1374 // bit manipulation. This pattern aims to provide opportunities for other
1375 // optimizers by supporting a simple but common case in InstSimplify.
1376 Value *Y;
1377 const APInt *ShRAmt, *ShLAmt;
1378 if (match(Op1, m_APInt(ShRAmt)) &&
1379 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1380 *ShRAmt == *ShLAmt) {
1381 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1382 const unsigned Width = Op0->getType()->getScalarSizeInBits();
1383 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
1384 if (ShRAmt->uge(EffWidthY))
1385 return X;
1386 }
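  // Illustrative example (i32): ((X nuw<< 8) | Y) >> 8 --> X when Y is known
  // to be less than 256, since Y then only occupies the low 8 bits that are
  // shifted out and 'nuw' guarantees the shl lost no bits of X.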
1387
1388 return nullptr;
1389 }
1390
1391 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1392 const SimplifyQuery &Q) {
1393 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1394 }
1395
1396 /// Given operands for an AShr, see if we can fold the result.
1397 /// If not, this returns null.
1398 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1399 const SimplifyQuery &Q, unsigned MaxRecurse) {
1400 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1401 MaxRecurse))
1402 return V;
1403
1404 // all ones >>a X -> -1
1405 // Do not return Op0 because it may contain undef elements if it's a vector.
1406 if (match(Op0, m_AllOnes()))
1407 return Constant::getAllOnesValue(Op0->getType());
1408
1409 // (X << A) >> A -> X
1410 Value *X;
1411 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1412 return X;
1413
1414 // Arithmetic shifting an all-sign-bit value is a no-op.
1415 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1416 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1417 return Op0;
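  // E.g., %s = sext i1 %b to i32 consists entirely of sign bits, so
  // "ashr %s, %n" merely replicates the sign bit and folds to %s.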
1418
1419 return nullptr;
1420 }
1421
1422 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1423 const SimplifyQuery &Q) {
1424 return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1425 }
1426
1427 /// Commuted variants are assumed to be handled by calling this function again
1428 /// with the parameters swapped.
1429 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1430 ICmpInst *UnsignedICmp, bool IsAnd,
1431 const SimplifyQuery &Q) {
1432 Value *X, *Y;
1433
1434 ICmpInst::Predicate EqPred;
1435 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1436 !ICmpInst::isEquality(EqPred))
1437 return nullptr;
1438
1439 ICmpInst::Predicate UnsignedPred;
1440
1441 Value *A, *B;
1442 // Y = (A - B);
1443 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1444 if (match(UnsignedICmp,
1445 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1446 ICmpInst::isUnsigned(UnsignedPred)) {
1447 // A >=/<= B || (A - B) != 0 <--> true
1448 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1449 UnsignedPred == ICmpInst::ICMP_ULE) &&
1450 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1451 return ConstantInt::getTrue(UnsignedICmp->getType());
1452 // A </> B && (A - B) == 0 <--> false
1453 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1454 UnsignedPred == ICmpInst::ICMP_UGT) &&
1455 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1456 return ConstantInt::getFalse(UnsignedICmp->getType());
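      // Rationale: (A - B) == 0 is equivalent to A == B, so e.g.
      // "A u>= B || A != B" covers every case (A == B satisfies the first
      // clause), while "A u< B && A == B" is contradictory.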
1457
1458 // A </> B && (A - B) != 0 <--> A </> B
1459 // A </> B || (A - B) != 0 <--> (A - B) != 0
1460 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1461 UnsignedPred == ICmpInst::ICMP_UGT))
1462 return IsAnd ? UnsignedICmp : ZeroICmp;
1463
1464 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1465 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1466 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1467 UnsignedPred == ICmpInst::ICMP_UGE))
1468 return IsAnd ? ZeroICmp : UnsignedICmp;
1469 }
1470
1471 // Given Y = (A - B)
1472 // Y >= A && Y != 0 --> Y >= A iff B != 0
1473 // Y < A || Y == 0 --> Y < A iff B != 0
1474 if (match(UnsignedICmp,
1475 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1476 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1477 EqPred == ICmpInst::ICMP_NE &&
1478 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1479 return UnsignedICmp;
1480 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1481 EqPred == ICmpInst::ICMP_EQ &&
1482 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1483 return UnsignedICmp;
1484 }
1485 }
1486
1487 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1488 ICmpInst::isUnsigned(UnsignedPred))
1489 ;
1490 else if (match(UnsignedICmp,
1491 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1492 ICmpInst::isUnsigned(UnsignedPred))
1493 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1494 else
1495 return nullptr;
1496
1497 // X > Y && Y == 0 --> Y == 0 iff X != 0
1498 // X > Y || Y == 0 --> X > Y iff X != 0
1499 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1500 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1501 return IsAnd ? ZeroICmp : UnsignedICmp;
1502
1503 // X <= Y && Y != 0 --> X <= Y iff X != 0
1504 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1505 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1506 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1507 return IsAnd ? UnsignedICmp : ZeroICmp;
1508
1509 // The transforms below here are expected to be handled more generally with
1510 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1511 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1512 // these are candidates for removal.
1513
1514 // X < Y && Y != 0 --> X < Y
1515 // X < Y || Y != 0 --> Y != 0
1516 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1517 return IsAnd ? UnsignedICmp : ZeroICmp;
1518
1519 // X >= Y && Y == 0 --> Y == 0
1520 // X >= Y || Y == 0 --> X >= Y
1521 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1522 return IsAnd ? ZeroICmp : UnsignedICmp;
1523
1524 // X < Y && Y == 0 --> false
1525 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1526 IsAnd)
1527 return getFalse(UnsignedICmp->getType());
1528
1529 // X >= Y || Y != 0 --> true
1530 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1531 !IsAnd)
1532 return getTrue(UnsignedICmp->getType());
1533
1534 return nullptr;
1535 }
1536
1537 /// Commuted variants are assumed to be handled by calling this function again
1538 /// with the parameters swapped.
1539 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1540 ICmpInst::Predicate Pred0, Pred1;
1541   Value *A, *B;
1542 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1543 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1544 return nullptr;
1545
1546 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1547 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1548 // can eliminate Op1 from this 'and'.
1549 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1550 return Op0;
1551
1552 // Check for any combination of predicates that are guaranteed to be disjoint.
1553 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1554 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1555 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1556 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1557 return getFalse(Op0->getType());
1558
1559 return nullptr;
1560 }
1561
1562 /// Commuted variants are assumed to be handled by calling this function again
1563 /// with the parameters swapped.
1564 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1565 ICmpInst::Predicate Pred0, Pred1;
1566   Value *A, *B;
1567 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1568 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1569 return nullptr;
1570
1571 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1572 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1573 // can eliminate Op0 from this 'or'.
1574 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1575 return Op1;
1576
1577 // Check for any combination of predicates that cover the entire range of
1578 // possibilities.
1579 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1580 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1581 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1582 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1583 return getTrue(Op0->getType());
1584
1585 return nullptr;
1586 }
1587
1588 /// Test if a pair of compares with a shared operand and 2 constants has an
1589 /// empty set intersection, full set union, or if one compare is a superset of
1590 /// the other.
1591 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1592 bool IsAnd) {
1593   // Look for this pattern: {and/or} ((icmp X, C0), (icmp X, C1)).
1594 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1595 return nullptr;
1596
1597 const APInt *C0, *C1;
1598 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1599 !match(Cmp1->getOperand(1), m_APInt(C1)))
1600 return nullptr;
1601
1602 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1603 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1604
1605 // For and-of-compares, check if the intersection is empty:
1606 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1607 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1608 return getFalse(Cmp0->getType());
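  // E.g., (icmp ult X, 4) && (icmp ugt X, 10): the regions [0, 4) and
  // [11, unsigned max] do not intersect, so the 'and' folds to false.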
1609
1610 // For or-of-compares, check if the union is full:
1611 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1612 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1613 return getTrue(Cmp0->getType());
1614
1615 // Is one range a superset of the other?
1616 // If this is and-of-compares, take the smaller set:
1617 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1618 // If this is or-of-compares, take the larger set:
1619 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1620 if (Range0.contains(Range1))
1621 return IsAnd ? Cmp1 : Cmp0;
1622 if (Range1.contains(Range0))
1623 return IsAnd ? Cmp0 : Cmp1;
1624
1625 return nullptr;
1626 }
1627
1628 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1629 bool IsAnd) {
1630 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1631 if (!match(Cmp0->getOperand(1), m_Zero()) ||
1632 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1633 return nullptr;
1634
1635 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1636 return nullptr;
1637
1638 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1639 Value *X = Cmp0->getOperand(0);
1640 Value *Y = Cmp1->getOperand(0);
1641
1642 // If one of the compares is a masked version of a (not) null check, then
1643 // that compare implies the other, so we eliminate the other. Optionally, look
1644 // through a pointer-to-int cast to match a null check of a pointer type.
1645
1646 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1647 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1648 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1649 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1650 if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1651 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1652 return Cmp1;
1653
1654 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1655 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1656 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1657 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1658 if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1659 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1660 return Cmp0;
1661
1662 return nullptr;
1663 }
1664
1665 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1666 const InstrInfoQuery &IIQ) {
1667 // (icmp (add V, C0), C1) & (icmp V, C0)
1668 ICmpInst::Predicate Pred0, Pred1;
1669 const APInt *C0, *C1;
1670 Value *V;
1671 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1672 return nullptr;
1673
1674 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1675 return nullptr;
1676
1677 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1678 if (AddInst->getOperand(1) != Op1->getOperand(1))
1679 return nullptr;
1680
1681 Type *ITy = Op0->getType();
1682 bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1683 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1684
1685 const APInt Delta = *C1 - *C0;
1686 if (C0->isStrictlyPositive()) {
1687 if (Delta == 2) {
1688 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1689 return getFalse(ITy);
1690 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1691 return getFalse(ITy);
1692 }
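    // E.g., with C0 = 1 and C1 = 3: "(V + 1) u< 3" restricts V to {-1, 0, 1},
    // which contradicts "V s> 1", so the 'and' folds to false.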
1693 if (Delta == 1) {
1694 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1695 return getFalse(ITy);
1696 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1697 return getFalse(ITy);
1698 }
1699 }
1700 if (C0->getBoolValue() && isNUW) {
1701 if (Delta == 2)
1702 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1703 return getFalse(ITy);
1704 if (Delta == 1)
1705 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1706 return getFalse(ITy);
1707 }
1708
1709 return nullptr;
1710 }
1711
1712 /// Try to eliminate compares with signed or unsigned min/max constants.
1713 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1714 bool IsAnd) {
1715 // Canonicalize an equality compare as Cmp0.
1716 if (Cmp1->isEquality())
1717 std::swap(Cmp0, Cmp1);
1718 if (!Cmp0->isEquality())
1719 return nullptr;
1720
1721 // The non-equality compare must include a common operand (X). Canonicalize
1722 // the common operand as operand 0 (the predicate is swapped if the common
1723 // operand was operand 1).
1724 ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1725 Value *X = Cmp0->getOperand(0);
1726 ICmpInst::Predicate Pred1;
1727 bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value()));
1728 if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())))
1729 return nullptr;
1730 if (ICmpInst::isEquality(Pred1))
1731 return nullptr;
1732
1733 // The equality compare must be against a constant. Flip bits if we matched
1734 // a bitwise not. Convert a null pointer constant to an integer zero value.
1735 APInt MinMaxC;
1736 const APInt *C;
1737 if (match(Cmp0->getOperand(1), m_APInt(C)))
1738 MinMaxC = HasNotOp ? ~*C : *C;
1739 else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1740 MinMaxC = APInt::getNullValue(8);
1741 else
1742 return nullptr;
1743
1744 // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1745 if (!IsAnd) {
1746 Pred0 = ICmpInst::getInversePredicate(Pred0);
1747 Pred1 = ICmpInst::getInversePredicate(Pred1);
1748 }
1749
1750 // Normalize to unsigned compare and unsigned min/max value.
1751 // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1752 if (ICmpInst::isSigned(Pred1)) {
1753 Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1754 MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1755 }
1756
1757 // (X != MAX) && (X < Y) --> X < Y
1758 // (X == MAX) || (X >= Y) --> X >= Y
1759 if (MinMaxC.isMaxValue())
1760 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1761 return Cmp1;
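  // E.g., i8: (X != 127) && (X s< Y) --> X s< Y, since X s< Y already implies
  // X != SMAX; the normalization above maps this to the unsigned max case.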
1762
1763 // (X != MIN) && (X > Y) --> X > Y
1764 // (X == MIN) || (X <= Y) --> X <= Y
1765 if (MinMaxC.isMinValue())
1766 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1767 return Cmp1;
1768
1769 return nullptr;
1770 }
1771
1772 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1773 const SimplifyQuery &Q) {
1774 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1775 return X;
1776 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1777 return X;
1778
1779 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1780 return X;
1781 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
1782 return X;
1783
1784 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1785 return X;
1786
1787 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1788 return X;
1789
1790 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1791 return X;
1792
1793 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1794 return X;
1795 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1796 return X;
1797
1798 return nullptr;
1799 }
1800
1801 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1802 const InstrInfoQuery &IIQ) {
1803 // (icmp (add V, C0), C1) | (icmp V, C0)
1804 ICmpInst::Predicate Pred0, Pred1;
1805 const APInt *C0, *C1;
1806 Value *V;
1807 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1808 return nullptr;
1809
1810 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1811 return nullptr;
1812
1813 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1814 if (AddInst->getOperand(1) != Op1->getOperand(1))
1815 return nullptr;
1816
1817 Type *ITy = Op0->getType();
1818 bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1819 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1820
1821 const APInt Delta = *C1 - *C0;
1822 if (C0->isStrictlyPositive()) {
1823 if (Delta == 2) {
1824 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1825 return getTrue(ITy);
1826 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1827 return getTrue(ITy);
1828 }
1829 if (Delta == 1) {
1830 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1831 return getTrue(ITy);
1832 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1833 return getTrue(ITy);
1834 }
1835 }
1836 if (C0->getBoolValue() && isNUW) {
1837 if (Delta == 2)
1838 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1839 return getTrue(ITy);
1840 if (Delta == 1)
1841 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1842 return getTrue(ITy);
1843 }
1844
1845 return nullptr;
1846 }
1847
1848 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1849 const SimplifyQuery &Q) {
1850 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1851 return X;
1852 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1853 return X;
1854
1855 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1856 return X;
1857 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
1858 return X;
1859
1860 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1861 return X;
1862
1863 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1864 return X;
1865
1866 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1867 return X;
1868
1869 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1870 return X;
1871 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1872 return X;
1873
1874 return nullptr;
1875 }
1876
1877 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
1878 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1879 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1880 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1881 if (LHS0->getType() != RHS0->getType())
1882 return nullptr;
1883
1884 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1885 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1886 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1887 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1888 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1889 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1890 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1891 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1892 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1893 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1894 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1895 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1896 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
1897 return RHS;
1898
1899 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1900 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1901 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1902 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1903 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1904 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1905 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1906 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1907 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1908 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
1909 return LHS;
1910 }
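  // E.g., (fcmp ord double %x, 1.0) & (fcmp ord double %x, %y) --> the RHS
  // compare: the constant is never NaN, so the LHS only asserts that %x is
  // ordered, which "fcmp ord %x, %y" already implies.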
1911
1912 return nullptr;
1913 }
1914
1915 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
1916 Value *Op0, Value *Op1, bool IsAnd) {
1917   // Look through casts of the 'and'/'or' operands to find compares.
1918 auto *Cast0 = dyn_cast<CastInst>(Op0);
1919 auto *Cast1 = dyn_cast<CastInst>(Op1);
1920 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1921 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1922 Op0 = Cast0->getOperand(0);
1923 Op1 = Cast1->getOperand(0);
1924 }
1925
1926 Value *V = nullptr;
1927 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1928 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1929 if (ICmp0 && ICmp1)
1930 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1931 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1932
1933 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1934 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1935 if (FCmp0 && FCmp1)
1936 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
1937
1938 if (!V)
1939 return nullptr;
1940 if (!Cast0)
1941 return V;
1942
1943 // If we looked through casts, we can only handle a constant simplification
1944 // because we are not allowed to create a cast instruction here.
1945 if (auto *C = dyn_cast<Constant>(V))
1946 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1947
1948 return nullptr;
1949 }
1950
1951 /// Given a bitwise logic op, check if the operands are add/sub with a common
1952 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
1953 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1954 Instruction::BinaryOps Opcode) {
1955 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1956 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1957 Value *X;
1958 Constant *C1, *C2;
1959 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1960 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1961 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
1962 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
1963 if (ConstantExpr::getNot(C1) == C2) {
1964 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1965 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1966 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
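      // Concrete instance with C == 5: ~C == -6, and (-6 - X) == ~(X + 5)
      // because -A - 1 == ~A for any A.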
1967 Type *Ty = Op0->getType();
1968 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1969 : ConstantInt::getAllOnesValue(Ty);
1970 }
1971 }
1972 return nullptr;
1973 }
1974
1975 /// Given operands for an And, see if we can fold the result.
1976 /// If not, this returns null.
1977 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1978 unsigned MaxRecurse) {
1979 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
1980 return C;
1981
1982 // X & undef -> 0
1983 if (Q.isUndefValue(Op1))
1984 return Constant::getNullValue(Op0->getType());
1985
1986 // X & X = X
1987 if (Op0 == Op1)
1988 return Op0;
1989
1990 // X & 0 = 0
1991 if (match(Op1, m_Zero()))
1992 return Constant::getNullValue(Op0->getType());
1993
1994 // X & -1 = X
1995 if (match(Op1, m_AllOnes()))
1996 return Op0;
1997
1998 // A & ~A = ~A & A = 0
1999 if (match(Op0, m_Not(m_Specific(Op1))) ||
2000 match(Op1, m_Not(m_Specific(Op0))))
2001 return Constant::getNullValue(Op0->getType());
2002
2003 // (A | ?) & A = A
2004 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2005 return Op1;
2006
2007 // A & (A | ?) = A
2008 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2009 return Op0;
2010
2011 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2012 return V;
2013
2014 // A mask that only clears known zeros of a shifted value is a no-op.
2015 Value *X;
2016 const APInt *Mask;
2017 const APInt *ShAmt;
2018 if (match(Op1, m_APInt(Mask))) {
2019 // If all bits in the inverted and shifted mask are clear:
2020 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2021 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2022 (~(*Mask)).lshr(*ShAmt).isNullValue())
2023 return Op0;
2024
2025 // If all bits in the inverted and shifted mask are clear:
2026 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2027 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2028 (~(*Mask)).shl(*ShAmt).isNullValue())
2029 return Op0;
2030 }
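    // E.g., "and (shl i8 %x, 4), 0xF0" --> "shl i8 %x, 4": the shl already
    // cleared the low 4 bits, the only bits this mask would clear.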
2031
2032 // If we have a multiplication overflow check that is being 'and'ed with a
2033 // check that one of the multipliers is not zero, we can omit the 'and', and
2034 // only keep the overflow check.
2035 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2036 return Op1;
2037 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
2038 return Op0;
2039
2040 // A & (-A) = A if A is a power of two or zero.
2041 if (match(Op0, m_Neg(m_Specific(Op1))) ||
2042 match(Op1, m_Neg(m_Specific(Op0)))) {
2043 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2044 Q.DT))
2045 return Op0;
2046 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2047 Q.DT))
2048 return Op1;
2049 }
2050
2051 // This is a similar pattern used for checking if a value is a power-of-2:
2052 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2053 // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
2054 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2055 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2056 return Constant::getNullValue(Op1->getType());
2057 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2058 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2059 return Constant::getNullValue(Op0->getType());
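  // E.g., if %a is known to be a power of 2 (or zero), "and %a, (add %a, -1)"
  // is %a & (%a - 1), which clears the lone set bit --> 0.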
2060
2061 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2062 return V;
2063
2064 // Try some generic simplifications for associative operations.
2065 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
2066 MaxRecurse))
2067 return V;
2068
2069 // And distributes over Or. Try some generic simplifications based on this.
2070 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2071 Instruction::Or, Q, MaxRecurse))
2072 return V;
2073
2074 // And distributes over Xor. Try some generic simplifications based on this.
2075 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2076 Instruction::Xor, Q, MaxRecurse))
2077 return V;
2078
2079 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2080 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2081 // A & (A && B) -> A && B
2082 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2083 return Op1;
2084 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2085 return Op0;
2086 }
2087 // If the operation is with the result of a select instruction, check
2088 // whether operating on either branch of the select always yields the same
2089 // value.
2090 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
2091 MaxRecurse))
2092 return V;
2093 }
2094
2095 // If the operation is with the result of a phi instruction, check whether
2096 // operating on all incoming values of the phi always yields the same value.
2097 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2098 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
2099 MaxRecurse))
2100 return V;
2101
2102 // Assuming the effective width of Y is not larger than A, i.e. all bits
2103 // from X and Y are disjoint in (X << A) | Y,
2104 // if the mask of this AND op covers all bits of X or Y, while it covers
2105 // no bits from the other, we can bypass this AND op. E.g.,
2106 // ((X << A) | Y) & Mask -> Y,
2107 // if Mask = ((1 << effective_width_of(Y)) - 1)
2108 // ((X << A) | Y) & Mask -> X << A,
2109 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2110 // SimplifyDemandedBits in InstCombine can optimize the general case.
2111 // This pattern aims to help other passes for a common case.
2112 Value *Y, *XShifted;
2113 if (match(Op1, m_APInt(Mask)) &&
2114 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2115 m_Value(XShifted)),
2116 m_Value(Y)))) {
2117 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2118 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2119 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2120 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
2121 if (EffWidthY <= ShftCnt) {
2122 const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
2123 Q.DT);
2124 const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
2125 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2126 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2127 // If the mask is extracting all bits from X or Y as is, we can skip
2128 // this AND op.
2129 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2130 return Y;
2131 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2132 return XShifted;
2133 }
2134 }
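  // E.g., i32: Op0 = (X nuw<< 16) | Y with Y known u< 65536. A mask of 0xFFFF
  // extracts exactly Y, while a mask of 0xFFFF0000 extracts exactly X << 16.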
2135
2136 return nullptr;
2137 }
2138
2139 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2140 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
2141 }
2142
2143 /// Given operands for an Or, see if we can fold the result.
2144 /// If not, this returns null.
2145 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2146 unsigned MaxRecurse) {
2147 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2148 return C;
2149
2150 // X | undef -> -1
2151 // X | -1 = -1
2152 // Do not return Op1 because it may contain undef elements if it's a vector.
2153 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2154 return Constant::getAllOnesValue(Op0->getType());
2155
2156 // X | X = X
2157 // X | 0 = X
2158 if (Op0 == Op1 || match(Op1, m_Zero()))
2159 return Op0;
2160
2161 // A | ~A = ~A | A = -1
2162 if (match(Op0, m_Not(m_Specific(Op1))) ||
2163 match(Op1, m_Not(m_Specific(Op0))))
2164 return Constant::getAllOnesValue(Op0->getType());
2165
2166 // (A & ?) | A = A
2167 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
2168 return Op1;
2169
2170 // A | (A & ?) = A
2171 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
2172 return Op0;
2173
2174 // ~(A & ?) | A = -1
2175 if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
2176 return Constant::getAllOnesValue(Op1->getType());
2177
2178 // A | ~(A & ?) = -1
2179 if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
2180 return Constant::getAllOnesValue(Op0->getType());
2181
2182 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2183 return V;
2184
2185 Value *A, *B, *NotA;
2186 // (A & ~B) | (A ^ B) -> (A ^ B)
2187 // (~B & A) | (A ^ B) -> (A ^ B)
2188 // (A & ~B) | (B ^ A) -> (B ^ A)
2189 // (~B & A) | (B ^ A) -> (B ^ A)
2190 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2191 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2192 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2193 return Op1;
2194
2195 // Commute the 'or' operands.
2196 // (A ^ B) | (A & ~B) -> (A ^ B)
2197 // (A ^ B) | (~B & A) -> (A ^ B)
2198 // (B ^ A) | (A & ~B) -> (B ^ A)
2199 // (B ^ A) | (~B & A) -> (B ^ A)
2200 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2201 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2202 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2203 return Op0;
2204
2205 // (A & B) | (~A ^ B) -> (~A ^ B)
2206 // (B & A) | (~A ^ B) -> (~A ^ B)
2207 // (A & B) | (B ^ ~A) -> (B ^ ~A)
2208 // (B & A) | (B ^ ~A) -> (B ^ ~A)
2209 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2210 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2211 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2212 return Op1;
2213
2214 // Commute the 'or' operands.
2215 // (~A ^ B) | (A & B) -> (~A ^ B)
2216 // (~A ^ B) | (B & A) -> (~A ^ B)
2217 // (B ^ ~A) | (A & B) -> (B ^ ~A)
2218 // (B ^ ~A) | (B & A) -> (B ^ ~A)
2219 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2220 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2221 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2222 return Op0;
2223
2224 // (~A & B) | ~(A | B) --> ~A
2225 // (~A & B) | ~(B | A) --> ~A
2226 // (B & ~A) | ~(A | B) --> ~A
2227 // (B & ~A) | ~(B | A) --> ~A
2228 if (match(Op0, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2229 m_Value(B))) &&
2230 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2231 return NotA;
2232
2233 // Commute the 'or' operands.
2234 // ~(A | B) | (~A & B) --> ~A
2235 // ~(B | A) | (~A & B) --> ~A
2236 // ~(A | B) | (B & ~A) --> ~A
2237 // ~(B | A) | (B & ~A) --> ~A
2238 if (match(Op1, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2239 m_Value(B))) &&
2240 match(Op0, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2241 return NotA;
2242
2243 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2244 return V;
2245
2246   // If we have a (negated) multiplication overflow check that is being 'or'ed
2247   // with a check that one of the multipliers is zero, we can omit the 'or',
2248   // and only keep the overflow check.
2249 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2250 return Op1;
2251 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2252 return Op0;
2253
2254 // Try some generic simplifications for associative operations.
2255 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2256 MaxRecurse))
2257 return V;
2258
2259 // Or distributes over And. Try some generic simplifications based on this.
2260 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2261 Instruction::And, Q, MaxRecurse))
2262 return V;
2263
2264 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2265 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2266 // A | (A || B) -> A || B
2267 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2268 return Op1;
2269 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2270 return Op0;
2271 }
2272 // If the operation is with the result of a select instruction, check
2273 // whether operating on either branch of the select always yields the same
2274 // value.
2275 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2276 MaxRecurse))
2277 return V;
2278 }
2279
2280 // (A & C1)|(B & C2)
2281 const APInt *C1, *C2;
2282 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2283 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2284 if (*C1 == ~*C2) {
2285 // (A & C1)|(B & C2)
2286 // If we have: ((V + N) & C1) | (V & C2)
2287 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2288 // replace with V+N.
2289 Value *N;
2290 if (C2->isMask() && // C2 == 0+1+
2291 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2292 // Add commutes, try both ways.
2293 if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2294 return A;
2295 }
2296 // Or commutes, try both ways.
2297 if (C1->isMask() &&
2298 match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2299 // Add commutes, try both ways.
2300 if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2301 return B;
2302 }
2303 }
2304 }
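  // E.g., i8: ((V + 16) & 0xF0) | (V & 0x0F) --> V + 16, because adding 16
  // leaves the low 4 bits of V untouched.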
2305
2306 // If the operation is with the result of a phi instruction, check whether
2307 // operating on all incoming values of the phi always yields the same value.
2308 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2309 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2310 return V;
2311
2312 return nullptr;
2313 }
2314
2315 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2316 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
2317 }
2318
2319 /// Given operands for a Xor, see if we can fold the result.
2320 /// If not, this returns null.
2321 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2322 unsigned MaxRecurse) {
2323 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2324 return C;
2325
2326 // A ^ undef -> undef
2327 if (Q.isUndefValue(Op1))
2328 return Op1;
2329
2330 // A ^ 0 = A
2331 if (match(Op1, m_Zero()))
2332 return Op0;
2333
2334 // A ^ A = 0
2335 if (Op0 == Op1)
2336 return Constant::getNullValue(Op0->getType());
2337
2338 // A ^ ~A = ~A ^ A = -1
2339 if (match(Op0, m_Not(m_Specific(Op1))) ||
2340 match(Op1, m_Not(m_Specific(Op0))))
2341 return Constant::getAllOnesValue(Op0->getType());
2342
2343 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2344 return V;
2345
2346 // Try some generic simplifications for associative operations.
2347 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2348 MaxRecurse))
2349 return V;
2350
2351 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2352 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2353 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2354 // only if B and C are equal. If B and C are equal then (since we assume
2355 // that operands have already been simplified) "select(cond, B, C)" should
2356 // have been simplified to the common value of B and C already. Analysing
2357 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2358 // for threading over phi nodes.
2359
2360 return nullptr;
2361 }
2362
2363 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2364 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2365 }
2366
2367
2368 static Type *GetCompareTy(Value *Op) {
2369 return CmpInst::makeCmpResultType(Op->getType());
2370 }
2371
2372 /// Rummage around inside V looking for something equivalent to the comparison
2373 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2374 /// Helper function for analyzing max/min idioms.
2375 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2376 Value *LHS, Value *RHS) {
2377 SelectInst *SI = dyn_cast<SelectInst>(V);
2378 if (!SI)
2379 return nullptr;
2380 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2381 if (!Cmp)
2382 return nullptr;
2383 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2384 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2385 return Cmp;
2386 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2387 LHS == CmpRHS && RHS == CmpLHS)
2388 return Cmp;
2389 return nullptr;
2390 }
2391
2392 // A significant optimization not implemented here is assuming that alloca
2393 // addresses are not equal to incoming argument values. They don't *alias*,
2394 // as we say, but that doesn't mean they aren't equal, so we take a
2395 // conservative approach.
2396 //
2397 // This is inspired in part by C++11 5.10p1:
2398 // "Two pointers of the same type compare equal if and only if they are both
2399 // null, both point to the same function, or both represent the same
2400 // address."
2401 //
2402 // This is pretty permissive.
2403 //
2404 // It's also partly due to C11 6.5.9p6:
2405 // "Two pointers compare equal if and only if both are null pointers, both are
2406 // pointers to the same object (including a pointer to an object and a
2407 // subobject at its beginning) or function, both are pointers to one past the
2408 // last element of the same array object, or one is a pointer to one past the
2409 // end of one array object and the other is a pointer to the start of a
2410 // different array object that happens to immediately follow the first array
2411 // object in the address space."
2412 //
2413 // C11's version is more restrictive, however there's no reason why an argument
2414 // couldn't be a one-past-the-end value for a stack object in the caller and be
2415 // equal to the beginning of a stack object in the callee.
2416 //
2417 // If the C and C++ standards are ever made sufficiently restrictive in this
2418 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2419 // this optimization.
2420 static Constant *
2421 computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2422 const SimplifyQuery &Q) {
2423 const DataLayout &DL = Q.DL;
2424 const TargetLibraryInfo *TLI = Q.TLI;
2425 const DominatorTree *DT = Q.DT;
2426 const Instruction *CxtI = Q.CxtI;
2427 const InstrInfoQuery &IIQ = Q.IIQ;
2428
2429 // First, skip past any trivial no-ops.
2430 LHS = LHS->stripPointerCasts();
2431 RHS = RHS->stripPointerCasts();
2432
2433 // A non-null pointer is not equal to a null pointer.
2434 if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2435 llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2436 IIQ.UseInstrInfo))
2437 return ConstantInt::get(GetCompareTy(LHS),
2438 !CmpInst::isTrueWhenEqual(Pred));
2439
2440 // We can only fold certain predicates on pointer comparisons.
2441 switch (Pred) {
2442 default:
2443 return nullptr;
2444
2445   // Equality comparisons are easy to fold.
2446 case CmpInst::ICMP_EQ:
2447 case CmpInst::ICMP_NE:
2448 break;
2449
2450 // We can only handle unsigned relational comparisons because 'inbounds' on
2451 // a GEP only protects against unsigned wrapping.
2452 case CmpInst::ICMP_UGT:
2453 case CmpInst::ICMP_UGE:
2454 case CmpInst::ICMP_ULT:
2455 case CmpInst::ICMP_ULE:
2456 // However, we have to switch them to their signed variants to handle
2457 // negative indices from the base pointer.
2458 Pred = ICmpInst::getSignedPredicate(Pred);
2459 break;
2460 }
2461
2462 // Strip off any constant offsets so that we can reason about them.
2463 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2464 // here and compare base addresses like AliasAnalysis does, however there are
2465 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2466 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2467 // doesn't need to guarantee pointer inequality when it says NoAlias.
2468 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2469 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2470
2471 // If LHS and RHS are related via constant offsets to the same base
2472 // value, we can replace it with an icmp which just compares the offsets.
2473 if (LHS == RHS)
2474 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2475
2476 // Various optimizations for (in)equality comparisons.
2477 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2478 // Different non-empty allocations that exist at the same time have
2479 // different addresses (if the program can tell). Global variables always
2480 // exist, so they always exist during the lifetime of each other and all
2481 // allocas. Two different allocas usually have different addresses...
2482 //
2483 // However, if there's an @llvm.stackrestore dynamically in between two
2484 // allocas, they may have the same address. It's tempting to reduce the
2485 // scope of the problem by only looking at *static* allocas here. That would
2486 // cover the majority of allocas while significantly reducing the likelihood
2487 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2488 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2489 // an entry block. Also, if we have a block that's not attached to a
2490 // function, we can't tell if it's "static" under the current definition.
2491 // Theoretically, this problem could be fixed by creating a new kind of
2492     // instruction specifically for static allocas. Such a new instruction
2493 // could be required to be at the top of the entry block, thus preventing it
2494 // from being subject to a @llvm.stackrestore. Instcombine could even
2495 // convert regular allocas into these special allocas. It'd be nifty.
2496 // However, until then, this problem remains open.
2497 //
2498 // So, we'll assume that two non-empty allocas have different addresses
2499 // for now.
2500 //
2501 // With all that, if the offsets are within the bounds of their allocations
2502 // (and not one-past-the-end! so we can't use inbounds!), and their
2503 // allocations aren't the same, the pointers are not equal.
2504 //
2505 // Note that it's not necessary to check for LHS being a global variable
2506 // address, due to canonicalization and constant folding.
2507 if (isa<AllocaInst>(LHS) &&
2508 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2509 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2510 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2511 uint64_t LHSSize, RHSSize;
2512 ObjectSizeOpts Opts;
2513 Opts.NullIsUnknownSize =
2514 NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2515 if (LHSOffsetCI && RHSOffsetCI &&
2516 getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2517 getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2518 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2519 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2520 if (!LHSOffsetValue.isNegative() &&
2521 !RHSOffsetValue.isNegative() &&
2522 LHSOffsetValue.ult(LHSSize) &&
2523 RHSOffsetValue.ult(RHSSize)) {
2524 return ConstantInt::get(GetCompareTy(LHS),
2525 !CmpInst::isTrueWhenEqual(Pred));
2526 }
2527 }
2528
2529 // Repeat the above check but this time without depending on DataLayout
2530 // or being able to compute a precise size.
2531 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2532 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2533 LHSOffset->isNullValue() &&
2534 RHSOffset->isNullValue())
2535 return ConstantInt::get(GetCompareTy(LHS),
2536 !CmpInst::isTrueWhenEqual(Pred));
2537 }
2538
2539     // Even if a non-inbounds GEP occurs along the path we can still optimize
2540 // equality comparisons concerning the result. We avoid walking the whole
2541 // chain again by starting where the last calls to
2542     // stripAndComputeConstantOffsets left off and accumulating the offsets.
2543 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2544 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2545 if (LHS == RHS)
2546 return ConstantExpr::getICmp(Pred,
2547 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2548 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2549
2550 // If one side of the equality comparison must come from a noalias call
2551 // (meaning a system memory allocation function), and the other side must
2552 // come from a pointer that cannot overlap with dynamically-allocated
2553 // memory within the lifetime of the current function (allocas, byval
2554 // arguments, globals), then determine the comparison result here.
2555 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2556 getUnderlyingObjects(LHS, LHSUObjs);
2557 getUnderlyingObjects(RHS, RHSUObjs);
2558
2559 // Is the set of underlying objects all noalias calls?
2560 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2561 return all_of(Objects, isNoAliasCall);
2562 };
2563
2564     // Is the set of underlying objects all things which must be disjoint from
2565     // noalias calls? For allocas, we consider only static ones (dynamic
2566 // allocas might be transformed into calls to malloc not simultaneously
2567 // live with the compared-to allocation). For globals, we exclude symbols
2568     // that might be resolved lazily to symbols in another dynamically-loaded
2569 // library (and, thus, could be malloc'ed by the implementation).
2570 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2571 return all_of(Objects, [](const Value *V) {
2572 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2573 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2574 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2575 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2576 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2577 !GV->isThreadLocal();
2578 if (const Argument *A = dyn_cast<Argument>(V))
2579 return A->hasByValAttr();
2580 return false;
2581 });
2582 };
2583
2584 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2585 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2586 return ConstantInt::get(GetCompareTy(LHS),
2587 !CmpInst::isTrueWhenEqual(Pred));
2588
2589     // Fold comparisons for a non-escaping pointer even if the allocation call
2590     // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2591 // dynamic allocation call could be either of the operands.
2592 Value *MI = nullptr;
2593 if (isAllocLikeFn(LHS, TLI) &&
2594 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2595 MI = LHS;
2596 else if (isAllocLikeFn(RHS, TLI) &&
2597 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2598 MI = RHS;
2599 // FIXME: We should also fold the compare when the pointer escapes, but the
2600     // compare dominates the pointer escape.
2601 if (MI && !PointerMayBeCaptured(MI, true, true))
2602 return ConstantInt::get(GetCompareTy(LHS),
2603 CmpInst::isFalseWhenEqual(Pred));
2604 }
2605
2606 // Otherwise, fail.
2607 return nullptr;
2608 }
2609
2610 /// Fold an icmp when its operands have i1 scalar type.
2611 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2612 Value *RHS, const SimplifyQuery &Q) {
2613 Type *ITy = GetCompareTy(LHS); // The return type.
2614 Type *OpTy = LHS->getType(); // The operand type.
2615 if (!OpTy->isIntOrIntVectorTy(1))
2616 return nullptr;
2617
2618 // A boolean compared to true/false can be simplified in 14 out of the 20
2619 // (10 predicates * 2 constants) possible combinations. Cases not handled here
2620 // require a 'not' of the LHS, so those must be transformed in InstCombine.
2621 if (match(RHS, m_Zero())) {
2622 switch (Pred) {
2623 case CmpInst::ICMP_NE: // X != 0 -> X
2624 case CmpInst::ICMP_UGT: // X >u 0 -> X
2625 case CmpInst::ICMP_SLT: // X <s 0 -> X
2626 return LHS;
2627
2628 case CmpInst::ICMP_ULT: // X <u 0 -> false
2629 case CmpInst::ICMP_SGT: // X >s 0 -> false
2630 return getFalse(ITy);
2631
2632 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2633 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2634 return getTrue(ITy);
2635
2636 default: break;
2637 }
2638 } else if (match(RHS, m_One())) {
2639 switch (Pred) {
2640 case CmpInst::ICMP_EQ: // X == 1 -> X
2641 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2642 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2643 return LHS;
2644
2645 case CmpInst::ICMP_UGT: // X >u 1 -> false
2646 case CmpInst::ICMP_SLT: // X <s -1 -> false
2647 return getFalse(ITy);
2648
2649 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2650 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2651 return getTrue(ITy);
2652
2653 default: break;
2654 }
2655 }
2656
2657 switch (Pred) {
2658 default:
2659 break;
2660 case ICmpInst::ICMP_UGE:
2661 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2662 return getTrue(ITy);
2663 break;
2664 case ICmpInst::ICMP_SGE:
2665 /// For signed comparison, the values for an i1 are 0 and -1
2666 /// respectively. This maps into a truth table of:
2667 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2668 /// 0 | 0 | 1 (0 >= 0) | 1
2669 /// 0 | 1 | 1 (0 >= -1) | 1
2670 /// 1 | 0 | 0 (-1 >= 0) | 0
2671 /// 1 | 1 | 1 (-1 >= -1) | 1
2672 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2673 return getTrue(ITy);
2674 break;
2675 case ICmpInst::ICMP_ULE:
2676 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2677 return getTrue(ITy);
2678 break;
2679 }
2680
2681 return nullptr;
2682 }
2683
2684 /// Try hard to fold icmp with zero RHS because this is a common case.
2685 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2686 Value *RHS, const SimplifyQuery &Q) {
2687 if (!match(RHS, m_Zero()))
2688 return nullptr;
2689
2690 Type *ITy = GetCompareTy(LHS); // The return type.
2691 switch (Pred) {
2692 default:
2693 llvm_unreachable("Unknown ICmp predicate!");
2694 case ICmpInst::ICMP_ULT:
2695 return getFalse(ITy);
2696 case ICmpInst::ICMP_UGE:
2697 return getTrue(ITy);
2698 case ICmpInst::ICMP_EQ:
2699 case ICmpInst::ICMP_ULE:
2700 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2701 return getFalse(ITy);
2702 break;
2703 case ICmpInst::ICMP_NE:
2704 case ICmpInst::ICMP_UGT:
2705 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2706 return getTrue(ITy);
2707 break;
2708 case ICmpInst::ICMP_SLT: {
2709 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2710 if (LHSKnown.isNegative())
2711 return getTrue(ITy);
2712 if (LHSKnown.isNonNegative())
2713 return getFalse(ITy);
2714 break;
2715 }
2716 case ICmpInst::ICMP_SLE: {
2717 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2718 if (LHSKnown.isNegative())
2719 return getTrue(ITy);
2720 if (LHSKnown.isNonNegative() &&
2721 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2722 return getFalse(ITy);
2723 break;
2724 }
2725 case ICmpInst::ICMP_SGE: {
2726 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2727 if (LHSKnown.isNegative())
2728 return getFalse(ITy);
2729 if (LHSKnown.isNonNegative())
2730 return getTrue(ITy);
2731 break;
2732 }
2733 case ICmpInst::ICMP_SGT: {
2734 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2735 if (LHSKnown.isNegative())
2736 return getFalse(ITy);
2737 if (LHSKnown.isNonNegative() &&
2738 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2739 return getTrue(ITy);
2740 break;
2741 }
2742 }
2743
2744 return nullptr;
2745 }
2746
2747 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2748 Value *RHS, const InstrInfoQuery &IIQ) {
2749 Type *ITy = GetCompareTy(RHS); // The return type.
2750
2751 Value *X;
2752 // Sign-bit checks can be optimized to true/false after unsigned
2753 // floating-point casts:
2754 // icmp slt (bitcast (uitofp X)), 0 --> false
2755 // icmp sgt (bitcast (uitofp X)), -1 --> true
2756 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2757 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2758 return ConstantInt::getFalse(ITy);
2759 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2760 return ConstantInt::getTrue(ITy);
2761 }
2762
2763 const APInt *C;
2764 if (!match(RHS, m_APIntAllowUndef(C)))
2765 return nullptr;
2766
2767 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2768 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2769 if (RHS_CR.isEmptySet())
2770 return ConstantInt::getFalse(ITy);
2771 if (RHS_CR.isFullSet())
2772 return ConstantInt::getTrue(ITy);
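// For instance, "icmp ult i32 %x, 0" has an empty exact region and folds to
// false, while "icmp uge i32 %x, 0" has a full region and folds to true.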
2773
2774 ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo);
2775 if (!LHS_CR.isFullSet()) {
2776 if (RHS_CR.contains(LHS_CR))
2777 return ConstantInt::getTrue(ITy);
2778 if (RHS_CR.inverse().contains(LHS_CR))
2779 return ConstantInt::getFalse(ITy);
2780 }
2781
2782 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
2783 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
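// For example, "icmp eq (mul nuw i8 %x, 10), 7" folds to false: with nuw the
// product cannot wrap, so it is always a true multiple of 10, and
// urem(7, 10) != 0.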
2784 const APInt *MulC;
2785 if (ICmpInst::isEquality(Pred) &&
2786 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2787 *MulC != 0 && C->urem(*MulC) != 0) ||
2788 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2789 *MulC != 0 && C->srem(*MulC) != 0)))
2790 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
2791
2792 return nullptr;
2793 }
2794
2795 static Value *simplifyICmpWithBinOpOnLHS(
2796 CmpInst::Predicate Pred, BinaryOperator *LBO, Value *RHS,
2797 const SimplifyQuery &Q, unsigned MaxRecurse) {
2798 Type *ITy = GetCompareTy(RHS); // The return type.
2799
2800 Value *Y = nullptr;
2801 // icmp pred (or X, Y), X
2802 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2803 if (Pred == ICmpInst::ICMP_ULT)
2804 return getFalse(ITy);
2805 if (Pred == ICmpInst::ICMP_UGE)
2806 return getTrue(ITy);
2807
2808 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2809 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2810 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2811 if (RHSKnown.isNonNegative() && YKnown.isNegative())
2812 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2813 if (RHSKnown.isNegative() || YKnown.isNonNegative())
2814 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2815 }
2816 }
2817
2818 // icmp pred (and X, Y), X
2819 if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2820 if (Pred == ICmpInst::ICMP_UGT)
2821 return getFalse(ITy);
2822 if (Pred == ICmpInst::ICMP_ULE)
2823 return getTrue(ITy);
2824 }
2825
2826 // icmp pred (urem X, Y), Y
2827 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2828 switch (Pred) {
2829 default:
2830 break;
2831 case ICmpInst::ICMP_SGT:
2832 case ICmpInst::ICMP_SGE: {
2833 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2834 if (!Known.isNonNegative())
2835 break;
2836 LLVM_FALLTHROUGH;
2837 }
2838 case ICmpInst::ICMP_EQ:
2839 case ICmpInst::ICMP_UGT:
2840 case ICmpInst::ICMP_UGE:
2841 return getFalse(ITy);
2842 case ICmpInst::ICMP_SLT:
2843 case ICmpInst::ICMP_SLE: {
2844 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2845 if (!Known.isNonNegative())
2846 break;
2847 LLVM_FALLTHROUGH;
2848 }
2849 case ICmpInst::ICMP_NE:
2850 case ICmpInst::ICMP_ULT:
2851 case ICmpInst::ICMP_ULE:
2852 return getTrue(ITy);
2853 }
2854 }
2855
2856 // icmp pred (urem X, Y), X
2857 if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
2858 if (Pred == ICmpInst::ICMP_ULE)
2859 return getTrue(ITy);
2860 if (Pred == ICmpInst::ICMP_UGT)
2861 return getFalse(ITy);
2862 }
2863
2864 // x >> y <=u x
2865 // x udiv y <=u x.
2866 if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2867 match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
2868 // icmp pred (X op Y), X
2869 if (Pred == ICmpInst::ICMP_UGT)
2870 return getFalse(ITy);
2871 if (Pred == ICmpInst::ICMP_ULE)
2872 return getTrue(ITy);
2873 }
2874
2875 // (x*C1)/C2 <= x for C1 <= C2.
2876 // This holds even if the multiplication overflows: Assume that x != 0 and
2877 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
2878 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
2879 //
2880 // Additionally, either the multiplication or the division might be
2881 // represented as a shift:
2882 // (x*C1)>>C2 <= x for C1 <= 2**C2.
2883 // (x<<C1)/C2 <= x for 2**C1 <= C2.
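// Worked example of the overflow case (i8, so M = 256): for x = 3, C1 = 100,
// C2 = 200, x*C1 = 300 wraps to 44, and 44 /u 200 = 0 <= 3, as claimed.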
2884 const APInt *C1, *C2;
2885 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2886 C1->ule(*C2)) ||
2887 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2888 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
2889 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2890 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
2891 if (Pred == ICmpInst::ICMP_UGT)
2892 return getFalse(ITy);
2893 if (Pred == ICmpInst::ICMP_ULE)
2894 return getTrue(ITy);
2895 }
2896
2897 return nullptr;
2898 }
2899
2901 // If only one of the icmp's operands has NSW flags, try to prove that:
2902 //
2903 // icmp slt (x + C1), (x +nsw C2)
2904 //
2905 // is equivalent to:
2906 //
2907 // icmp slt C1, C2
2908 //
2909 // which is true if x + C2 has the NSW flag set and:
2910 // *) C1 < C2 && C1 >= 0, or
2911 // *) C2 < C1 && C1 <= 0.
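// For example (i8, hypothetical values): "icmp slt (add i8 %x, 1),
// (add nsw i8 %x, 2)" folds to true (C1 = 1 < C2 = 2 and C1 >= 0). Without
// nsw on the right-hand add, %x = 126 would give 127 <s -128, i.e. false,
// so the fold would be unsound.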
2912 //
2913 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
2914 Value *RHS) {
2915 // TODO: only support icmp slt for now.
2916 if (Pred != CmpInst::ICMP_SLT)
2917 return false;
2918
2919 // Canonicalize nsw add as RHS.
2920 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
2921 std::swap(LHS, RHS);
2922 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
2923 return false;
2924
2925 Value *X;
2926 const APInt *C1, *C2;
2927 if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
2928 !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
2929 return false;
2930
2931 return (C1->slt(*C2) && C1->isNonNegative()) ||
2932 (C2->slt(*C1) && C1->isNonPositive());
2933 }
2934
2936 /// TODO: A large part of this logic is duplicated in InstCombine's
2937 /// foldICmpBinOp(). We should be able to share that and avoid the code
2938 /// duplication.
2939 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2940 Value *RHS, const SimplifyQuery &Q,
2941 unsigned MaxRecurse) {
2942 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2943 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2944 if (MaxRecurse && (LBO || RBO)) {
2945 // Analyze the case when either LHS or RHS is an add instruction.
2946 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2947 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2948 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2949 if (LBO && LBO->getOpcode() == Instruction::Add) {
2950 A = LBO->getOperand(0);
2951 B = LBO->getOperand(1);
2952 NoLHSWrapProblem =
2953 ICmpInst::isEquality(Pred) ||
2954 (CmpInst::isUnsigned(Pred) &&
2955 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
2956 (CmpInst::isSigned(Pred) &&
2957 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
2958 }
2959 if (RBO && RBO->getOpcode() == Instruction::Add) {
2960 C = RBO->getOperand(0);
2961 D = RBO->getOperand(1);
2962 NoRHSWrapProblem =
2963 ICmpInst::isEquality(Pred) ||
2964 (CmpInst::isUnsigned(Pred) &&
2965 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
2966 (CmpInst::isSigned(Pred) &&
2967 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
2968 }
2969
2970 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
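// For example, "icmp eq (add i32 %x, %y), %x" recurses into the simpler
// query "icmp eq i32 %y, 0".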
2971 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
2972 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
2973 Constant::getNullValue(RHS->getType()), Q,
2974 MaxRecurse - 1))
2975 return V;
2976
2977 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
2978 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
2979 if (Value *V =
2980 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
2981 C == LHS ? D : C, Q, MaxRecurse - 1))
2982 return V;
2983
2984 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
2985 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
2986 trySimplifyICmpWithAdds(Pred, LHS, RHS);
2987 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
2988 // Determine Y and Z in the form icmp (X+Y), (X+Z).
2989 Value *Y, *Z;
2990 if (A == C) {
2991 // C + B == C + D -> B == D
2992 Y = B;
2993 Z = D;
2994 } else if (A == D) {
2995 // D + B == C + D -> B == C
2996 Y = B;
2997 Z = C;
2998 } else if (B == C) {
2999 // A + C == C + D -> A == D
3000 Y = A;
3001 Z = D;
3002 } else {
3003 assert(B == D);
3004 // A + D == C + D -> A == C
3005 Y = A;
3006 Z = C;
3007 }
3008 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3009 return V;
3010 }
3011 }
3012
3013 if (LBO)
3014 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3015 return V;
3016
3017 if (RBO)
3018 if (Value *V = simplifyICmpWithBinOpOnLHS(
3019 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3020 return V;
3021
3022 // 0 - (zext X) pred C
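// For example, "icmp slt (sub i32 0, (zext i8 %b to i32)), 1" folds to true:
// the negated zext is never positive, so it is <s any strictly positive C.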
3023 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3024 const APInt *C;
3025 if (match(RHS, m_APInt(C))) {
3026 if (C->isStrictlyPositive()) {
3027 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3028 return ConstantInt::getTrue(GetCompareTy(RHS));
3029 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3030 return ConstantInt::getFalse(GetCompareTy(RHS));
3031 }
3032 if (C->isNonNegative()) {
3033 if (Pred == ICmpInst::ICMP_SLE)
3034 return ConstantInt::getTrue(GetCompareTy(RHS));
3035 if (Pred == ICmpInst::ICMP_SGT)
3036 return ConstantInt::getFalse(GetCompareTy(RHS));
3037 }
3038 }
3039 }
3040
3041 // If C2 is a power-of-2 and C is not:
3042 // (C2 << X) == C --> false
3043 // (C2 << X) != C --> true
3044 const APInt *C;
3045 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3046 match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3047 // C2 << X can equal zero in some circumstances.
3048 // This simplification might be unsafe if C is zero.
3049 //
3050 // We know it is safe if:
3051 // - The shift is nsw. We can't shift out the one bit.
3052 // - The shift is nuw. We can't shift out the one bit.
3053 // - C2 is one.
3054 // - C isn't zero.
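// For example, "icmp eq (shl nuw i32 16, %x), 12" folds to false: with nuw
// the result stays a power of two, and 12 is not one.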
3055 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3056 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3057 match(LHS, m_Shl(m_One(), m_Value())) || !C->isNullValue()) {
3058 if (Pred == ICmpInst::ICMP_EQ)
3059 return ConstantInt::getFalse(GetCompareTy(RHS));
3060 if (Pred == ICmpInst::ICMP_NE)
3061 return ConstantInt::getTrue(GetCompareTy(RHS));
3062 }
3063 }
3064
3065 // TODO: This is overly constrained. LHS can be any power-of-2.
3066 // (1 << X) >u 0x8000 --> false
3067 // (1 << X) <=u 0x8000 --> true
3068 if (match(LHS, m_Shl(m_One(), m_Value())) && match(RHS, m_SignMask())) {
3069 if (Pred == ICmpInst::ICMP_UGT)
3070 return ConstantInt::getFalse(GetCompareTy(RHS));
3071 if (Pred == ICmpInst::ICMP_ULE)
3072 return ConstantInt::getTrue(GetCompareTy(RHS));
3073 }
3074
3075 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
3076 LBO->getOperand(1) == RBO->getOperand(1)) {
3077 switch (LBO->getOpcode()) {
3078 default:
3079 break;
3080 case Instruction::UDiv:
3081 case Instruction::LShr:
3082 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3083 !Q.IIQ.isExact(RBO))
3084 break;
3085 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3086 RBO->getOperand(0), Q, MaxRecurse - 1))
3087 return V;
3088 break;
3089 case Instruction::SDiv:
3090 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3091 !Q.IIQ.isExact(RBO))
3092 break;
3093 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3094 RBO->getOperand(0), Q, MaxRecurse - 1))
3095 return V;
3096 break;
3097 case Instruction::AShr:
3098 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3099 break;
3100 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3101 RBO->getOperand(0), Q, MaxRecurse - 1))
3102 return V;
3103 break;
3104 case Instruction::Shl: {
3105 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3106 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3107 if (!NUW && !NSW)
3108 break;
3109 if (!NSW && ICmpInst::isSigned(Pred))
3110 break;
3111 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3112 RBO->getOperand(0), Q, MaxRecurse - 1))
3113 return V;
3114 break;
3115 }
3116 }
3117 }
3118 return nullptr;
3119 }
3120
3121 /// Simplify integer comparisons where at least one operand of the compare
3122 /// matches an integer min/max idiom.
3123 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3124 Value *RHS, const SimplifyQuery &Q,
3125 unsigned MaxRecurse) {
3126 Type *ITy = GetCompareTy(LHS); // The return type.
3127 Value *A, *B;
3128 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3129 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3130
3131 // Signed variants on "max(a,b)>=a -> true".
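// For example, "icmp sge (smax i32 %a, %b), %a" folds to true and
// "icmp slt (smax i32 %a, %b), %a" folds to false; these are the ICMP_SGE
// and ICMP_SLT cases of the canonicalized predicate P below.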
3132 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3133 if (A != RHS)
3134 std::swap(A, B); // smax(A, B) pred A.
3135 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3136 // We analyze this as smax(A, B) pred A.
3137 P = Pred;
3138 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3139 (A == LHS || B == LHS)) {
3140 if (A != LHS)
3141 std::swap(A, B); // A pred smax(A, B).
3142 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3143 // We analyze this as smax(A, B) swapped-pred A.
3144 P = CmpInst::getSwappedPredicate(Pred);
3145 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3146 (A == RHS || B == RHS)) {
3147 if (A != RHS)
3148 std::swap(A, B); // smin(A, B) pred A.
3149 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3150 // We analyze this as smax(-A, -B) swapped-pred -A.
3151 // Note that we do not need to actually form -A or -B thanks to EqP.
3152 P = CmpInst::getSwappedPredicate(Pred);
3153 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3154 (A == LHS || B == LHS)) {
3155 if (A != LHS)
3156 std::swap(A, B); // A pred smin(A, B).
3157 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3158 // We analyze this as smax(-A, -B) pred -A.
3159 // Note that we do not need to actually form -A or -B thanks to EqP.
3160 P = Pred;
3161 }
3162 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3163 // Cases correspond to "max(A, B) p A".
3164 switch (P) {
3165 default:
3166 break;
3167 case CmpInst::ICMP_EQ:
3168 case CmpInst::ICMP_SLE:
3169 // Equivalent to "A EqP B". This may be the same as the condition tested
3170 // in the max/min; if so, we can just return that.
3171 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3172 return V;
3173 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3174 return V;
3175 // Otherwise, see if "A EqP B" simplifies.
3176 if (MaxRecurse)
3177 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3178 return V;
3179 break;
3180 case CmpInst::ICMP_NE:
3181 case CmpInst::ICMP_SGT: {
3182 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3183 // Equivalent to "A InvEqP B". This may be the same as the condition
3184 // tested in the max/min; if so, we can just return that.
3185 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3186 return V;
3187 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3188 return V;
3189 // Otherwise, see if "A InvEqP B" simplifies.
3190 if (MaxRecurse)
3191 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3192 return V;
3193 break;
3194 }
3195 case CmpInst::ICMP_SGE:
3196 // Always true.
3197 return getTrue(ITy);
3198 case CmpInst::ICMP_SLT:
3199 // Always false.
3200 return getFalse(ITy);
3201 }
3202 }
3203
3204 // Unsigned variants on "max(a,b)>=a -> true".
3205 P = CmpInst::BAD_ICMP_PREDICATE;
3206 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3207 if (A != RHS)
3208 std::swap(A, B); // umax(A, B) pred A.
3209 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3210 // We analyze this as umax(A, B) pred A.
3211 P = Pred;
3212 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3213 (A == LHS || B == LHS)) {
3214 if (A != LHS)
3215 std::swap(A, B); // A pred umax(A, B).
3216 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3217 // We analyze this as umax(A, B) swapped-pred A.
3218 P = CmpInst::getSwappedPredicate(Pred);
3219 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3220 (A == RHS || B == RHS)) {
3221 if (A != RHS)
3222 std::swap(A, B); // umin(A, B) pred A.
3223 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3224 // We analyze this as umax(-A, -B) swapped-pred -A.
3225 // Note that we do not need to actually form -A or -B thanks to EqP.
3226 P = CmpInst::getSwappedPredicate(Pred);
3227 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3228 (A == LHS || B == LHS)) {
3229 if (A != LHS)
3230 std::swap(A, B); // A pred umin(A, B).
3231 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3232 // We analyze this as umax(-A, -B) pred -A.
3233 // Note that we do not need to actually form -A or -B thanks to EqP.
3234 P = Pred;
3235 }
3236 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3237 // Cases correspond to "max(A, B) p A".
3238 switch (P) {
3239 default:
3240 break;
3241 case CmpInst::ICMP_EQ:
3242 case CmpInst::ICMP_ULE:
3243 // Equivalent to "A EqP B". This may be the same as the condition tested
3244 // in the max/min; if so, we can just return that.
3245 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3246 return V;
3247 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3248 return V;
3249 // Otherwise, see if "A EqP B" simplifies.
3250 if (MaxRecurse)
3251 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3252 return V;
3253 break;
3254 case CmpInst::ICMP_NE:
3255 case CmpInst::ICMP_UGT: {
3256 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3257 // Equivalent to "A InvEqP B". This may be the same as the condition
3258 // tested in the max/min; if so, we can just return that.
3259 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3260 return V;
3261 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3262 return V;
3263 // Otherwise, see if "A InvEqP B" simplifies.
3264 if (MaxRecurse)
3265 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3266 return V;
3267 break;
3268 }
3269 case CmpInst::ICMP_UGE:
3270 return getTrue(ITy);
3271 case CmpInst::ICMP_ULT:
3272 return getFalse(ITy);
3273 }
3274 }
3275
3276 // Comparing a min and a max that share a common operand?
3277 // Canonicalize min operand to RHS.
3278 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3279 match(LHS, m_SMin(m_Value(), m_Value()))) {
3280 std::swap(LHS, RHS);
3281 Pred = ICmpInst::getSwappedPredicate(Pred);
3282 }
3283
3284 Value *C, *D;
3285 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3286 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3287 (A == C || A == D || B == C || B == D)) {
3288 // smax(A, B) >=s smin(A, D) --> true
3289 if (Pred == CmpInst::ICMP_SGE)
3290 return getTrue(ITy);
3291 // smax(A, B) <s smin(A, D) --> false
3292 if (Pred == CmpInst::ICMP_SLT)
3293 return getFalse(ITy);
3294 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3295 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3296 (A == C || A == D || B == C || B == D)) {
3297 // umax(A, B) >=u umin(A, D) --> true
3298 if (Pred == CmpInst::ICMP_UGE)
3299 return getTrue(ITy);
3300 // umax(A, B) <u umin(A, D) --> false
3301 if (Pred == CmpInst::ICMP_ULT)
3302 return getFalse(ITy);
3303 }
3304
3305 return nullptr;
3306 }
3307
3308 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3309 Value *LHS, Value *RHS,
3310 const SimplifyQuery &Q) {
3311 // Gracefully handle instructions that have not been inserted yet.
3312 if (!Q.AC || !Q.CxtI || !Q.CxtI->getParent())
3313 return nullptr;
3314
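// For example (a sketch, assuming such an assume exists in the function):
// given "call void @llvm.assume(i1 %c)" with "%c = icmp ugt i32 %x, 5", a
// dominated "icmp ugt i32 %x, 3" folds to true via isImpliedCondition.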
3315 for (Value *AssumeBaseOp : {LHS, RHS}) {
3316 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3317 if (!AssumeVH)
3318 continue;
3319
3320 CallInst *Assume = cast<CallInst>(AssumeVH);
3321 if (Optional<bool> Imp =
3322 isImpliedCondition(Assume->getArgOperand(0), Predicate, LHS, RHS,
3323 Q.DL))
3324 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3325 return ConstantInt::get(GetCompareTy(LHS), *Imp);
3326 }
3327 }
3328
3329 return nullptr;
3330 }
3331
3332 /// Given operands for an ICmpInst, see if we can fold the result.
3333 /// If not, this returns null.
3334 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3335 const SimplifyQuery &Q, unsigned MaxRecurse) {
3336 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3337 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3338
3339 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3340 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3341 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3342
3343 // If we have a constant, make sure it is on the RHS.
3344 std::swap(LHS, RHS);
3345 Pred = CmpInst::getSwappedPredicate(Pred);
3346 }
3347 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3348
3349 Type *ITy = GetCompareTy(LHS); // The return type.
3350
3351 // For EQ and NE, we can always pick a value for the undef to make the
3352 // predicate pass or fail, so we can return undef.
3353 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3354 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3355 return UndefValue::get(ITy);
3356
3357 // icmp X, X -> true/false
3358 // icmp X, undef -> true/false because undef could be X.
3359 if (LHS == RHS || Q.isUndefValue(RHS))
3360 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3361
3362 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3363 return V;
3364
3365 // TODO: Sink/common this with other potentially expensive calls that use
3366 // ValueTracking? See comment below for isKnownNonEqual().
3367 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3368 return V;
3369
3370 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3371 return V;
3372
3373 // If both operands have range metadata, use the metadata
3374 // to simplify the comparison.
3375 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3376 auto RHS_Instr = cast<Instruction>(RHS);
3377 auto LHS_Instr = cast<Instruction>(LHS);
3378
3379 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3380 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3381 auto RHS_CR = getConstantRangeFromMetadata(
3382 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3383 auto LHS_CR = getConstantRangeFromMetadata(
3384 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3385
3386 if (LHS_CR.icmp(Pred, RHS_CR))
3387 return ConstantInt::getTrue(RHS->getContext());
3388
3389 if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3390 return ConstantInt::getFalse(RHS->getContext());
3391 }
3392 }
3393
3394 // Compare of cast, for example (zext X) != 0 -> X != 0
3395 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3396 Instruction *LI = cast<CastInst>(LHS);
3397 Value *SrcOp = LI->getOperand(0);
3398 Type *SrcTy = SrcOp->getType();
3399 Type *DstTy = LI->getType();
3400
3401 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3402 // if the integer type is the same size as the pointer type.
3403 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3404 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3405 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3406 // Transfer the cast to the constant.
3407 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3408 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3409 Q, MaxRecurse-1))
3410 return V;
3411 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3412 if (RI->getOperand(0)->getType() == SrcTy)
3413 // Compare without the cast.
3414 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3415 Q, MaxRecurse-1))
3416 return V;
3417 }
3418 }
3419
3420 if (isa<ZExtInst>(LHS)) {
3421 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3422 // same type.
3423 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3424 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3425 // Compare X and Y. Note that signed predicates become unsigned.
3426 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3427 SrcOp, RI->getOperand(0), Q,
3428 MaxRecurse-1))
3429 return V;
3430 }
3431 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3432 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3433 if (SrcOp == RI->getOperand(0)) {
3434 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3435 return ConstantInt::getTrue(ITy);
3436 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3437 return ConstantInt::getFalse(ITy);
3438 }
3439 }
3440 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3441 // too. If not, then try to deduce the result of the comparison.
3442 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3443 // Compute the constant that would happen if we truncated to SrcTy then
3444 // re-extended to DstTy.
3445 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3446 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3447
3448 // If the re-extended constant didn't change then this is effectively
3449 // also a case of comparing two zero-extended values.
3450 if (RExt == CI && MaxRecurse)
3451 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3452 SrcOp, Trunc, Q, MaxRecurse-1))
3453 return V;
3454
3455 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3456 // there. Use this to work out the result of the comparison.
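// For example, in "icmp ult (zext i8 %x to i32), 256" the constant does not
// survive the trunc/zext round-trip (it becomes 0), and since %x is at most
// 255 the unsigned compare folds to true below.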
3457 if (RExt != CI) {
3458 switch (Pred) {
3459 default: llvm_unreachable("Unknown ICmp predicate!");
3460 // LHS <u RHS.
3461 case ICmpInst::ICMP_EQ:
3462 case ICmpInst::ICMP_UGT:
3463 case ICmpInst::ICMP_UGE:
3464 return ConstantInt::getFalse(CI->getContext());
3465
3466 case ICmpInst::ICMP_NE:
3467 case ICmpInst::ICMP_ULT:
3468 case ICmpInst::ICMP_ULE:
3469 return ConstantInt::getTrue(CI->getContext());
3470
3471 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3472 // is non-negative then LHS <s RHS.
3473 case ICmpInst::ICMP_SGT:
3474 case ICmpInst::ICMP_SGE:
3475 return CI->getValue().isNegative() ?
3476 ConstantInt::getTrue(CI->getContext()) :
3477 ConstantInt::getFalse(CI->getContext());
3478
3479 case ICmpInst::ICMP_SLT:
3480 case ICmpInst::ICMP_SLE:
3481 return CI->getValue().isNegative() ?
3482 ConstantInt::getFalse(CI->getContext()) :
3483 ConstantInt::getTrue(CI->getContext());
3484 }
3485 }
3486 }
3487 }
3488
3489 if (isa<SExtInst>(LHS)) {
3490 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3491 // same type.
3492 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3493 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3494 // Compare X and Y. Note that the predicate does not change.
3495 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3496 Q, MaxRecurse-1))
3497 return V;
3498 }
3499 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3500 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3501 if (SrcOp == RI->getOperand(0)) {
3502 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3503 return ConstantInt::getTrue(ITy);
3504 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3505 return ConstantInt::getFalse(ITy);
3506 }
3507 }
3508 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3509 // too. If not, then try to deduce the result of the comparison.
3510 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3511 // Compute the constant that would happen if we truncated to SrcTy then
3512 // re-extended to DstTy.
3513 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3514 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3515
3516 // If the re-extended constant didn't change then this is effectively
3517 // also a case of comparing two sign-extended values.
3518 if (RExt == CI && MaxRecurse)
3519 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3520 return V;
3521
3522 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3523 // bits there. Use this to work out the result of the comparison.
3524 if (RExt != CI) {
3525 switch (Pred) {
3526 default: llvm_unreachable("Unknown ICmp predicate!");
3527 case ICmpInst::ICMP_EQ:
3528 return ConstantInt::getFalse(CI->getContext());
3529 case ICmpInst::ICMP_NE:
3530 return ConstantInt::getTrue(CI->getContext());
3531
3532 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3533 // LHS >s RHS.
3534 case ICmpInst::ICMP_SGT:
3535 case ICmpInst::ICMP_SGE:
3536 return CI->getValue().isNegative() ?
3537 ConstantInt::getTrue(CI->getContext()) :
3538 ConstantInt::getFalse(CI->getContext());
3539 case ICmpInst::ICMP_SLT:
3540 case ICmpInst::ICMP_SLE:
3541 return CI->getValue().isNegative() ?
3542 ConstantInt::getFalse(CI->getContext()) :
3543 ConstantInt::getTrue(CI->getContext());
3544
3545 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3546 // LHS >u RHS.
3547 case ICmpInst::ICMP_UGT:
3548 case ICmpInst::ICMP_UGE:
3549 // Comparison is true iff the LHS <s 0.
3550 if (MaxRecurse)
3551 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3552 Constant::getNullValue(SrcTy),
3553 Q, MaxRecurse-1))
3554 return V;
3555 break;
3556 case ICmpInst::ICMP_ULT:
3557 case ICmpInst::ICMP_ULE:
3558 // Comparison is true iff the LHS >=s 0.
3559 if (MaxRecurse)
3560 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3561 Constant::getNullValue(SrcTy),
3562 Q, MaxRecurse-1))
3563 return V;
3564 break;
3565 }
3566 }
3567 }
3568 }
3569 }
3570
3571 // icmp eq|ne X, Y -> false|true if X != Y
3572 // This is potentially expensive, and we have already computed known bits
3573 // for compares with 0 above, so only try this for a non-zero compare.
3574 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3575 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3576 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3577 }
3578
3579 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3580 return V;
3581
3582 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3583 return V;
3584
3585 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3586 return V;
3587
3588 // Simplify comparisons of related pointers using a powerful, recursive
3589 // GEP-walk when we have target data available.
3590 if (LHS->getType()->isPointerTy())
3591 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
3592 return C;
3593 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3594 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3595 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3596 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3597 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3598 Q.DL.getTypeSizeInBits(CRHS->getType()))
3599 if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
3600 CRHS->getPointerOperand(), Q))
3601 return C;
3602
3603 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3604 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3605 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3606 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3607 (ICmpInst::isEquality(Pred) ||
3608 (GLHS->isInBounds() && GRHS->isInBounds() &&
3609 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3610 // The bases are equal and the indices are constant. Build a constant
3611 // expression GEP with the same indices and a null base pointer to see
3612 // what constant folding can make out of it.
3613 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3614 SmallVector<Value *, 4> IndicesLHS(GLHS->indices());
3615 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3616 GLHS->getSourceElementType(), Null, IndicesLHS);
3617
3618 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3619 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3620 GLHS->getSourceElementType(), Null, IndicesRHS);
3621 Constant *NewICmp = ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3622 return ConstantFoldConstant(NewICmp, Q.DL);
3623 }
3624 }
3625 }
3626
3627 // If the comparison is with the result of a select instruction, check whether
3628 // comparing with either branch of the select always yields the same value.
3629 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3630 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3631 return V;
3632
3633 // If the comparison is with the result of a phi instruction, check whether
3634 // doing the compare with each incoming phi value yields a common result.
3635 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3636 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3637 return V;
3638
3639 return nullptr;
3640 }
3641
3642 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3643 const SimplifyQuery &Q) {
3644 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3645 }
3646
3647 /// Given operands for an FCmpInst, see if we can fold the result.
3648 /// If not, this returns null.
3649 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3650 FastMathFlags FMF, const SimplifyQuery &Q,
3651 unsigned MaxRecurse) {
3652 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3653 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3654
3655 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3656 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3657 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3658
3659 // If we have a constant, make sure it is on the RHS.
3660 std::swap(LHS, RHS);
3661 Pred = CmpInst::getSwappedPredicate(Pred);
3662 }
3663
3664 // Fold trivial predicates.
3665 Type *RetTy = GetCompareTy(LHS);
3666 if (Pred == FCmpInst::FCMP_FALSE)
3667 return getFalse(RetTy);
3668 if (Pred == FCmpInst::FCMP_TRUE)
3669 return getTrue(RetTy);
3670
3671 // Fold (un)ordered comparison if we can determine there are no NaNs.
3672 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
3673 if (FMF.noNaNs() ||
3674 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
3675 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
3676
3677 // NaN is unordered; NaN is not ordered.
3678 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3679 "Comparison must be either ordered or unordered");
3680 if (match(RHS, m_NaN()))
3681 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3682
3683 // fcmp pred x, undef and fcmp pred undef, x
3684 // fold to true if unordered, false if ordered
3685 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
3686 // Choosing NaN for the undef will always make unordered comparison succeed
3687 // and ordered comparison fail.
3688 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3689 }
3690
3691 // fcmp x,x -> true/false. Not all compares are foldable.
3692 if (LHS == RHS) {
3693 if (CmpInst::isTrueWhenEqual(Pred))
3694 return getTrue(RetTy);
3695 if (CmpInst::isFalseWhenEqual(Pred))
3696 return getFalse(RetTy);
3697 }
3698
3699 // Handle fcmp with constant RHS.
3700 // TODO: Use match with a specific FP value, so these work with vectors with
3701 // undef lanes.
3702 const APFloat *C;
3703 if (match(RHS, m_APFloat(C))) {
3704 // Check whether the constant is an infinity.
3705 if (C->isInfinity()) {
3706 if (C->isNegative()) {
3707 switch (Pred) {
3708 case FCmpInst::FCMP_OLT:
3709 // No value is ordered and less than negative infinity.
3710 return getFalse(RetTy);
3711 case FCmpInst::FCMP_UGE:
3712 // All values are either unordered with, or at least, negative infinity.
3713 return getTrue(RetTy);
3714 default:
3715 break;
3716 }
3717 } else {
3718 switch (Pred) {
3719 case FCmpInst::FCMP_OGT:
3720 // No value is ordered and greater than infinity.
3721 return getFalse(RetTy);
3722 case FCmpInst::FCMP_ULE:
3723 // All values are either unordered with, or at most, infinity.
3724 return getTrue(RetTy);
3725 default:
3726 break;
3727 }
3728 }
3729
3730 // LHS == Inf
3731 if (Pred == FCmpInst::FCMP_OEQ && isKnownNeverInfinity(LHS, Q.TLI))
3732 return getFalse(RetTy);
3733 // LHS != Inf
3734 if (Pred == FCmpInst::FCMP_UNE && isKnownNeverInfinity(LHS, Q.TLI))
3735 return getTrue(RetTy);
3736 // LHS == Inf || LHS == NaN
3737 if (Pred == FCmpInst::FCMP_UEQ && isKnownNeverInfinity(LHS, Q.TLI) &&
3738 isKnownNeverNaN(LHS, Q.TLI))
3739 return getFalse(RetTy);
3740 // LHS != Inf && LHS != NaN
3741 if (Pred == FCmpInst::FCMP_ONE && isKnownNeverInfinity(LHS, Q.TLI) &&
3742 isKnownNeverNaN(LHS, Q.TLI))
3743 return getTrue(RetTy);
3744 }
3745 if (C->isNegative() && !C->isNegZero()) {
3746 assert(!C->isNaN() && "Unexpected NaN constant!");
3747 // TODO: We can catch more cases by using a range check rather than
3748 // relying on CannotBeOrderedLessThanZero.
3749 switch (Pred) {
3750 case FCmpInst::FCMP_UGE:
3751 case FCmpInst::FCMP_UGT:
3752 case FCmpInst::FCMP_UNE:
3753 // (X >= 0) implies (X > C) when (C < 0)
3754 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3755 return getTrue(RetTy);
3756 break;
3757 case FCmpInst::FCMP_OEQ:
3758 case FCmpInst::FCMP_OLE:
3759 case FCmpInst::FCMP_OLT:
3760 // (X >= 0) implies !(X < C) when (C < 0)
3761 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3762 return getFalse(RetTy);
3763 break;
3764 default:
3765 break;
3766 }
3767 }
3768
3769 // Check comparison of [minnum/maxnum with constant] with other constant.
3770 const APFloat *C2;
3771 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
3772 *C2 < *C) ||
3773 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
3774 *C2 > *C)) {
3775 bool IsMaxNum =
3776 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
3777 // The ordered relationship and minnum/maxnum guarantee that we do not
3778 // have NaN constants, so ordered/unordered preds are handled the same.
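// For example, "fcmp olt (minnum float %x, 1.0), 2.0" folds to true: minnum
// cannot return NaN here, and its result is at most 1.0 < 2.0.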
3779 switch (Pred) {
3780 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ:
3781 // minnum(X, LesserC) == C --> false
3782 // maxnum(X, GreaterC) == C --> false
3783 return getFalse(RetTy);
3784 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE:
3785 // minnum(X, LesserC) != C --> true
3786 // maxnum(X, GreaterC) != C --> true
3787 return getTrue(RetTy);
3788 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE:
3789 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT:
3790 // minnum(X, LesserC) >= C --> false
3791 // minnum(X, LesserC) > C --> false
3792 // maxnum(X, GreaterC) >= C --> true
3793 // maxnum(X, GreaterC) > C --> true
3794 return ConstantInt::get(RetTy, IsMaxNum);
3795 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE:
3796 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT:
3797 // minnum(X, LesserC) <= C --> true
3798 // minnum(X, LesserC) < C --> true
3799 // maxnum(X, GreaterC) <= C --> false
3800 // maxnum(X, GreaterC) < C --> false
3801 return ConstantInt::get(RetTy, !IsMaxNum);
3802 default:
3803 // TRUE/FALSE/ORD/UNO should be handled before this.
3804 llvm_unreachable("Unexpected fcmp predicate");
3805 }
3806 }
3807 }
3808
3809 if (match(RHS, m_AnyZeroFP())) {
3810 switch (Pred) {
3811 case FCmpInst::FCMP_OGE:
3812 case FCmpInst::FCMP_ULT:
3813 // Positive or zero X >= 0.0 --> true
3814 // Positive or zero X < 0.0 --> false
3815 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) &&
3816 CannotBeOrderedLessThanZero(LHS, Q.TLI))
3817 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
3818 break;
3819 case FCmpInst::FCMP_UGE:
3820 case FCmpInst::FCMP_OLT:
3821 // Positive or zero or nan X >= 0.0 --> true
3822 // Positive or zero or nan X < 0.0 --> false
3823 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3824 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
3825 break;
3826 default:
3827 break;
3828 }
3829 }
3830
3831 // If the comparison is with the result of a select instruction, check whether
3832 // comparing with either branch of the select always yields the same value.
3833 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3834 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3835 return V;
3836
3837 // If the comparison is with the result of a phi instruction, check whether
3838 // doing the compare with each incoming phi value yields a common result.
3839 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3840 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3841 return V;
3842
3843 return nullptr;
3844 }
3845
3846 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3847 FastMathFlags FMF, const SimplifyQuery &Q) {
3848 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3849 }
3850
3851 static Value *SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3852 const SimplifyQuery &Q,
3853 bool AllowRefinement,
3854 unsigned MaxRecurse) {
3855 // Trivial replacement.
3856 if (V == Op)
3857 return RepOp;
3858
3859 // We cannot replace a constant, and shouldn't even try.
3860 if (isa<Constant>(Op))
3861 return nullptr;
3862
3863 auto *I = dyn_cast<Instruction>(V);
3864 if (!I || !is_contained(I->operands(), Op))
3865 return nullptr;
3866
3867 // Replace Op with RepOp in instruction operands.
3868 SmallVector<Value *, 8> NewOps(I->getNumOperands());
3869 transform(I->operands(), NewOps.begin(),
3870 [&](Value *V) { return V == Op ? RepOp : V; });
3871
3872 if (!AllowRefinement) {
3873 // General InstSimplify functions may refine the result, e.g. by returning
3874 // a constant for a potentially poison value. To avoid this, implement only
3875 // a few non-refining but profitable transforms here.
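// For example, substituting 0 for %x in "or i32 %x, %y" hits the identity
// pattern below and returns %y without calling the general simplifier.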
3876
3877 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3878 unsigned Opcode = BO->getOpcode();
3879 // id op x -> x, x op id -> x
3880 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
3881 return NewOps[1];
3882 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
3883 /* RHS */ true))
3884 return NewOps[0];
3885
3886 // x & x -> x, x | x -> x
3887 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
3888 NewOps[0] == NewOps[1])
3889 return NewOps[0];
3890 }
3891
3892 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3893 // getelementptr x, 0 -> x
3894 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()) &&
3895 !GEP->isInBounds())
3896 return NewOps[0];
3897 }
3898 } else if (MaxRecurse) {
3899 // The simplification queries below may return the original value. Consider:
3900 // %div = udiv i32 %arg, %arg2
3901 // %mul = mul nsw i32 %div, %arg2
3902 // %cmp = icmp eq i32 %mul, %arg
3903 // %sel = select i1 %cmp, i32 %div, i32 undef
3904 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
3905 // simplifies back to %arg. This can only happen because %mul does not
3906 // dominate %div. To ensure a consistent return value contract, we make sure
3907 // that this case returns nullptr as well.
3908 auto PreventSelfSimplify = [V](Value *Simplified) {
3909 return Simplified != V ? Simplified : nullptr;
3910 };
3911
3912 if (auto *B = dyn_cast<BinaryOperator>(I))
3913 return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(), NewOps[0],
3914 NewOps[1], Q, MaxRecurse - 1));
3915
3916 if (CmpInst *C = dyn_cast<CmpInst>(I))
3917 return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(), NewOps[0],
3918 NewOps[1], Q, MaxRecurse - 1));
3919
3920 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
3921 return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
3922 NewOps, Q, MaxRecurse - 1));
3923
3924 if (isa<SelectInst>(I))
3925 return PreventSelfSimplify(
3926 SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q,
3927 MaxRecurse - 1));
3928 // TODO: We could hand off more cases to instsimplify here.
3929 }
3930
3931 // If all operands are constant after substituting Op for RepOp then we can
3932 // constant fold the instruction.
3933 SmallVector<Constant *, 8> ConstOps;
3934 for (Value *NewOp : NewOps) {
3935 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
3936 ConstOps.push_back(ConstOp);
3937 else
3938 return nullptr;
3939 }
3940
3941 // Consider:
3942 // %cmp = icmp eq i32 %x, 2147483647
3943 // %add = add nsw i32 %x, 1
3944 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3945 //
3946 // We can't replace %sel with %add unless we strip away the flags (which
3947 // will be done in InstCombine).
3948 // TODO: This may be unsound, because it only catches some forms of
3949 // refinement.
3950 if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
3951 return nullptr;
3952
3953 if (CmpInst *C = dyn_cast<CmpInst>(I))
3954 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
3955 ConstOps[1], Q.DL, Q.TLI);
3956
3957 if (LoadInst *LI = dyn_cast<LoadInst>(I))
3958 if (!LI->isVolatile())
3959 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
3960
3961 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
3962 }
3963
3964 Value *llvm::SimplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3965 const SimplifyQuery &Q,
3966 bool AllowRefinement) {
3967 return ::SimplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
3968 RecursionLimit);
3969 }
3970
3971 /// Try to simplify a select instruction when its condition operand is an
3972 /// integer comparison where one operand of the compare is a constant.
3973 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
3974 const APInt *Y, bool TrueWhenUnset) {
3975 const APInt *C;
3976
3977 // (X & Y) == 0 ? X & ~Y : X --> X
3978 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
3979 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
3980 *Y == ~*C)
3981 return TrueWhenUnset ? FalseVal : TrueVal;
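// For example, with Y = 8 (so C = ~8 = -9): "(%x & 8) == 0 ? (%x & -9) : %x"
// folds to %x, because clearing an already-clear bit changes nothing.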
3982
3983 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
3984 // (X & Y) != 0 ? X : X & ~Y --> X
3985 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
3986 *Y == ~*C)
3987 return TrueWhenUnset ? FalseVal : TrueVal;
3988
3989 if (Y->isPowerOf2()) {
3990 // (X & Y) == 0 ? X | Y : X --> X | Y
3991 // (X & Y) != 0 ? X | Y : X --> X
3992 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
3993 *Y == *C)
3994 return TrueWhenUnset ? TrueVal : FalseVal;
3995
3996 // (X & Y) == 0 ? X : X | Y --> X
3997 // (X & Y) != 0 ? X : X | Y --> X | Y
3998 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
3999 *Y == *C)
4000 return TrueWhenUnset ? TrueVal : FalseVal;
4001 }
4002
4003 return nullptr;
4004 }
4005
4006 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4007 /// eq/ne.
4008 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4009 ICmpInst::Predicate Pred,
4010 Value *TrueVal, Value *FalseVal) {
4011 Value *X;
4012 APInt Mask;
4013 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4014 return nullptr;
4015
4016 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4017 Pred == ICmpInst::ICMP_EQ);
4018 }
4019
4020 /// Try to simplify a select instruction when its condition operand is an
4021 /// integer comparison.
4022 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4023 Value *FalseVal, const SimplifyQuery &Q,
4024 unsigned MaxRecurse) {
4025 ICmpInst::Predicate Pred;
4026 Value *CmpLHS, *CmpRHS;
4027 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4028 return nullptr;
4029
4030 // Canonicalize ne to eq predicate.
4031 if (Pred == ICmpInst::ICMP_NE) {
4032 Pred = ICmpInst::ICMP_EQ;
4033 std::swap(TrueVal, FalseVal);
4034 }
4035
4036 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4037 Value *X;
4038 const APInt *Y;
4039 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4040 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4041 /*TrueWhenUnset=*/true))
4042 return V;
4043
4044 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4045 Value *ShAmt;
4046 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4047 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4048 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4049 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4050 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4051 return X;
4052
4053 // Test for a zero-shift-guard-op around rotates. These are used to
4054 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4055 // intrinsics do not have that problem.
4056 // We do not allow this transform for the general funnel shift case because
4057 // that would not preserve the poison safety of the original code.
4058 auto isRotate =
4059 m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4060 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4061 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4062 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4063 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4064 Pred == ICmpInst::ICMP_EQ)
4065 return FalseVal;
4066
4067 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4068 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4069 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4070 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4071 return FalseVal;
4072 if (match(TrueVal,
4073 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4074 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4075 return FalseVal;
4076 }
4077
4078 // Check for other compares that behave like bit test.
4079 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
4080 TrueVal, FalseVal))
4081 return V;
4082
4083 // If we have a scalar equality comparison, then we know the value in one of
4084 // the arms of the select. See if substituting this value into the arm and
4085 // simplifying the result yields the same value as the other arm.
4086 // Note that the equivalence/replacement opportunity does not hold for vectors
4087 // because each element of a vector select is chosen independently.
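// For example, with "%c = icmp eq i32 %x, %y", the select
// "select i1 %c, i32 %y, i32 %x" folds to %x: substituting %y for %x in the
// false arm reproduces the true arm.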
4088 if (Pred == ICmpInst::ICMP_EQ && !CondVal->getType()->isVectorTy()) {
4089 if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4090 /* AllowRefinement */ false, MaxRecurse) ==
4091 TrueVal ||
4092 SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
4093 /* AllowRefinement */ false, MaxRecurse) ==
4094 TrueVal)
4095 return FalseVal;
4096 if (SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4097 /* AllowRefinement */ true, MaxRecurse) ==
4098 FalseVal ||
4099 SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
4100 /* AllowRefinement */ true, MaxRecurse) ==
4101 FalseVal)
4102 return FalseVal;
4103 }
4104
4105 return nullptr;
4106 }
4107
4108 /// Try to simplify a select instruction when its condition operand is a
4109 /// floating-point comparison.
4110 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4111 const SimplifyQuery &Q) {
4112 FCmpInst::Predicate Pred;
4113 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4114 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4115 return nullptr;
4116
4117 // This transform is safe if we do not have (do not care about) -0.0 or if
4118 // at least one operand is known to not be -0.0. Otherwise, the select can
4119 // change the sign of a zero operand.
4120 bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) &&
4121 Q.CxtI->hasNoSignedZeros();
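// For example, "select (fcmp oeq float %x, 0.0), float %x, float 0.0" must
// not fold to the 0.0 arm: if %x is -0.0 the condition is true and the
// select yields -0.0, not +0.0. The constant check below rules this out.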
4122 const APFloat *C;
4123 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4124 (match(F, m_APFloat(C)) && C->isNonZero())) {
4125 // (T == F) ? T : F --> F
4126 // (F == T) ? T : F --> F
4127 if (Pred == FCmpInst::FCMP_OEQ)
4128 return F;
4129
4130 // (T != F) ? T : F --> T
4131 // (F != T) ? T : F --> T
4132 if (Pred == FCmpInst::FCMP_UNE)
4133 return T;
4134 }
4135
4136 return nullptr;
4137 }
4138
4139 /// Given operands for a SelectInst, see if we can fold the result.
4140 /// If not, this returns null.
4141 static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4142 const SimplifyQuery &Q, unsigned MaxRecurse) {
4143 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4144 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4145 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4146 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
4147
4148 // select undef, X, Y -> X or Y
4149 if (Q.isUndefValue(CondC))
4150 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4151
4152 // TODO: Vector constants with undef elements don't simplify.
4153
4154 // select true, X, Y -> X
4155 if (CondC->isAllOnesValue())
4156 return TrueVal;
4157 // select false, X, Y -> Y
4158 if (CondC->isNullValue())
4159 return FalseVal;
4160 }
4161
4162 // select i1 Cond, i1 true, i1 false --> i1 Cond
4163 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4164 "Select must have bool or bool vector condition");
4165 assert(TrueVal->getType() == FalseVal->getType() &&
4166 "Select must have same types for true/false ops");
4167 if (Cond->getType() == TrueVal->getType() &&
4168 match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4169 return Cond;
4170
4171 // select ?, X, X -> X
4172 if (TrueVal == FalseVal)
4173 return TrueVal;
4174
4175 // If the true or false value is undef, we can fold to the other value as
4176 // long as the other value isn't poison.
4177 // select ?, undef, X -> X
4178 if (Q.isUndefValue(TrueVal) &&
4179 isGuaranteedNotToBeUndefOrPoison(FalseVal, Q.AC, Q.CxtI, Q.DT))
4180 return FalseVal;
4181 // select ?, X, undef -> X
4182 if (Q.isUndefValue(FalseVal) &&
4183 isGuaranteedNotToBeUndefOrPoison(TrueVal, Q.AC, Q.CxtI, Q.DT))
4184 return TrueVal;
4185
4186 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4187 Constant *TrueC, *FalseC;
4188 if (isa<FixedVectorType>(TrueVal->getType()) &&
4189 match(TrueVal, m_Constant(TrueC)) &&
4190 match(FalseVal, m_Constant(FalseC))) {
4191 unsigned NumElts =
4192 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4193 SmallVector<Constant *, 16> NewC;
4194 for (unsigned i = 0; i != NumElts; ++i) {
4195 // Bail out on incomplete vector constants.
4196 Constant *TEltC = TrueC->getAggregateElement(i);
4197 Constant *FEltC = FalseC->getAggregateElement(i);
4198 if (!TEltC || !FEltC)
4199 break;
4200
4201 // If the elements match (undef or not), that value is the result. If only
4202 // one element is undef, choose the defined element as the safe result.
4203 if (TEltC == FEltC)
4204 NewC.push_back(TEltC);
4205 else if (Q.isUndefValue(TEltC) &&
4206 isGuaranteedNotToBeUndefOrPoison(FEltC))
4207 NewC.push_back(FEltC);
4208 else if (Q.isUndefValue(FEltC) &&
4209 isGuaranteedNotToBeUndefOrPoison(TEltC))
4210 NewC.push_back(TEltC);
4211 else
4212 break;
4213 }
4214 if (NewC.size() == NumElts)
4215 return ConstantVector::get(NewC);
4216 }
4217
4218 if (Value *V =
4219 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4220 return V;
4221
4222 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4223 return V;
4224
4225 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4226 return V;
4227
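  // If a dominating condition (e.g. the branch guarding this block) already
  // decides Cond, the select collapses to the corresponding arm.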
4228 Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4229 if (Imp)
4230 return *Imp ? TrueVal : FalseVal;
4231
4232 return nullptr;
4233 }
4234
4235 Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4236 const SimplifyQuery &Q) {
4237 return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4238 }
4239
4240 /// Given operands for a GetElementPtrInst, see if we can fold the result.
4241 /// If not, this returns null.
4242 static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
4243 const SimplifyQuery &Q, unsigned) {
4244 // The type of the GEP pointer operand.
4245 unsigned AS =
4246 cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();
4247
4248 // getelementptr P -> P.
4249 if (Ops.size() == 1)
4250 return Ops[0];
4251
4252 // Compute the (pointer) type returned by the GEP instruction.
4253 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
4254 Type *GEPTy = PointerType::get(LastType, AS);
4255 for (Value *Op : Ops) {
4256 // If one of the operands is a vector, the result type is a vector of
4257 // pointers. All vector operands must have the same number of elements.
4258 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4259 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4260 break;
4261 }
4262 }
4263
4264 // getelementptr poison, idx -> poison
4265 // getelementptr baseptr, poison -> poison
4266 if (any_of(Ops, [](const auto *V) { return isa<PoisonValue>(V); }))
4267 return PoisonValue::get(GEPTy);
4268
4269 if (Q.isUndefValue(Ops[0]))
4270 return UndefValue::get(GEPTy);
4271
4272 bool IsScalableVec =
4273 isa<ScalableVectorType>(SrcTy) || any_of(Ops, [](const Value *V) {
4274 return isa<ScalableVectorType>(V->getType());
4275 });
4276
4277 if (Ops.size() == 2) {
4278 // getelementptr P, 0 -> P.
4279 if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
4280 return Ops[0];
4281
4282 Type *Ty = SrcTy;
4283 if (!IsScalableVec && Ty->isSized()) {
4284 Value *P;
4285 uint64_t C;
4286 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4287 // getelementptr P, N -> P if P points to a type of zero size.
4288 if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
4289 return Ops[0];
4290
4291 // The following transforms are only safe if the ptrtoint cast
4292 // doesn't truncate the pointers.
4293 if (Ops[1]->getType()->getScalarSizeInBits() ==
4294 Q.DL.getPointerSizeInBits(AS)) {
4295 auto CanSimplify = [GEPTy, &P, V = Ops[0]]() -> bool {
4296 return P->getType() == GEPTy &&
4297 getUnderlyingObject(P) == getUnderlyingObject(V);
4298 };
4299 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
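        // For example, with i8* values %p and %v of the same underlying
        // object:
        //   %d = sub i64 (ptrtoint i8* %p to i64), (ptrtoint i8* %v to i64)
        //   getelementptr i8, i8* %v, i64 %d --> %p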
4300 if (TyAllocSize == 1 &&
4301 match(Ops[1], m_Sub(m_PtrToInt(m_Value(P)),
4302 m_PtrToInt(m_Specific(Ops[0])))) &&
4303 CanSimplify())
4304 return P;
4305
4306 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
4307 // size 1 << C.
4308 if (match(Ops[1], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
4309 m_PtrToInt(m_Specific(Ops[0]))),
4310 m_ConstantInt(C))) &&
4311 TyAllocSize == 1ULL << C && CanSimplify())
4312 return P;
4313
4314 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
4315 // size C.
4316 if (match(Ops[1], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
4317 m_PtrToInt(m_Specific(Ops[0]))),
4318 m_SpecificInt(TyAllocSize))) &&
4319 CanSimplify())
4320 return P;
4321 }
4322 }
4323 }
4324
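  // The folds below handle a gep whose final indexed type has size 1 and
  // whose intermediate indices are all zero, so the last index is a plain
  // byte offset from the base pointer.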
4325 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
4326 all_of(Ops.slice(1).drop_back(1),
4327 [](Value *Idx) { return match(Idx, m_Zero()); })) {
4328 unsigned IdxWidth =
4329 Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
4330 if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
4331 APInt BasePtrOffset(IdxWidth, 0);
4332 Value *StrippedBasePtr =
4333 Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
4334 BasePtrOffset);
4335
4336 // Avoid creating inttoptr of zero here: While LLVM's treatment of
4337 // inttoptr is generally conservative, this particular case is folded to
4338 // a null pointer, which will have incorrect provenance.
4339
4340 // gep (gep V, C), (sub 0, V) -> C
4341 if (match(Ops.back(),
4342 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
4343 !BasePtrOffset.isNullValue()) {
4344 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
4345 return ConstantExpr::getIntToPtr(CI, GEPTy);
4346 }
4347 // gep (gep V, C), (xor V, -1) -> C-1
4348 if (match(Ops.back(),
4349 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
4350 !BasePtrOffset.isOneValue()) {
4351 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
4352 return ConstantExpr::getIntToPtr(CI, GEPTy);
4353 }
4354 }
4355 }
4356
4357 // Check to see if this is constant foldable.
4358 if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
4359 return nullptr;
4360
4361 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
4362 Ops.slice(1));
4363 return ConstantFoldConstant(CE, Q.DL);
4364 }
4365
4366 Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
4367 const SimplifyQuery &Q) {
4368 return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
4369 }
4370
4371 /// Given operands for an InsertValueInst, see if we can fold the result.
4372 /// If not, this returns null.
4373 static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
4374 ArrayRef<unsigned> Idxs, const SimplifyQuery &Q,
4375 unsigned) {
4376 if (Constant *CAgg = dyn_cast<Constant>(Agg))
4377 if (Constant *CVal = dyn_cast<Constant>(Val))
4378 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
4379
4380 // insertvalue x, undef, n -> x
4381 if (Q.isUndefValue(Val))
4382 return Agg;
4383
4384 // insertvalue x, (extractvalue y, n), n
4385 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
4386 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
4387 EV->getIndices() == Idxs) {
4388 // insertvalue undef, (extractvalue y, n), n -> y
4389 if (Q.isUndefValue(Agg))
4390 return EV->getAggregateOperand();
4391
4392 // insertvalue y, (extractvalue y, n), n -> y
4393 if (Agg == EV->getAggregateOperand())
4394 return Agg;
4395 }
4396
4397 return nullptr;
4398 }
4399
4400 Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
4401 ArrayRef<unsigned> Idxs,
4402 const SimplifyQuery &Q) {
4403 return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
4404 }
4405
4406 Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
4407 const SimplifyQuery &Q) {
4408 // Try to constant fold.
4409 auto *VecC = dyn_cast<Constant>(Vec);
4410 auto *ValC = dyn_cast<Constant>(Val);
4411 auto *IdxC = dyn_cast<Constant>(Idx);
4412 if (VecC && ValC && IdxC)
4413 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
4414
4415 // For fixed-length vector, fold into poison if index is out of bounds.
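  // e.g. insertelement <4 x i32> %v, i32 %x, i64 7 --> poison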
4416 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
4417 if (isa<FixedVectorType>(Vec->getType()) &&
4418 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
4419 return PoisonValue::get(Vec->getType());
4420 }
4421
4422 // If index is undef, it might be out of bounds (see above case).
4423 if (Q.isUndefValue(Idx))
4424 return PoisonValue::get(Vec->getType());
4425
4426 // If the scalar is poison, or it is undef and there is no risk of
4427 // propagating poison from the vector value, simplify to the vector value.
4428 if (isa<PoisonValue>(Val) ||
4429 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
4430 return Vec;
4431
4432 // If we are extracting a value from a vector, then inserting it into the same
4433 // place, that's the input vector:
4434 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
4435 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
4436 return Vec;
4437
4438 return nullptr;
4439 }
4440
4441 /// Given operands for an ExtractValueInst, see if we can fold the result.
4442 /// If not, this returns null.
4443 static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4444 const SimplifyQuery &, unsigned) {
4445 if (auto *CAgg = dyn_cast<Constant>(Agg))
4446 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
4447
4448 // extractvalue x, (insertvalue y, elt, n), n -> elt
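  // e.g. %agg = insertvalue {i32, i32} %y, i32 %elt, 0
  //      extractvalue {i32, i32} %agg, 0 --> %elt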
4449 unsigned NumIdxs = Idxs.size();
4450 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
4451 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
4452 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
4453 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
4454 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
4455 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
4456 Idxs.slice(0, NumCommonIdxs)) {
4457 if (NumIdxs == NumInsertValueIdxs)
4458 return IVI->getInsertedValueOperand();
4459 break;
4460 }
4461 }
4462
4463 return nullptr;
4464 }
4465
4466 Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4467 const SimplifyQuery &Q) {
4468 return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
4469 }
4470
4471 /// Given operands for an ExtractElementInst, see if we can fold the result.
4472 /// If not, this returns null.
4473 static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
4474 const SimplifyQuery &Q, unsigned) {
4475 auto *VecVTy = cast<VectorType>(Vec->getType());
4476 if (auto *CVec = dyn_cast<Constant>(Vec)) {
4477 if (auto *CIdx = dyn_cast<Constant>(Idx))
4478 return ConstantExpr::getExtractElement(CVec, CIdx);
4479
4480 // The index is not relevant if our vector is a splat.
4481 if (auto *Splat = CVec->getSplatValue())
4482 return Splat;
4483
4484 if (Q.isUndefValue(Vec))
4485 return UndefValue::get(VecVTy->getElementType());
4486 }
4487
4488 // If extracting a specified index from the vector, see if we can recursively
4489 // find a previously computed scalar that was inserted into the vector.
4490 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
4491 // For fixed-length vector, fold into poison if index is out of bounds.
4492 if (isa<FixedVectorType>(VecVTy) &&
4493 IdxC->getValue().uge(cast<FixedVectorType>(VecVTy)->getNumElements()))
4494 return PoisonValue::get(VecVTy->getElementType());
4495 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4496 return Elt;
4497 }
4498
4499 // An undef extract index can be arbitrarily chosen to be an out-of-range
4500 // index value, which would result in the instruction being poison.
4501 if (Q.isUndefValue(Idx))
4502 return PoisonValue::get(VecVTy->getElementType());
4503
4504 return nullptr;
4505 }
4506
4507 Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
4508 const SimplifyQuery &Q) {
4509 return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
4510 }
4511
4512 /// See if we can fold the given phi. If not, returns null.
4513 static Value *SimplifyPHINode(PHINode *PN, const SimplifyQuery &Q) {
4514 // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
4515 // here, because the PHI we may succeed simplifying to was not
4516 // def-reachable from the original PHI!
4517
4518 // If all of the PHI's incoming values are the same then replace the PHI node
4519 // with the common value.
4520 Value *CommonValue = nullptr;
4521 bool HasUndefInput = false;
4522 for (Value *Incoming : PN->incoming_values()) {
4523 // If the incoming value is the phi node itself, it can safely be skipped.
4524 if (Incoming == PN) continue;
4525 if (Q.isUndefValue(Incoming)) {
4526 // Remember that we saw an undef value, but otherwise ignore them.
4527 HasUndefInput = true;
4528 continue;
4529 }
4530 if (CommonValue && Incoming != CommonValue)
4531 return nullptr; // Not the same, bail out.
4532 CommonValue = Incoming;
4533 }
4534
4535 // If CommonValue is null then all of the incoming values were either undef or
4536 // equal to the phi node itself.
4537 if (!CommonValue)
4538 return UndefValue::get(PN->getType());
4539
4540 // If we have a PHI node like phi(X, undef, X), where X is defined by some
4541 // instruction, we cannot return X as the result of the PHI node unless it
4542 // dominates the PHI block.
4543 if (HasUndefInput)
4544 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
4545
4546 return CommonValue;
4547 }
4548
4549 static Value *SimplifyCastInst(unsigned CastOpc, Value *Op,
4550 Type *Ty, const SimplifyQuery &Q, unsigned MaxRecurse) {
4551 if (auto *C = dyn_cast<Constant>(Op))
4552 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
4553
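  // A pair of casts that round-trips can be removed when it amounts to a
  // bitcast, e.g.:
  //   %i = ptrtoint i8* %p to i64
  //   %q = inttoptr i64 %i to i8*  --> %p  (if i64 is as wide as a pointer)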
4554 if (auto *CI = dyn_cast<CastInst>(Op)) {
4555 auto *Src = CI->getOperand(0);
4556 Type *SrcTy = Src->getType();
4557 Type *MidTy = CI->getType();
4558 Type *DstTy = Ty;
4559 if (Src->getType() == Ty) {
4560 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
4561 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
4562 Type *SrcIntPtrTy =
4563 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
4564 Type *MidIntPtrTy =
4565 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
4566 Type *DstIntPtrTy =
4567 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
4568 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
4569 SrcIntPtrTy, MidIntPtrTy,
4570 DstIntPtrTy) == Instruction::BitCast)
4571 return Src;
4572 }
4573 }
4574
4575 // bitcast x -> x
4576 if (CastOpc == Instruction::BitCast)
4577 if (Op->getType() == Ty)
4578 return Op;
4579
4580 return nullptr;
4581 }
4582
4583 Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
4584 const SimplifyQuery &Q) {
4585 return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
4586 }
4587
4588 /// For the given destination element of a shuffle, peek through shuffles to
4589 /// match a root vector source operand that contains that element in the same
4590 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
4591 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
4592 int MaskVal, Value *RootVec,
4593 unsigned MaxRecurse) {
4594 if (!MaxRecurse--)
4595 return nullptr;
4596
4597 // Bail out if any mask value is undefined. That kind of shuffle may be
4598 // simplified further based on demanded bits or other folds.
4599 if (MaskVal == -1)
4600 return nullptr;
4601
4602 // The mask value chooses which source operand we need to look at next.
4603 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
4604 int RootElt = MaskVal;
4605 Value *SourceOp = Op0;
4606 if (MaskVal >= InVecNumElts) {
4607 RootElt = MaskVal - InVecNumElts;
4608 SourceOp = Op1;
4609 }
4610
4611 // If the source operand is a shuffle itself, look through it to find the
4612 // matching root vector.
4613 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
4614 return foldIdentityShuffles(
4615 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
4616 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
4617 }
4618
4619 // TODO: Look through bitcasts? What if the bitcast changes the vector element
4620 // size?
4621
4622 // The source operand is not a shuffle. Initialize the root vector value for
4623 // this shuffle if that has not been done yet.
4624 if (!RootVec)
4625 RootVec = SourceOp;
4626
4627 // Give up as soon as a source operand does not match the existing root value.
4628 if (RootVec != SourceOp)
4629 return nullptr;
4630
4631 // The element must be coming from the same lane in the source vector
4632 // (although it may have crossed lanes in intermediate shuffles).
4633 if (RootElt != DestElt)
4634 return nullptr;
4635
4636 return RootVec;
4637 }
4638
4639 static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4640 ArrayRef<int> Mask, Type *RetTy,
4641 const SimplifyQuery &Q,
4642 unsigned MaxRecurse) {
4643 if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
4644 return UndefValue::get(RetTy);
4645
4646 auto *InVecTy = cast<VectorType>(Op0->getType());
4647 unsigned MaskNumElts = Mask.size();
4648 ElementCount InVecEltCount = InVecTy->getElementCount();
4649
4650 bool Scalable = InVecEltCount.isScalable();
4651
4652 SmallVector<int, 32> Indices;
4653 Indices.assign(Mask.begin(), Mask.end());
4654
4655 // Canonicalization: If mask does not select elements from an input vector,
4656 // replace that input vector with poison.
4657 if (!Scalable) {
4658 bool MaskSelects0 = false, MaskSelects1 = false;
4659 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
4660 for (unsigned i = 0; i != MaskNumElts; ++i) {
4661 if (Indices[i] == -1)
4662 continue;
4663 if ((unsigned)Indices[i] < InVecNumElts)
4664 MaskSelects0 = true;
4665 else
4666 MaskSelects1 = true;
4667 }
4668 if (!MaskSelects0)
4669 Op0 = PoisonValue::get(InVecTy);
4670 if (!MaskSelects1)
4671 Op1 = PoisonValue::get(InVecTy);
4672 }
4673
4674 auto *Op0Const = dyn_cast<Constant>(Op0);
4675 auto *Op1Const = dyn_cast<Constant>(Op1);
4676
4677 // If all operands are constant, constant fold the shuffle. This
4678 // transformation depends on the value of the mask which is not known at
4679 // compile time for scalable vectors
4680 if (Op0Const && Op1Const)
4681 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
4682
4683 // Canonicalization: if only one input vector is constant, it shall be the
4684 // second one. This transformation depends on the value of the mask which
4685 // is not known at compile time for scalable vectors
4686 if (!Scalable && Op0Const && !Op1Const) {
4687 std::swap(Op0, Op1);
4688 ShuffleVectorInst::commuteShuffleMask(Indices,
4689 InVecEltCount.getKnownMinValue());
4690 }
4691
4692 // A splat of an inserted scalar constant becomes a vector constant:
4693 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
4694 // NOTE: We may have commuted above, so analyze the updated Indices, not the
4695 // original mask constant.
4696 // NOTE: This transformation depends on the value of the mask which is not
4697 // known at compile time for scalable vectors
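  // e.g. shuf (inselt poison, i32 7, 2), undef, <2, 2, 2, 2>
  //        --> <i32 7, i32 7, i32 7, i32 7>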
4698 Constant *C;
4699 ConstantInt *IndexC;
4700 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
4701 m_ConstantInt(IndexC)))) {
4702 // Match a splat shuffle mask of the insert index allowing undef elements.
4703 int InsertIndex = IndexC->getZExtValue();
4704 if (all_of(Indices, [InsertIndex](int MaskElt) {
4705 return MaskElt == InsertIndex || MaskElt == -1;
4706 })) {
4707 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
4708
4709 // Shuffle mask undefs become undefined constant result elements.
4710 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
4711 for (unsigned i = 0; i != MaskNumElts; ++i)
4712 if (Indices[i] == -1)
4713 VecC[i] = UndefValue::get(C->getType());
4714 return ConstantVector::get(VecC);
4715 }
4716 }
4717
4718 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
4719 // value type is the same as the input vectors' type.
4720 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
4721 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
4722 is_splat(OpShuf->getShuffleMask()))
4723 return Op0;
4724
4725 // All remaining transformations depend on the value of the mask, which is
4726 // not known at compile time for scalable vectors.
4727 if (Scalable)
4728 return nullptr;
4729
4730 // Don't fold a shuffle with undef mask elements. This may get folded in a
4731 // better way using demanded bits or other analysis.
4732 // TODO: Should we allow this?
4733 if (is_contained(Indices, -1))
4734 return nullptr;
4735
4736 // Check if every element of this shuffle can be mapped back to the
4737 // corresponding element of a single root vector. If so, we don't need this
4738 // shuffle. This handles simple identity shuffles as well as chains of
4739 // shuffles that may widen/narrow and/or move elements across lanes and back.
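  // The simplest case is an identity shuffle:
  //   shufflevector <4 x i32> %x, <4 x i32> poison,
  //                 <4 x i32> <i32 0, i32 1, i32 2, i32 3> --> %x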
4740 Value *RootVec = nullptr;
4741 for (unsigned i = 0; i != MaskNumElts; ++i) {
4742 // Note that recursion is limited for each vector element, so if any element
4743 // exceeds the limit, this will fail to simplify.
4744 RootVec =
4745 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
4746
4747 // We can't replace a widening/narrowing shuffle with one of its operands.
4748 if (!RootVec || RootVec->getType() != RetTy)
4749 return nullptr;
4750 }
4751 return RootVec;
4752 }
4753
4754 /// Given operands for a ShuffleVectorInst, fold the result or return null.
4755 Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
4756 ArrayRef<int> Mask, Type *RetTy,
4757 const SimplifyQuery &Q) {
4758 return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
4759 }
4760
4761 static Constant *foldConstant(Instruction::UnaryOps Opcode,
4762 Value *&Op, const SimplifyQuery &Q) {
4763 if (auto *C = dyn_cast<Constant>(Op))
4764 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
4765 return nullptr;
4766 }
4767
4768 /// Given the operand for an FNeg, see if we can fold the result. If not, this
4769 /// returns null.
4770 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
4771 const SimplifyQuery &Q, unsigned MaxRecurse) {
4772 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
4773 return C;
4774
4775 Value *X;
4776 // fneg (fneg X) ==> X
4777 if (match(Op, m_FNeg(m_Value(X))))
4778 return X;
4779
4780 return nullptr;
4781 }
4782
4783 Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF,
4784 const SimplifyQuery &Q) {
4785 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
4786 }
4787
4788 static Constant *propagateNaN(Constant *In) {
4789 // If the input is a vector with undef elements, just return a default NaN.
4790 if (!In->isNaN())
4791 return ConstantFP::getNaN(In->getType());
4792
4793 // Propagate the existing NaN constant when possible.
4794 // TODO: Should we quiet a signaling NaN?
4795 return In;
4796 }
4797
4798 /// Perform folds that are common to any floating-point operation. This implies
4799 /// transforms based on undef/NaN because the operation itself makes no
4800 /// difference to the result.
4801 static Constant *simplifyFPOp(ArrayRef<Value *> Ops,
4802 FastMathFlags FMF,
4803 const SimplifyQuery &Q) {
4804 for (Value *V : Ops) {
4805 bool IsNan = match(V, m_NaN());
4806 bool IsInf = match(V, m_Inf());
4807 bool IsUndef = Q.isUndefValue(V);
4808
4809 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
4810 // (an undef operand can be chosen to be NaN/Inf), then the result of
4811 // this operation is poison.
4812 if (FMF.noNaNs() && (IsNan || IsUndef))
4813 return PoisonValue::get(V->getType());
4814 if (FMF.noInfs() && (IsInf || IsUndef))
4815 return PoisonValue::get(V->getType());
4816
4817 if (IsUndef || IsNan)
4818 return propagateNaN(cast<Constant>(V));
4819 }
4820 return nullptr;
4821 }
4822
4823 /// Given operands for an FAdd, see if we can fold the result. If not, this
4824 /// returns null.
4825 static Value *SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4826 const SimplifyQuery &Q, unsigned MaxRecurse) {
4827 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
4828 return C;
4829
4830 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4831 return C;
4832
4833 // fadd X, -0 ==> X
4834 if (match(Op1, m_NegZeroFP()))
4835 return Op0;
4836
4837 // fadd X, 0 ==> X, when we know X is not -0
4838 if (match(Op1, m_PosZeroFP()) &&
4839 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4840 return Op0;
4841
4842 // With nnan: -X + X --> 0.0 (and commuted variant)
4843 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
4844 // Negative zeros are allowed because we always end up with positive zero:
4845 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4846 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
4847 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
4848 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
4849 if (FMF.noNaNs()) {
4850 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
4851 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
4852 return ConstantFP::getNullValue(Op0->getType());
4853
4854 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
4855 match(Op1, m_FNeg(m_Specific(Op0))))
4856 return ConstantFP::getNullValue(Op0->getType());
4857 }
4858
4859 // (X - Y) + Y --> X
4860 // Y + (X - Y) --> X
4861 Value *X;
4862 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
4863 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
4864 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
4865 return X;
4866
4867 return nullptr;
4868 }
4869
4870 /// Given operands for an FSub, see if we can fold the result. If not, this
4871 /// returns null.
4872 static Value *SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4873 const SimplifyQuery &Q, unsigned MaxRecurse) {
4874 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
4875 return C;
4876
4877 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4878 return C;
4879
4880 // fsub X, +0 ==> X
4881 if (match(Op1, m_PosZeroFP()))
4882 return Op0;
4883
4884 // fsub X, -0 ==> X, when we know X is not -0
4885 if (match(Op1, m_NegZeroFP()) &&
4886 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
4887 return Op0;
4888
4889 // fsub -0.0, (fsub -0.0, X) ==> X
4890 // fsub -0.0, (fneg X) ==> X
4891 Value *X;
4892 if (match(Op0, m_NegZeroFP()) &&
4893 match(Op1, m_FNeg(m_Value(X))))
4894 return X;
4895
4896 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
4897 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
4898 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
4899 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
4900 match(Op1, m_FNeg(m_Value(X)))))
4901 return X;
4902
4903 // fsub nnan x, x ==> 0.0
4904 if (FMF.noNaNs() && Op0 == Op1)
4905 return Constant::getNullValue(Op0->getType());
4906
4907 // Y - (Y - X) --> X
4908 // (X + Y) - Y --> X
4909 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
4910 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
4911 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
4912 return X;
4913
4914 return nullptr;
4915 }
4916
4917 static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
4918 const SimplifyQuery &Q, unsigned MaxRecurse) {
4919 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4920 return C;
4921
4922 // fmul X, 1.0 ==> X
4923 if (match(Op1, m_FPOne()))
4924 return Op0;
4925
4926 // fmul 1.0, X ==> X
4927 if (match(Op0, m_FPOne()))
4928 return Op1;
4929
4930 // fmul nnan nsz X, 0 ==> 0
4931 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
4932 return ConstantFP::getNullValue(Op0->getType());
4933
4934 // fmul nnan nsz 0, X ==> 0
4935 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4936 return ConstantFP::getNullValue(Op1->getType());
4937
4938 // sqrt(X) * sqrt(X) --> X, if we can:
4939 // 1. Remove the intermediate rounding (reassociate).
4940 // 2. Ignore non-zero negative numbers because sqrt would produce NaN.
4941 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
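  // e.g. %s = call reassoc nnan nsz double @llvm.sqrt.f64(double %x)
  //      %r = fmul reassoc nnan nsz double %s, %s --> %x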
4942 Value *X;
4943 if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
4944 FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
4945 return X;
4946
4947 return nullptr;
4948 }
4949
4950 /// Given the operands for an FMul, see if we can fold the result
4951 static Value *SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4952 const SimplifyQuery &Q, unsigned MaxRecurse) {
4953 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
4954 return C;
4955
4956 // Now apply simplifications that do not require rounding.
4957 return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse);
4958 }
4959
4960 Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4961 const SimplifyQuery &Q) {
4962 return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit);
4963 }
4964
4965
4966 Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4967 const SimplifyQuery &Q) {
4968 return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit);
4969 }
4970
4971 Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4972 const SimplifyQuery &Q) {
4973 return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit);
4974 }
4975
4976 Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
4977 const SimplifyQuery &Q) {
4978 return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit);
4979 }
4980
4981 static Value *SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
4982 const SimplifyQuery &Q, unsigned) {
4983 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
4984 return C;
4985
4986 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
4987 return C;
4988
4989 // X / 1.0 -> X
4990 if (match(Op1, m_FPOne()))
4991 return Op0;
4992
4993 // 0 / X -> 0
4994 // Requires that NaNs are ignored (X could be zero) and signed zeros are
4995 // ignored (X could be positive or negative, so the output sign is unknown).
4996 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
4997 return ConstantFP::getNullValue(Op0->getType());
4998
4999 if (FMF.noNaNs()) {
5000 // X / X -> 1.0 is legal when NaNs are ignored.
5001 // We can ignore infinities because INF/INF is NaN.
5002 if (Op0 == Op1)
5003 return ConstantFP::get(Op0->getType(), 1.0);
5004
5005 // (X * Y) / Y --> X if we can reassociate to the above form.
5006 Value *X;
5007 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5008 return X;
5009
5010 // -X / X -> -1.0 and
5011 // X / -X -> -1.0 are legal when NaNs are ignored.
5012 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5013 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5014 match(Op1, m_FNegNSZ(m_Specific(Op0))))
5015 return ConstantFP::get(Op0->getType(), -1.0);
5016 }
5017
5018 return nullptr;
5019 }
5020
5021 Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5022 const SimplifyQuery &Q) {
5023 return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit);
5024 }
5025
5026 static Value *SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5027 const SimplifyQuery &Q, unsigned) {
5028 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5029 return C;
5030
5031 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q))
5032 return C;
5033
5034 // Unlike fdiv, the result of frem always matches the sign of the dividend.
5035 // The constant match may include undef elements in a vector, so return a full
5036 // zero constant as the result.
5037 if (FMF.noNaNs()) {
5038 // +0 % X -> 0
5039 if (match(Op0, m_PosZeroFP()))
5040 return ConstantFP::getNullValue(Op0->getType());
5041 // -0 % X -> -0
5042 if (match(Op0, m_NegZeroFP()))
5043 return ConstantFP::getNegativeZero(Op0->getType());
5044 }
5045
5046 return nullptr;
5047 }
5048
5049 Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5050 const SimplifyQuery &Q) {
5051 return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit);
5052 }
5053
5054 //=== Helper functions for higher up the class hierarchy.
5055
5056 /// Given the operand for a UnaryOperator, see if we can fold the result.
5057 /// If not, this returns null.
5058 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5059 unsigned MaxRecurse) {
5060 switch (Opcode) {
5061 case Instruction::FNeg:
5062 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5063 default:
5064 llvm_unreachable("Unexpected opcode");
5065 }
5066 }
5067
5068 /// Given the operand for a UnaryOperator, see if we can fold the result.
5069 /// If not, this returns null.
5070 /// Try to use FastMathFlags when folding the result.
5071 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5072 const FastMathFlags &FMF,
5073 const SimplifyQuery &Q, unsigned MaxRecurse) {
5074 switch (Opcode) {
5075 case Instruction::FNeg:
5076 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5077 default:
5078 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5079 }
5080 }
5081
5082 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5083 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5084 }
5085
5086 Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5087 const SimplifyQuery &Q) {
5088 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5089 }
5090
5091 /// Given operands for a BinaryOperator, see if we can fold the result.
5092 /// If not, this returns null.
5093 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5094 const SimplifyQuery &Q, unsigned MaxRecurse) {
5095 switch (Opcode) {
5096 case Instruction::Add:
5097 return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
5098 case Instruction::Sub:
5099 return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
5100 case Instruction::Mul:
5101 return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
5102 case Instruction::SDiv:
5103 return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
5104 case Instruction::UDiv:
5105 return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
5106 case Instruction::SRem:
5107 return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
5108 case Instruction::URem:
5109 return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
5110 case Instruction::Shl:
5111 return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
5112 case Instruction::LShr:
5113 return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
5114 case Instruction::AShr:
5115 return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
5116 case Instruction::And:
5117 return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
5118 case Instruction::Or:
5119 return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
5120 case Instruction::Xor:
5121 return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
5122 case Instruction::FAdd:
5123 return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5124 case Instruction::FSub:
5125 return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5126 case Instruction::FMul:
5127 return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5128 case Instruction::FDiv:
5129 return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5130 case Instruction::FRem:
5131 return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5132 default:
5133 llvm_unreachable("Unexpected opcode");
5134 }
5135 }
5136
5137 /// Given operands for a BinaryOperator, see if we can fold the result.
5138 /// If not, this returns null.
5139 /// Try to use FastMathFlags when folding the result.
5140 static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5141 const FastMathFlags &FMF, const SimplifyQuery &Q,
5142 unsigned MaxRecurse) {
5143 switch (Opcode) {
5144 case Instruction::FAdd:
5145 return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
5146 case Instruction::FSub:
5147 return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
5148 case Instruction::FMul:
5149 return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
5150 case Instruction::FDiv:
5151 return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
5152 default:
5153 return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
5154 }
5155 }
5156
5157 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5158 const SimplifyQuery &Q) {
5159 return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
5160 }
5161
5162 Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5163 FastMathFlags FMF, const SimplifyQuery &Q) {
5164 return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
5165 }
5166
5167 /// Given operands for a CmpInst, see if we can fold the result.
5168 static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5169 const SimplifyQuery &Q, unsigned MaxRecurse) {
5170 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
5171 return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
5172 return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5173 }
5174
5175 Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5176 const SimplifyQuery &Q) {
5177 return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
5178 }
5179
5180 static bool IsIdempotent(Intrinsic::ID ID) {
5181 switch (ID) {
5182 default: return false;
5183
5184 // Unary idempotent: f(f(x)) = f(x)
5185 case Intrinsic::fabs:
5186 case Intrinsic::floor:
5187 case Intrinsic::ceil:
5188 case Intrinsic::trunc:
5189 case Intrinsic::rint:
5190 case Intrinsic::nearbyint:
5191 case Intrinsic::round:
5192 case Intrinsic::roundeven:
5193 case Intrinsic::canonicalize:
5194 return true;
5195 }
5196 }
5197
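/// Fold a constant-operand call to llvm.load.relative. In the common
/// relative-table encoding (e.g. relative vtables), the 32-bit entry loaded
/// from Ptr+Offset is "(sub (ptrtoint X), (ptrtoint Ptr))"; the intrinsic
/// adds Ptr back to the loaded value, so the whole call just recovers X.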
5198 static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
5199 const DataLayout &DL) {
5200 GlobalValue *PtrSym;
5201 APInt PtrOffset;
5202 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
5203 return nullptr;
5204
5205 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
5206 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
5207 Type *Int32PtrTy = Int32Ty->getPointerTo();
5208 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
5209
5210 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
5211 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
5212 return nullptr;
5213
5214 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
5215 if (OffsetInt % 4 != 0)
5216 return nullptr;
5217
5218 Constant *C = ConstantExpr::getGetElementPtr(
5219 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
5220 ConstantInt::get(Int64Ty, OffsetInt / 4));
5221 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
5222 if (!Loaded)
5223 return nullptr;
5224
5225 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
5226 if (!LoadedCE)
5227 return nullptr;
5228
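  // The table entry is often a 64-bit "sub" truncated to 32 bits; look
  // through the trunc to reach the sub.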
5229 if (LoadedCE->getOpcode() == Instruction::Trunc) {
5230 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5231 if (!LoadedCE)
5232 return nullptr;
5233 }
5234
5235 if (LoadedCE->getOpcode() != Instruction::Sub)
5236 return nullptr;
5237
5238 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5239 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
5240 return nullptr;
5241 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
5242
5243 Constant *LoadedRHS = LoadedCE->getOperand(1);
5244 GlobalValue *LoadedRHSSym;
5245 APInt LoadedRHSOffset;
5246 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
5247 DL) ||
5248 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
5249 return nullptr;
5250
5251 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
5252 }
5253
5254 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
5255 const SimplifyQuery &Q) {
5256 // Idempotent functions return the same result when called repeatedly.
5257 Intrinsic::ID IID = F->getIntrinsicID();
5258 if (IsIdempotent(IID))
5259 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
5260 if (II->getIntrinsicID() == IID)
5261 return II;
5262
5263 Value *X;
5264 switch (IID) {
5265 case Intrinsic::fabs:
5266 if (SignBitMustBeZero(Op0, Q.TLI)) return Op0;
5267 break;
5268 case Intrinsic::bswap:
5269 // bswap(bswap(x)) -> x
5270 if (match(Op0, m_BSwap(m_Value(X)))) return X;
5271 break;
5272 case Intrinsic::bitreverse:
5273 // bitreverse(bitreverse(x)) -> x
5274 if (match(Op0, m_BitReverse(m_Value(X)))) return X;
5275 break;
5276 case Intrinsic::ctpop: {
5277 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
5278 // ctpop(and X, 1) --> and X, 1
5279 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
5280 if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
5281 Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
5282 return Op0;
5283 break;
5284 }
5285 case Intrinsic::exp:
5286 // exp(log(x)) -> x
5287 if (Q.CxtI->hasAllowReassoc() &&
5288 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X)))) return X;
5289 break;
5290 case Intrinsic::exp2:
5291 // exp2(log2(x)) -> x
5292 if (Q.CxtI->hasAllowReassoc() &&
5293 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X)))) return X;
5294 break;
5295 case Intrinsic::log:
5296 // log(exp(x)) -> x
5297 if (Q.CxtI->hasAllowReassoc() &&
5298 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X)))) return X;
5299 break;
5300 case Intrinsic::log2:
5301 // log2(exp2(x)) -> x
5302 if (Q.CxtI->hasAllowReassoc() &&
5303 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
5304 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0),
5305 m_Value(X))))) return X;
5306 break;
5307 case Intrinsic::log10:
5308 // log10(pow(10.0, x)) -> x
5309 if (Q.CxtI->hasAllowReassoc() &&
5310 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0),
5311 m_Value(X)))) return X;
5312 break;
5313 case Intrinsic::floor:
5314 case Intrinsic::trunc:
5315 case Intrinsic::ceil:
5316 case Intrinsic::round:
5317 case Intrinsic::roundeven:
5318 case Intrinsic::nearbyint:
5319 case Intrinsic::rint: {
5320 // floor (sitofp x) -> sitofp x
5321 // floor (uitofp x) -> uitofp x
5322 //
5323 // Converting from int always results in a finite integral number or
5324 // infinity. For either of those inputs, these rounding functions always
5325 // return the same value, so the rounding can be eliminated.
5326 if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
5327 return Op0;
5328 break;
5329 }
5330 case Intrinsic::experimental_vector_reverse:
5331 // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
5332 if (match(Op0,
5333 m_Intrinsic<Intrinsic::experimental_vector_reverse>(m_Value(X))))
5334 return X;
5335 break;
5336 default:
5337 break;
5338 }
5339
5340 return nullptr;
5341 }
5342
5343 static APInt getMaxMinLimit(Intrinsic::ID IID, unsigned BitWidth) {
5344 switch (IID) {
5345 case Intrinsic::smax: return APInt::getSignedMaxValue(BitWidth);
5346 case Intrinsic::smin: return APInt::getSignedMinValue(BitWidth);
5347 case Intrinsic::umax: return APInt::getMaxValue(BitWidth);
5348 case Intrinsic::umin: return APInt::getMinValue(BitWidth);
5349 default: llvm_unreachable("Unexpected intrinsic");
5350 }
5351 }
5352
5353 static ICmpInst::Predicate getMaxMinPredicate(Intrinsic::ID IID) {
5354 switch (IID) {
5355 case Intrinsic::smax: return ICmpInst::ICMP_SGE;
5356 case Intrinsic::smin: return ICmpInst::ICMP_SLE;
5357 case Intrinsic::umax: return ICmpInst::ICMP_UGE;
5358 case Intrinsic::umin: return ICmpInst::ICMP_ULE;
5359 default: llvm_unreachable("Unexpected intrinsic");
5360 }
5361 }
5362
5363 /// Given a min/max intrinsic, see if it can be removed based on having an
5364 /// operand that is another min/max intrinsic with shared operand(s). The caller
5365 /// is expected to swap the operand arguments to handle commutation.
5366 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
5367 Value *X, *Y;
5368 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
5369 return nullptr;
5370
5371 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
5372 if (!MM0)
5373 return nullptr;
5374 Intrinsic::ID IID0 = MM0->getIntrinsicID();
5375
5376 if (Op1 == X || Op1 == Y ||
5377 match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
5378 // max (max X, Y), X --> max X, Y
5379 if (IID0 == IID)
5380 return MM0;
5381 // max (min X, Y), X --> X
5382 if (IID0 == getInverseMinMaxIntrinsic(IID))
5383 return Op1;
5384 }
5385 return nullptr;
5386 }
5387
5388 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
5389 const SimplifyQuery &Q) {
5390 Intrinsic::ID IID = F->getIntrinsicID();
5391 Type *ReturnType = F->getReturnType();
5392 unsigned BitWidth = ReturnType->getScalarSizeInBits();
5393 switch (IID) {
5394 case Intrinsic::abs:
5395 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
5396 // It is always ok to pick the earlier abs. We'll just lose nsw if it's only
5397 // on the outer abs.
5398 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
5399 return Op0;
5400 break;
5401
5402 case Intrinsic::cttz: {
5403 Value *X;
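    // cttz(1 << X) --> X: the shift places the only set bit X positions up,
    // so there are exactly X trailing zeros.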
5404 if (match(Op0, m_Shl(m_One(), m_Value(X))))
5405 return X;
5406 break;
5407 }
5408 case Intrinsic::ctlz: {
5409 Value *X;
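    // ctlz(lshr C, X) --> X when C is negative (sign bit set): the logical
    // shift right leaves exactly X leading zero bits.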
5410 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
5411 return X;
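    // ctlz(ashr C, X) --> 0 when C is negative: the arithmetic shift keeps
    // the sign bit set, so there are never any leading zeros.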
5412 if (match(Op0, m_AShr(m_Negative(), m_Value())))
5413 return Constant::getNullValue(ReturnType);
5414 break;
5415 }
5416 case Intrinsic::smax:
5417 case Intrinsic::smin:
5418 case Intrinsic::umax:
5419 case Intrinsic::umin: {
5420 // If the arguments are the same, this is a no-op.
5421 if (Op0 == Op1)
5422 return Op0;
5423
5424 // Canonicalize constant operand as Op1.
5425 if (isa<Constant>(Op0))
5426 std::swap(Op0, Op1);
5427
5428 // Assume undef is the limit value.
5429 if (Q.isUndefValue(Op1))
5430 return ConstantInt::get(ReturnType, getMaxMinLimit(IID, BitWidth));
5431
5432 const APInt *C;
5433 if (match(Op1, m_APIntAllowUndef(C))) {
5434 // Clamp to limit value. For example:
5435 // umax(i8 %x, i8 255) --> 255
5436 if (*C == getMaxMinLimit(IID, BitWidth))
5437 return ConstantInt::get(ReturnType, *C);
5438
5439 // If the constant op is the opposite of the limit value, the other must
5440 // be larger/smaller or equal. For example:
5441 // umin(i8 %x, i8 255) --> %x
5442 if (*C == getMaxMinLimit(getInverseMinMaxIntrinsic(IID), BitWidth))
5443 return Op0;
5444
5445 // Remove nested call if constant operands allow it. Example:
5446 // max (max X, 7), 5 -> max X, 7
5447 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
5448 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
5449 // TODO: loosen undef/splat restrictions for vector constants.
5450 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
5451 const APInt *InnerC;
5452 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
5453 ((IID == Intrinsic::smax && InnerC->sge(*C)) ||
5454 (IID == Intrinsic::smin && InnerC->sle(*C)) ||
5455 (IID == Intrinsic::umax && InnerC->uge(*C)) ||
5456 (IID == Intrinsic::umin && InnerC->ule(*C))))
5457 return Op0;
5458 }
5459 }
5460
5461 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
5462 return V;
5463 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
5464 return V;
5465
5466 ICmpInst::Predicate Pred = getMaxMinPredicate(IID);
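    // If the corresponding comparison (e.g. "sge" for smax) is known true in
    // either direction, that operand is the min/max result.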
5467 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
5468 return Op0;
5469 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
5470 return Op1;
5471
5472 if (Optional<bool> Imp =
5473 isImpliedByDomCondition(Pred, Op0, Op1, Q.CxtI, Q.DL))
5474 return *Imp ? Op0 : Op1;
5475 if (Optional<bool> Imp =
5476 isImpliedByDomCondition(Pred, Op1, Op0, Q.CxtI, Q.DL))
5477 return *Imp ? Op1 : Op0;
5478
5479 break;
5480 }
5481 case Intrinsic::usub_with_overflow:
5482 case Intrinsic::ssub_with_overflow:
5483 // X - X -> { 0, false }
5484 // X - undef -> { 0, false }
5485 // undef - X -> { 0, false }
5486 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5487 return Constant::getNullValue(ReturnType);
5488 break;
5489 case Intrinsic::uadd_with_overflow:
5490 case Intrinsic::sadd_with_overflow:
5491 // X + undef -> { -1, false }
5492 // undef + x -> { -1, false }
5493 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
5494 return ConstantStruct::get(
5495 cast<StructType>(ReturnType),
5496 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
5497 Constant::getNullValue(ReturnType->getStructElementType(1))});
5498 }
5499 break;
5500 case Intrinsic::umul_with_overflow:
5501 case Intrinsic::smul_with_overflow:
5502 // 0 * X -> { 0, false }
5503 // X * 0 -> { 0, false }
5504 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
5505 return Constant::getNullValue(ReturnType);
5506 // undef * X -> { 0, false }
5507 // X * undef -> { 0, false }
5508 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5509 return Constant::getNullValue(ReturnType);
5510 break;
5511 case Intrinsic::uadd_sat:
5512 // sat(MAX + X) -> MAX
5513 // sat(X + MAX) -> MAX
5514 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
5515 return Constant::getAllOnesValue(ReturnType);
5516 LLVM_FALLTHROUGH;
5517 case Intrinsic::sadd_sat:
5518 // sat(X + undef) -> -1
5519 // sat(undef + X) -> -1
5520 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
5521 // For signed: Assume undef is ~X, in which case X + ~X = -1.
5522 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5523 return Constant::getAllOnesValue(ReturnType);
5524
5525 // X + 0 -> X
5526 if (match(Op1, m_Zero()))
5527 return Op0;
5528 // 0 + X -> X
5529 if (match(Op0, m_Zero()))
5530 return Op1;
5531 break;
5532 case Intrinsic::usub_sat:
5533 // sat(0 - X) -> 0, sat(X - MAX) -> 0
5534 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
5535 return Constant::getNullValue(ReturnType);
5536 LLVM_FALLTHROUGH;
5537 case Intrinsic::ssub_sat:
5538 // X - X -> 0, X - undef -> 0, undef - X -> 0
5539 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
5540 return Constant::getNullValue(ReturnType);
5541 // X - 0 -> X
5542 if (match(Op1, m_Zero()))
5543 return Op0;
5544 break;
5545 case Intrinsic::load_relative:
5546 if (auto *C0 = dyn_cast<Constant>(Op0))
5547 if (auto *C1 = dyn_cast<Constant>(Op1))
5548 return SimplifyRelativeLoad(C0, C1, Q.DL);
5549 break;
5550 case Intrinsic::powi:
5551 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
5552 // powi(x, 0) -> 1.0
5553 if (Power->isZero())
5554 return ConstantFP::get(Op0->getType(), 1.0);
5555 // powi(x, 1) -> x
5556 if (Power->isOne())
5557 return Op0;
5558 }
5559 break;
5560 case Intrinsic::copysign:
5561 // copysign X, X --> X
5562 if (Op0 == Op1)
5563 return Op0;
5564 // copysign -X, X --> X
5565 // copysign X, -X --> -X
5566 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5567 match(Op1, m_FNeg(m_Specific(Op0))))
5568 return Op1;
5569 break;
5570 case Intrinsic::maxnum:
5571 case Intrinsic::minnum:
5572 case Intrinsic::maximum:
5573 case Intrinsic::minimum: {
5574 // If the arguments are the same, this is a no-op.
5575 if (Op0 == Op1) return Op0;
5576
5577 // Canonicalize constant operand as Op1.
5578 if (isa<Constant>(Op0))
5579 std::swap(Op0, Op1);
5580
5581 // If an argument is undef, return the other argument.
5582 if (Q.isUndefValue(Op1))
5583 return Op0;
5584
5585 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
5586 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
5587
5588 // minnum(X, nan) -> X
5589 // maxnum(X, nan) -> X
5590 // minimum(X, nan) -> nan
5591 // maximum(X, nan) -> nan
5592 if (match(Op1, m_NaN()))
5593 return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
5594
5595 // In the following folds, inf can be replaced with the largest finite
5596 // float, if the ninf flag is set.
5597 const APFloat *C;
5598 if (match(Op1, m_APFloat(C)) &&
5599 (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
5600 // minnum(X, -inf) -> -inf
5601 // maxnum(X, +inf) -> +inf
5602 // minimum(X, -inf) -> -inf if nnan
5603 // maximum(X, +inf) -> +inf if nnan
5604 if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
5605 return ConstantFP::get(ReturnType, *C);
5606
5607 // minnum(X, +inf) -> X if nnan
5608 // maxnum(X, -inf) -> X if nnan
5609 // minimum(X, +inf) -> X
5610 // maximum(X, -inf) -> X
5611 if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
5612 return Op0;
5613 }
5614
5615 // Min/max of the same operation with common operand:
5616 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
    if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
      if (M0->getIntrinsicID() == IID &&
          (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
        return Op0;
    if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
      if (M1->getIntrinsicID() == IID &&
          (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
        return Op1;

    break;
  }
  case Intrinsic::experimental_vector_extract: {
    Type *ReturnType = F->getReturnType();

    // (extract_vector (insert_vector _, X, 0), 0) -> X
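    // That is, extracting at index 0 the subvector that was just inserted
    // at index 0 yields the inserted value X, provided X's type matches the
    // result type of the extract.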
    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
    Value *X = nullptr;
    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
                       m_Value(), m_Value(X), m_Zero())) &&
        IdxN == 0 && X->getType() == ReturnType)
      return X;

    break;
  }
  default:
    break;
  }

  return nullptr;
}

static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {

  // Intrinsics with no operands have some kind of side effect. Don't simplify.
  unsigned NumOperands = Call->getNumArgOperands();
  if (!NumOperands)
    return nullptr;

  Function *F = cast<Function>(Call->getCalledFunction());
  Intrinsic::ID IID = F->getIntrinsicID();
  if (NumOperands == 1)
    return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q);

  if (NumOperands == 2)
    return simplifyBinaryIntrinsic(F, Call->getArgOperand(0),
                                   Call->getArgOperand(1), Q);

  // Handle intrinsics with 3 or more arguments.
  switch (IID) {
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather: {
    Value *MaskArg = Call->getArgOperand(2);
    Value *PassthruArg = Call->getArgOperand(3);
    // If the mask is all zeros or undef, the "passthru" argument is the result.
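    // e.g. a masked load whose mask is zeroinitializer reads no memory at
    // all, so the call is equivalent to its passthru operand.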
    if (maskIsAllZeroOrUndef(MaskArg))
      return PassthruArg;
    return nullptr;
  }
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1),
          *ShAmtArg = Call->getArgOperand(2);

    // If both operands are undef, the result is undef.
    if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
      return UndefValue::get(F->getReturnType());

    // If shift amount is undef, assume it is zero.
    if (Q.isUndefValue(ShAmtArg))
      return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);

    const APInt *ShAmtC;
    if (match(ShAmtArg, m_APInt(ShAmtC))) {
      // If there's effectively no shift, return the 1st arg or 2nd arg.
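      // Funnel-shift amounts are interpreted modulo the bitwidth, so e.g.
      // fshl(i8 %x, i8 %y, i8 8) shifts by 8 % 8 == 0 and returns %x.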
      APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
      if (ShAmtC->urem(BitWidth).isNullValue())
        return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
    }
    return nullptr;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd: {
    Value *Op0 = Call->getArgOperand(0);
    Value *Op1 = Call->getArgOperand(1);
    Value *Op2 = Call->getArgOperand(2);
    if (Value *V = simplifyFPOp({ Op0, Op1, Op2 }, {}, Q))
      return V;
    return nullptr;
  }
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat: {
    Value *Op0 = Call->getArgOperand(0);
    Value *Op1 = Call->getArgOperand(1);
    Value *Op2 = Call->getArgOperand(2);
    Type *ReturnType = F->getReturnType();

    // Canonicalize constant operand as Op1 (ConstantFolding handles the case
    // when both Op0 and Op1 are constant so we do not care about that special
    // case here).
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);

    // X * 0 -> 0
    if (match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);

    // X * undef -> 0
    if (Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);

    // X * (1 << Scale) -> X
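    // (1 << Scale) is the fixed-point encoding of 1.0; the isNonNegative()
    // check below skips the case where the only set bit is the sign bit,
    // whose signed fixed-point value is -1.0 rather than 1.0.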
    APInt ScaledOne =
        APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
                            cast<ConstantInt>(Op2)->getZExtValue());
    if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
      return Op0;

    return nullptr;
  }
  case Intrinsic::experimental_vector_insert: {
    Value *Vec = Call->getArgOperand(0);
    Value *SubVec = Call->getArgOperand(1);
    Value *Idx = Call->getArgOperand(2);
    Type *ReturnType = F->getReturnType();

    // (insert_vector Y, (extract_vector X, 0), 0) -> X
    // where: Y is X, or Y is undef
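    // Re-inserting a subvector just extracted from X back over X at index 0
    // reconstructs X; the same holds when inserting over undef, whose
    // remaining lanes may be assumed equal to the corresponding lanes of X.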
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
    Value *X = nullptr;
    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
                          m_Value(X), m_Zero())) &&
        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
        X->getType() == ReturnType)
      return X;

    return nullptr;
  }
  default:
    return nullptr;
  }
}

static Value *tryConstantFoldCall(CallBase *Call, const SimplifyQuery &Q) {
  auto *F = dyn_cast<Function>(Call->getCalledOperand());
  if (!F || !canConstantFoldCallTo(Call, F))
    return nullptr;

  SmallVector<Constant *, 4> ConstantArgs;
  unsigned NumArgs = Call->getNumArgOperands();
  ConstantArgs.reserve(NumArgs);
  for (auto &Arg : Call->args()) {
    Constant *C = dyn_cast<Constant>(&Arg);
    if (!C) {
      if (isa<MetadataAsValue>(Arg.get()))
        continue;
      return nullptr;
    }
    ConstantArgs.push_back(C);
  }

  return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
}

Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) {
  // musttail calls can only be simplified if they are also DCEd.
  // As we can't guarantee this here, don't simplify them.
  if (Call->isMustTailCall())
    return nullptr;

  // call undef -> poison
  // call null -> poison
  Value *Callee = Call->getCalledOperand();
  if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
    return PoisonValue::get(Call->getType());

  if (Value *V = tryConstantFoldCall(Call, Q))
    return V;

  auto *F = dyn_cast<Function>(Callee);
  if (F && F->isIntrinsic())
    if (Value *Ret = simplifyIntrinsic(Call, Q))
      return Ret;

  return nullptr;
}

/// Given operands for a Freeze, see if we can fold the result.
static Value *SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  // Use a utility function defined in ValueTracking.
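  // e.g. freezing a fully-defined constant, or an argument attributed
  // noundef, is a no-op because such values can never be undef or poison.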
  if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
    return Op0;
  // We have room for improvement.
  return nullptr;
}

Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  return ::SimplifyFreezeInst(Op0, Q);
}

/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                                 OptimizationRemarkEmitter *ORE) {
  const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
  Value *Result;

  switch (I->getOpcode()) {
  default:
    Result = ConstantFoldInstruction(I, Q.DL, Q.TLI);
    break;
  case Instruction::FNeg:
    Result = SimplifyFNegInst(I->getOperand(0), I->getFastMathFlags(), Q);
    break;
  case Instruction::FAdd:
    Result = SimplifyFAddInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Add:
    Result =
        SimplifyAddInst(I->getOperand(0), I->getOperand(1),
                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::FSub:
    Result = SimplifyFSubInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Sub:
    Result =
        SimplifySubInst(I->getOperand(0), I->getOperand(1),
                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::FMul:
    Result = SimplifyFMulInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Mul:
    Result = SimplifyMulInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::SDiv:
    Result = SimplifySDivInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::UDiv:
    Result = SimplifyUDivInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::FDiv:
    Result = SimplifyFDivInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::SRem:
    Result = SimplifySRemInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::URem:
    Result = SimplifyURemInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::FRem:
    Result = SimplifyFRemInst(I->getOperand(0), I->getOperand(1),
                              I->getFastMathFlags(), Q);
    break;
  case Instruction::Shl:
    Result =
        SimplifyShlInst(I->getOperand(0), I->getOperand(1),
                        Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
                        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::LShr:
    Result = SimplifyLShrInst(I->getOperand(0), I->getOperand(1),
                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::AShr:
    Result = SimplifyAShrInst(I->getOperand(0), I->getOperand(1),
                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::And:
    Result = SimplifyAndInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::Or:
    Result = SimplifyOrInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::Xor:
    Result = SimplifyXorInst(I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::ICmp:
    Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(),
                              I->getOperand(0), I->getOperand(1), Q);
    break;
  case Instruction::FCmp:
    Result =
        SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), I->getOperand(0),
                         I->getOperand(1), I->getFastMathFlags(), Q);
    break;
  case Instruction::Select:
    Result = SimplifySelectInst(I->getOperand(0), I->getOperand(1),
                                I->getOperand(2), Q);
    break;
  case Instruction::GetElementPtr: {
    SmallVector<Value *, 8> Ops(I->operands());
    Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
                             Ops, Q);
    break;
  }
  case Instruction::InsertValue: {
    InsertValueInst *IV = cast<InsertValueInst>(I);
    Result = SimplifyInsertValueInst(IV->getAggregateOperand(),
                                     IV->getInsertedValueOperand(),
                                     IV->getIndices(), Q);
    break;
  }
  case Instruction::InsertElement: {
    auto *IE = cast<InsertElementInst>(I);
    Result = SimplifyInsertElementInst(IE->getOperand(0), IE->getOperand(1),
                                       IE->getOperand(2), Q);
    break;
  }
  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(I);
    Result = SimplifyExtractValueInst(EVI->getAggregateOperand(),
                                      EVI->getIndices(), Q);
    break;
  }
  case Instruction::ExtractElement: {
    auto *EEI = cast<ExtractElementInst>(I);
    Result = SimplifyExtractElementInst(EEI->getVectorOperand(),
                                        EEI->getIndexOperand(), Q);
    break;
  }
  case Instruction::ShuffleVector: {
    auto *SVI = cast<ShuffleVectorInst>(I);
    Result =
        SimplifyShuffleVectorInst(SVI->getOperand(0), SVI->getOperand(1),
                                  SVI->getShuffleMask(), SVI->getType(), Q);
    break;
  }
  case Instruction::PHI:
    Result = SimplifyPHINode(cast<PHINode>(I), Q);
    break;
  case Instruction::Call: {
    Result = SimplifyCall(cast<CallInst>(I), Q);
    break;
  }
  case Instruction::Freeze:
    Result = SimplifyFreezeInst(I->getOperand(0), Q);
    break;
#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
#include "llvm/IR/Instruction.def"
#undef HANDLE_CAST_INST
    Result =
        SimplifyCastInst(I->getOpcode(), I->getOperand(0), I->getType(), Q);
    break;
  case Instruction::Alloca:
    // No simplifications for Alloca and it can't be constant folded.
    Result = nullptr;
    break;
  }

  /// If called on unreachable code, the above logic may report that the
  /// instruction simplified to itself. Make life easier for users by
  /// detecting that case here, returning a safe value instead.
  return Result == I ? UndefValue::get(I->getType()) : Result;
}
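
// Illustrative use from a transform pass (a minimal sketch, not code from
// this file; instructions(F) comes from llvm/IR/InstIterator.h and the
// analysis objects are assumed to be available in the caller):
//
//   const DataLayout &DL = F.getParent()->getDataLayout();
//   for (Instruction &I : instructions(F))
//     if (Value *V = SimplifyInstruction(&I, {DL, &TLI, &DT, &AC}))
//       I.replaceAllUsesWith(V);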

/// Implementation of recursive simplification through an instruction's
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
/// InstructionSimplify. Recursively visited users which could not be
/// simplified themselves are added to the optional UnsimplifiedUsers set for
/// further processing by the caller.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
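///
/// For example (hypothetical IR): after replacing %i = mul i32 %x, 1 with
/// %x, a user such as %j = add i32 %i, 0 becomes add i32 %x, 0 and is in
/// turn simplified to %x on the recursive sweep.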
static bool replaceAndRecursivelySimplifyImpl(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
  bool Simplified = false;
  SmallSetVector<Instruction *, 8> Worklist;
  const DataLayout &DL = I->getModule()->getDataLayout();

  // If we have an explicit value to collapse to, do that round of the
  // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }

  // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
    if (!SimpleV) {
      if (UnsimplifiedUsers)
        UnsimplifiedUsers->insert(I);
      continue;
    }

    Simplified = true;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of the simplified value on the recursive step in most cases.
    for (User *U : I->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  }
  return Simplified;
}

bool llvm::replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
                                           UnsimplifiedUsers);
}

namespace llvm {
const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
  auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
  auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
  auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}
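
// A minimal usage sketch for a legacy pass (hypothetical caller; 'F' and 'I'
// are assumed to be the current function and instruction):
//   const SimplifyQuery Q = getBestSimplifyQuery(*this, F);
//   if (Value *V = SimplifyInstruction(&I, Q))
//     I.replaceAllUsesWith(V);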

const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
                                         const DataLayout &DL) {
  return {DL, &AR.TLI, &AR.DT, &AR.AC};
}

template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
                                         Function &F) {
  auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
  auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
  auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
                                                  Function &);
}
