//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OverflowInstAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/ValueHandle.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");
static Value *SimplifyAndInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *SimplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *SimplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *SimplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyXorInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *SimplifyCastInst(unsigned, Value *, Type *,
                               const SimplifyQuery &, unsigned);
static Value *SimplifyGEPInst(Type *, ArrayRef<Value *>, const SimplifyQuery &,
                              unsigned);
static Value *SimplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) {
  return ConstantInt::getFalse(Ty);
}

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) {
  return ConstantInt::getTrue(Ty);
}

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
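/// This also matches the swapped form, e.g. "icmp sgt %b, %a" is the same
/// compare as "icmp slt %a, %b".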
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
                                 Value *RHS, Value *Cond,
                                 const SimplifyQuery &Q, unsigned MaxRecurse,
                                 Constant *TrueOrFalse) {
  Value *SimplifiedCmp = SimplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond) then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
                                     Value *RHS, Value *Cond,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
                                      Value *RHS, Value *Cond,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know the comparison with both branches of a select can be simplified,
/// but the results are not equal. This routine handles some logical
/// simplifications.
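/// For example, given "%sel = select i1 %c, i32 1, i32 0" and
/// "%cmp = icmp ne i32 %sel, 0", the true branch simplifies to true and the
/// false branch to false, so %cmp folds to "%c && true", i.e. %c itself.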
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = SimplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = SimplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = SimplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we are processing instructions (and/or basic blocks) that have not been
  // fully added to a function, the parent nodes may still be null. Simply
  // return the conservative answer in these cases.
  if (!I->getParent() || !P->getParent() || !I->getFunction())
    return false;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke
  // or callbr, then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
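/// For example, "((A & Z) | (B & Z)) & Z" expands to
/// "((A & Z) & Z) | ((B & Z) & Z)"; each half simplifies back to the matching
/// operand of the original Or, so the whole expression folds to the existing
/// "(A & Z) | (B & Z)".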
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L = SimplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(),
                           MaxRecurse);
  if (!L)
    return nullptr;
  Value *R = SimplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(),
                           MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = SimplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode,
                                     Value *L, Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
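/// For example, "(X & Y) & X" reassociates to "(X & X) & Y", which then folds
/// back to the existing value "X & Y".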
static Value *SimplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = SimplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B) return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = SimplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B) return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A) return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = SimplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C) return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = SimplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
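/// For example, "and i32 (select i1 %c, i32 1, i32 -1), 1" evaluates to 1 on
/// both branches of the select, so the whole operation folds to 1.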
static Value *ThreadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = SimplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = SimplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = SimplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *ThreadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
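/// For example, if %p is "phi i8 [ 2, %bb0 ], [ 3, %bb1 ]", then
/// "udiv i8 %p, 4" evaluates to 0 for every incoming value, so it folds to 0.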
static Value *ThreadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Value *Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    Value *V = PI == LHS ?
      SimplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse) :
      SimplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
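/// For example, if %p is "phi i8 [ 1, %bb0 ], [ 2, %bb1 ]", then
/// "icmp ult i8 %p, 4" is true for every incoming value, so it folds to true.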
static Value *ThreadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the comparison on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI) continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = SimplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the comparison failed to simplify, or simplified to a different value
    // than previously, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}
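/// Fold a binary operation on two constant operands, or, failing that,
/// canonicalize a lone constant operand to the RHS if the operation is
/// commutative. Returns null if nothing was folded.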
static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1))
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) ||
      match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::SimplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns the constant '0' if there are no constant offsets applied.
///
/// This is very similar to GetPointerBaseWithConstantOffset except it doesn't
/// follow non-inbounds geps. This allows it to remain usable for icmp ult/etc.
/// folding.
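/// For example, given "%p = getelementptr inbounds i8, i8* %base, i64 4",
/// this sets V to %base and returns the index-type constant 4.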
static Constant *stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                                bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  Type *IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  APInt Offset = APInt::getNullValue(IntIdxTy->getIntegerBitWidth());

  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As that strip may trace through `addrspacecast`, the calculated offset may
  // need to be sign-extended or truncated to the index type of the new
  // address space.
  IntIdxTy = DL.getIndexType(V->getType())->getScalarType();
  Offset = Offset.sextOrTrunc(IntIdxTy->getIntegerBitWidth());

  Constant *OffsetIntPtr = ConstantInt::get(IntIdxTy, Offset);
  if (VectorType *VecTy = dyn_cast<VectorType>(V->getType()))
    return ConstantVector::getSplat(VecTy->getElementCount(), OffsetIntPtr);
  return OffsetIntPtr;
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  return ConstantExpr::getSub(LHSOffset, RHSOffset);
}

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (isNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (isNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = SimplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = SimplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = SimplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = SimplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) &&
      match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::SimplifySubInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
                             const SimplifyQuery &Q) {
  return ::SimplifySubInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                              unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  // i1 mul -> and.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = SimplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V = SimplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q,
                                          MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q,
                                         MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q,
                                      MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::SimplifyMulInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyMulInst(Op0, Op1, Q, RecursionLimit);
}

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);
  // If any element of a fixed-width vector constant divisor is zero or undef,
  // the behavior is undefined and we can fold the whole op to poison.
  auto *Op1C = dyn_cast<Constant>(Op1);
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
  if (Op1C && VTy) {
    unsigned NumElts = VTy->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
        return PoisonValue::get(Ty);
    }
  }

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If this is a boolean op (single-bit element type), we can't have
  // division-by-zero or remainder-by-zero, so assume the divisor is 1.
  // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
  Value *X;
  if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
      (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  // X * Y / Y -> X
  // X * Y % Y -> 0
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  return nullptr;
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = SimplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
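/// For example, "sdiv i8 7, %y" is known to be 0 when %y can be proven to be
/// less than -7 or greater than 7.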
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
    return V;

  bool IsSigned = Opcode == Instruction::SDiv;

  // (X rem Y) / Y -> 0
  if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Constant::getNullValue(Op0->getType());

  // (X /u C1) /u C2 -> 0 if C1 * C2 overflows
  ConstantInt *C1, *C2;
  if (!IsSigned && match(Op0, m_UDiv(m_Value(), m_ConstantInt(C1))) &&
      match(Op1, m_ConstantInt(C2))) {
    bool Overflow;
    (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
    if (Overflow)
      return Constant::getNullValue(Op0->getType());
  }

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q))
    return V;

  // (X % Y) % Y -> X % Y
  if ((Opcode == Instruction::SRem &&
       match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
      (Opcode == Instruction::URem &&
       match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
    return Op0;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo &&
      ((Opcode == Instruction::SRem &&
        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
       (Opcode == Instruction::URem &&
        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
    return Constant::getNullValue(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If X / Y == 0, then X % Y == X.
  if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
    return Op0;

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If two operands are negated and no signed overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyUDivInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyUDivInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::SimplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::SimplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
  Constant *C = dyn_cast<Constant>(Amount);
  if (!C)
    return false;

  // X shift by undef -> poison because it may shift by the bitwidth.
  if (Q.isUndefValue(C))
    return true;

  // Shifting by the bitwidth or more is poison.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(C))
    if (CI->getValue().uge(CI->getType()->getScalarSizeInBits()))
      return true;

  // If all lanes of a vector shift amount yield poison, the whole shift does.
  if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
    for (unsigned I = 0,
                  E = cast<FixedVectorType>(C->getType())->getNumElements();
         I != E; ++I)
      if (!isPoisonShift(C->getAggregateElement(I), Q))
        return false;
    return true;
  }

  return false;
}

/// Given operands for a Shl, LShr or AShr, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
                            Value *Op1, bool IsNSW, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  // poison shift by X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // 0 shift by X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X shift by 0 -> X
  // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
  // would be poison.
  Value *X;
  if (match(Op1, m_Zero()) ||
      (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
    return Op0;

  // Fold undefined shifts.
  if (isPoisonShift(Op1, Q))
    return PoisonValue::get(Op0->getType());

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = ThreadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = ThreadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If any bits in the shift amount make that value greater than or equal to
  // the number of bits in the type, the shift is poison.
  KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
  if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
    return PoisonValue::get(Op0->getType());

  // Only the low Log2_32_Ceil(BitWidth) bits of the shift amount can produce
  // an in-range shift. If those bits are all known zero, any non-poison shift
  // amount must be zero, so the first operand is unchanged.
  unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
  if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
    return Op0;

  // Check for nsw shl leading to a poison value.
  if (IsNSW) {
    assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
    KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
    KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);

    if (KnownVal.Zero.isSignBitSet())
      KnownShl.Zero.setSignBit();
    if (KnownVal.One.isSignBitSet())
      KnownShl.One.setSignBit();

    if (KnownShl.hasConflict())
      return PoisonValue::get(Op0->getType());
  }

  return nullptr;
}

/// Given operands for an LShr or AShr, see if we can
/// fold the result. If not, this returns null.
static Value *SimplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
                                 Value *Op1, bool isExact,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Value *V =
          SimplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
    return V;

  // X >> X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // undef >> X -> 0
  // undef >> X -> undef (if it's exact)
  if (Q.isUndefValue(Op0))
    return isExact ? Op0 : Constant::getNullValue(Op0->getType());

  // The low bit cannot be shifted out of an exact shift if it is set.
  if (isExact) {
    KnownBits Op0Known =
        computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
    if (Op0Known.One[0])
      return Op0;
  }

  return nullptr;
}
1352
1353 /// Given operands for an Shl, see if we can fold the result.
1354 /// If not, this returns null.
SimplifyShlInst(Value * Op0,Value * Op1,bool isNSW,bool isNUW,const SimplifyQuery & Q,unsigned MaxRecurse)1355 static Value *SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1356 const SimplifyQuery &Q, unsigned MaxRecurse) {
1357 if (Value *V =
1358 SimplifyShift(Instruction::Shl, Op0, Op1, isNSW, Q, MaxRecurse))
1359 return V;
1360
1361 // undef << X -> 0
1362   // undef << X -> undef (if it's NSW/NUW)
1363 if (Q.isUndefValue(Op0))
1364 return isNSW || isNUW ? Op0 : Constant::getNullValue(Op0->getType());
1365
1366 // (X >> A) << A -> X
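       // The 'exact' flag guarantees no set bits were shifted out, so shifting
       // back left by the same amount reconstructs X exactly.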
1367 Value *X;
1368 if (Q.IIQ.UseInstrInfo &&
1369 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1370 return X;
1371
1372 // shl nuw i8 C, %x -> C iff C has sign bit set.
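       // e.g., shl nuw i8 -128, %x: any nonzero %x would shift the set sign bit
       // out (unsigned wrap), which nuw forbids, so %x must be 0.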
1373 if (isNUW && match(Op0, m_Negative()))
1374 return Op0;
1375 // NOTE: could use computeKnownBits() / LazyValueInfo,
1376 // but the cost-benefit analysis suggests it isn't worth it.
1377
1378 return nullptr;
1379 }
1380
1381 Value *llvm::SimplifyShlInst(Value *Op0, Value *Op1, bool isNSW, bool isNUW,
1382 const SimplifyQuery &Q) {
1383 return ::SimplifyShlInst(Op0, Op1, isNSW, isNUW, Q, RecursionLimit);
1384 }
1385
1386 /// Given operands for an LShr, see if we can fold the result.
1387 /// If not, this returns null.
1388 static Value *SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1389 const SimplifyQuery &Q, unsigned MaxRecurse) {
1390 if (Value *V = SimplifyRightShift(Instruction::LShr, Op0, Op1, isExact, Q,
1391 MaxRecurse))
1392 return V;
1393
1394 // (X << A) >> A -> X
1395 Value *X;
1396 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1397 return X;
1398
1399 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1400 // We can return X as we do in the above case since OR alters no bits in X.
1401 // SimplifyDemandedBits in InstCombine can do more general optimization for
1402 // bit manipulation. This pattern aims to provide opportunities for other
1403 // optimizers by supporting a simple but common case in InstSimplify.
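       // e.g., lshr (or (shl nuw i32 %x, 8), %y), 8 --> %x when the high 24 bits
       // of %y are known zero (the effective width of %y is at most 8).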
1404 Value *Y;
1405 const APInt *ShRAmt, *ShLAmt;
1406 if (match(Op1, m_APInt(ShRAmt)) &&
1407 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1408 *ShRAmt == *ShLAmt) {
1409 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1410 const unsigned Width = Op0->getType()->getScalarSizeInBits();
1411 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
1412 if (ShRAmt->uge(EffWidthY))
1413 return X;
1414 }
1415
1416 return nullptr;
1417 }
1418
1419 Value *llvm::SimplifyLShrInst(Value *Op0, Value *Op1, bool isExact,
1420 const SimplifyQuery &Q) {
1421 return ::SimplifyLShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1422 }
1423
1424 /// Given operands for an AShr, see if we can fold the result.
1425 /// If not, this returns null.
1426 static Value *SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1427 const SimplifyQuery &Q, unsigned MaxRecurse) {
1428 if (Value *V = SimplifyRightShift(Instruction::AShr, Op0, Op1, isExact, Q,
1429 MaxRecurse))
1430 return V;
1431
1432 // all ones >>a X -> -1
1433 // Do not return Op0 because it may contain undef elements if it's a vector.
1434 if (match(Op0, m_AllOnes()))
1435 return Constant::getAllOnesValue(Op0->getType());
1436
1437 // (X << A) >> A -> X
1438 Value *X;
1439 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1440 return X;
1441
1442 // Arithmetic shifting an all-sign-bit value is a no-op.
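       // e.g., if %x is known to be 0 or -1 (all 32 bits are sign bits in i32),
       // then ashr %x, %y leaves %x unchanged.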
1443 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1444 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1445 return Op0;
1446
1447 return nullptr;
1448 }
1449
1450 Value *llvm::SimplifyAShrInst(Value *Op0, Value *Op1, bool isExact,
1451 const SimplifyQuery &Q) {
1452 return ::SimplifyAShrInst(Op0, Op1, isExact, Q, RecursionLimit);
1453 }
1454
1455 /// Commuted variants are assumed to be handled by calling this function again
1456 /// with the parameters swapped.
1457 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1458 ICmpInst *UnsignedICmp, bool IsAnd,
1459 const SimplifyQuery &Q) {
1460 Value *X, *Y;
1461
1462 ICmpInst::Predicate EqPred;
1463 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1464 !ICmpInst::isEquality(EqPred))
1465 return nullptr;
1466
1467 ICmpInst::Predicate UnsignedPred;
1468
1469 Value *A, *B;
1470 // Y = (A - B);
1471 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1472 if (match(UnsignedICmp,
1473 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1474 ICmpInst::isUnsigned(UnsignedPred)) {
1475 // A >=/<= B || (A - B) != 0 <--> true
1476 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1477 UnsignedPred == ICmpInst::ICMP_ULE) &&
1478 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1479 return ConstantInt::getTrue(UnsignedICmp->getType());
1480 // A </> B && (A - B) == 0 <--> false
1481 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1482 UnsignedPred == ICmpInst::ICMP_UGT) &&
1483 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1484 return ConstantInt::getFalse(UnsignedICmp->getType());
1485
1486 // A </> B && (A - B) != 0 <--> A </> B
1487 // A </> B || (A - B) != 0 <--> (A - B) != 0
1488 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1489 UnsignedPred == ICmpInst::ICMP_UGT))
1490 return IsAnd ? UnsignedICmp : ZeroICmp;
1491
1492 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1493 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1494 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1495 UnsignedPred == ICmpInst::ICMP_UGE))
1496 return IsAnd ? ZeroICmp : UnsignedICmp;
1497 }
1498
1499 // Given Y = (A - B)
1500 // Y >= A && Y != 0 --> Y >= A iff B != 0
1501 // Y < A || Y == 0 --> Y < A iff B != 0
1502 if (match(UnsignedICmp,
1503 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1504 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1505 EqPred == ICmpInst::ICMP_NE &&
1506 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1507 return UnsignedICmp;
1508 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1509 EqPred == ICmpInst::ICMP_EQ &&
1510 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1511 return UnsignedICmp;
1512 }
1513 }
1514
1515 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1516 ICmpInst::isUnsigned(UnsignedPred))
1517 ;
1518 else if (match(UnsignedICmp,
1519 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1520 ICmpInst::isUnsigned(UnsignedPred))
1521 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1522 else
1523 return nullptr;
1524
1525 // X > Y && Y == 0 --> Y == 0 iff X != 0
1526 // X > Y || Y == 0 --> X > Y iff X != 0
1527 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1528 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1529 return IsAnd ? ZeroICmp : UnsignedICmp;
1530
1531 // X <= Y && Y != 0 --> X <= Y iff X != 0
1532 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1533 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1534 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1535 return IsAnd ? UnsignedICmp : ZeroICmp;
1536
1537 // The transforms below here are expected to be handled more generally with
1538 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1539 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1540 // these are candidates for removal.
1541
1542 // X < Y && Y != 0 --> X < Y
1543 // X < Y || Y != 0 --> Y != 0
1544 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1545 return IsAnd ? UnsignedICmp : ZeroICmp;
1546
1547 // X >= Y && Y == 0 --> Y == 0
1548 // X >= Y || Y == 0 --> X >= Y
1549 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1550 return IsAnd ? ZeroICmp : UnsignedICmp;
1551
1552 // X < Y && Y == 0 --> false
1553 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1554 IsAnd)
1555 return getFalse(UnsignedICmp->getType());
1556
1557 // X >= Y || Y != 0 --> true
1558 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1559 !IsAnd)
1560 return getTrue(UnsignedICmp->getType());
1561
1562 return nullptr;
1563 }
1564
1565 /// Commuted variants are assumed to be handled by calling this function again
1566 /// with the parameters swapped.
1567 static Value *simplifyAndOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1568 ICmpInst::Predicate Pred0, Pred1;
1569   Value *A, *B;
1570 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1571 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1572 return nullptr;
1573
1574 // We have (icmp Pred0, A, B) & (icmp Pred1, A, B).
1575 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1576 // can eliminate Op1 from this 'and'.
1577 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1578 return Op0;
1579
1580 // Check for any combination of predicates that are guaranteed to be disjoint.
1581 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1582 (Pred0 == ICmpInst::ICMP_EQ && ICmpInst::isFalseWhenEqual(Pred1)) ||
1583 (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT) ||
1584 (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT))
1585 return getFalse(Op0->getType());
1586
1587 return nullptr;
1588 }
1589
1590 /// Commuted variants are assumed to be handled by calling this function again
1591 /// with the parameters swapped.
1592 static Value *simplifyOrOfICmpsWithSameOperands(ICmpInst *Op0, ICmpInst *Op1) {
1593 ICmpInst::Predicate Pred0, Pred1;
1594   Value *A, *B;
1595 if (!match(Op0, m_ICmp(Pred0, m_Value(A), m_Value(B))) ||
1596 !match(Op1, m_ICmp(Pred1, m_Specific(A), m_Specific(B))))
1597 return nullptr;
1598
1599 // We have (icmp Pred0, A, B) | (icmp Pred1, A, B).
1600 // If Op1 is always implied true by Op0, then Op0 is a subset of Op1, and we
1601 // can eliminate Op0 from this 'or'.
1602 if (ICmpInst::isImpliedTrueByMatchingCmp(Pred0, Pred1))
1603 return Op1;
1604
1605 // Check for any combination of predicates that cover the entire range of
1606 // possibilities.
1607 if ((Pred0 == ICmpInst::getInversePredicate(Pred1)) ||
1608 (Pred0 == ICmpInst::ICMP_NE && ICmpInst::isTrueWhenEqual(Pred1)) ||
1609 (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGE) ||
1610 (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGE))
1611 return getTrue(Op0->getType());
1612
1613 return nullptr;
1614 }
1615
1616 /// Test if a pair of compares with a shared operand and 2 constants has an
1617 /// empty set intersection, full set union, or if one compare is a superset of
1618 /// the other.
1619 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1620 bool IsAnd) {
1621 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1622 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1623 return nullptr;
1624
1625 const APInt *C0, *C1;
1626 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1627 !match(Cmp1->getOperand(1), m_APInt(C1)))
1628 return nullptr;
1629
1630 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1631 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1632
1633 // For and-of-compares, check if the intersection is empty:
1634 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1635 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1636 return getFalse(Cmp0->getType());
1637
1638 // For or-of-compares, check if the union is full:
1639 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1640 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1641 return getTrue(Cmp0->getType());
1642
1643 // Is one range a superset of the other?
1644 // If this is and-of-compares, take the smaller set:
1645 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1646 // If this is or-of-compares, take the larger set:
1647 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1648 if (Range0.contains(Range1))
1649 return IsAnd ? Cmp1 : Cmp0;
1650 if (Range1.contains(Range0))
1651 return IsAnd ? Cmp0 : Cmp1;
1652
1653 return nullptr;
1654 }
1655
1656 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1657 bool IsAnd) {
1658 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1659 if (!match(Cmp0->getOperand(1), m_Zero()) ||
1660 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1661 return nullptr;
1662
1663 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1664 return nullptr;
1665
1666 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1667 Value *X = Cmp0->getOperand(0);
1668 Value *Y = Cmp1->getOperand(0);
1669
1670 // If one of the compares is a masked version of a (not) null check, then
1671 // that compare implies the other, so we eliminate the other. Optionally, look
1672 // through a pointer-to-int cast to match a null check of a pointer type.
1673
1674 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1675 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1676 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1677 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1678 if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1679 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1680 return Cmp1;
1681
1682 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1683 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1684 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1685 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1686 if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1687 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1688 return Cmp0;
1689
1690 return nullptr;
1691 }
1692
1693 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1694 const InstrInfoQuery &IIQ) {
1695 // (icmp (add V, C0), C1) & (icmp V, C0)
1696 ICmpInst::Predicate Pred0, Pred1;
1697 const APInt *C0, *C1;
1698 Value *V;
1699 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1700 return nullptr;
1701
1702 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1703 return nullptr;
1704
1705 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1706 if (AddInst->getOperand(1) != Op1->getOperand(1))
1707 return nullptr;
1708
1709 Type *ITy = Op0->getType();
1710 bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1711 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1712
1713 const APInt Delta = *C1 - *C0;
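       // e.g., with C0 == 1 and C1 == 3 (Delta == 2): (V + 1) <u 3 restricts V to
       // {-1, 0, 1}, and each of those falsifies V >s 1, so the 'and' is false.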
1714 if (C0->isStrictlyPositive()) {
1715 if (Delta == 2) {
1716 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1717 return getFalse(ITy);
1718 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1719 return getFalse(ITy);
1720 }
1721 if (Delta == 1) {
1722 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1723 return getFalse(ITy);
1724 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && isNSW)
1725 return getFalse(ITy);
1726 }
1727 }
1728 if (C0->getBoolValue() && isNUW) {
1729 if (Delta == 2)
1730 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1731 return getFalse(ITy);
1732 if (Delta == 1)
1733 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1734 return getFalse(ITy);
1735 }
1736
1737 return nullptr;
1738 }
1739
1740 /// Try to eliminate compares with signed or unsigned min/max constants.
1741 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1742 bool IsAnd) {
1743 // Canonicalize an equality compare as Cmp0.
1744 if (Cmp1->isEquality())
1745 std::swap(Cmp0, Cmp1);
1746 if (!Cmp0->isEquality())
1747 return nullptr;
1748
1749 // The non-equality compare must include a common operand (X). Canonicalize
1750 // the common operand as operand 0 (the predicate is swapped if the common
1751 // operand was operand 1).
1752 ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1753 Value *X = Cmp0->getOperand(0);
1754 ICmpInst::Predicate Pred1;
1755 bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value()));
1756 if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())))
1757 return nullptr;
1758 if (ICmpInst::isEquality(Pred1))
1759 return nullptr;
1760
1761 // The equality compare must be against a constant. Flip bits if we matched
1762 // a bitwise not. Convert a null pointer constant to an integer zero value.
1763 APInt MinMaxC;
1764 const APInt *C;
1765 if (match(Cmp0->getOperand(1), m_APInt(C)))
1766 MinMaxC = HasNotOp ? ~*C : *C;
1767 else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1768 MinMaxC = APInt::getNullValue(8);
1769 else
1770 return nullptr;
1771
1772 // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1773 if (!IsAnd) {
1774 Pred0 = ICmpInst::getInversePredicate(Pred0);
1775 Pred1 = ICmpInst::getInversePredicate(Pred1);
1776 }
1777
1778 // Normalize to unsigned compare and unsigned min/max value.
1779 // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1780 if (ICmpInst::isSigned(Pred1)) {
1781 Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1782 MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1783 }
1784
1785 // (X != MAX) && (X < Y) --> X < Y
1786 // (X == MAX) || (X >= Y) --> X >= Y
1787 if (MinMaxC.isMaxValue())
1788 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1789 return Cmp1;
1790
1791 // (X != MIN) && (X > Y) --> X > Y
1792 // (X == MIN) || (X <= Y) --> X <= Y
1793 if (MinMaxC.isMinValue())
1794 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1795 return Cmp1;
1796
1797 return nullptr;
1798 }
1799
1800 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1801 const SimplifyQuery &Q) {
1802 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1803 return X;
1804 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1805 return X;
1806
1807 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op0, Op1))
1808 return X;
1809 if (Value *X = simplifyAndOfICmpsWithSameOperands(Op1, Op0))
1810 return X;
1811
1812 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1813 return X;
1814
1815 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1816 return X;
1817
1818 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1819 return X;
1820
1821 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1822 return X;
1823 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1824 return X;
1825
1826 return nullptr;
1827 }
1828
1829 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1830 const InstrInfoQuery &IIQ) {
1831 // (icmp (add V, C0), C1) | (icmp V, C0)
1832 ICmpInst::Predicate Pred0, Pred1;
1833 const APInt *C0, *C1;
1834 Value *V;
1835 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1836 return nullptr;
1837
1838 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1839 return nullptr;
1840
1841 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1842 if (AddInst->getOperand(1) != Op1->getOperand(1))
1843 return nullptr;
1844
1845 Type *ITy = Op0->getType();
1846 bool isNSW = IIQ.hasNoSignedWrap(AddInst);
1847 bool isNUW = IIQ.hasNoUnsignedWrap(AddInst);
1848
1849 const APInt Delta = *C1 - *C0;
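       // e.g., with C0 == 1 and C1 == 3 (Delta == 2): (V + 1) >=u 3 fails only for
       // V in {-1, 0, 1}, and each of those satisfies V <=s 1, so the 'or' is true.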
1850 if (C0->isStrictlyPositive()) {
1851 if (Delta == 2) {
1852 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1853 return getTrue(ITy);
1854 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1855 return getTrue(ITy);
1856 }
1857 if (Delta == 1) {
1858 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1859 return getTrue(ITy);
1860 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && isNSW)
1861 return getTrue(ITy);
1862 }
1863 }
1864 if (C0->getBoolValue() && isNUW) {
1865 if (Delta == 2)
1866 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1867 return getTrue(ITy);
1868 if (Delta == 1)
1869 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1870 return getTrue(ITy);
1871 }
1872
1873 return nullptr;
1874 }
1875
1876 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1877 const SimplifyQuery &Q) {
1878 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1879 return X;
1880 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1881 return X;
1882
1883 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op0, Op1))
1884 return X;
1885 if (Value *X = simplifyOrOfICmpsWithSameOperands(Op1, Op0))
1886 return X;
1887
1888 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1889 return X;
1890
1891 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1892 return X;
1893
1894 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1895 return X;
1896
1897 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1898 return X;
1899 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1900 return X;
1901
1902 return nullptr;
1903 }
1904
1905 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI,
1906 FCmpInst *LHS, FCmpInst *RHS, bool IsAnd) {
1907 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1908 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1909 if (LHS0->getType() != RHS0->getType())
1910 return nullptr;
1911
1912 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1913 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1914 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1915 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1916 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1917 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1918 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1919 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1920 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1921 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1922 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1923 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1924 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
1925 return RHS;
1926
1927 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1928 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1929 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1930 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1931 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1932 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1933 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1934 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1935 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1936 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
1937 return LHS;
1938 }
1939
1940 return nullptr;
1941 }
1942
1943 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q,
1944 Value *Op0, Value *Op1, bool IsAnd) {
1945 // Look through casts of the 'and' operands to find compares.
1946 auto *Cast0 = dyn_cast<CastInst>(Op0);
1947 auto *Cast1 = dyn_cast<CastInst>(Op1);
1948 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1949 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1950 Op0 = Cast0->getOperand(0);
1951 Op1 = Cast1->getOperand(0);
1952 }
1953
1954 Value *V = nullptr;
1955 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1956 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1957 if (ICmp0 && ICmp1)
1958 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1959 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1960
1961 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1962 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1963 if (FCmp0 && FCmp1)
1964 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
1965
1966 if (!V)
1967 return nullptr;
1968 if (!Cast0)
1969 return V;
1970
1971 // If we looked through casts, we can only handle a constant simplification
1972 // because we are not allowed to create a cast instruction here.
1973 if (auto *C = dyn_cast<Constant>(V))
1974 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
1975
1976 return nullptr;
1977 }
1978
1979 /// Given a bitwise logic op, check if the operands are add/sub with a common
1980 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
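     /// The identity follows from ~A == -A - 1:
     /// ~(X + ~C) == -(X + ~C) - 1 == -X - (-C - 1) - 1 == C - X.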
1981 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1982 Instruction::BinaryOps Opcode) {
1983 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1984 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1985 Value *X;
1986 Constant *C1, *C2;
1987 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1988 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1989 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
1990 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
1991 if (ConstantExpr::getNot(C1) == C2) {
1992 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1993 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1994 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
1995 Type *Ty = Op0->getType();
1996 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
1997 : ConstantInt::getAllOnesValue(Ty);
1998 }
1999 }
2000 return nullptr;
2001 }
2002
2003 /// Given operands for an And, see if we can fold the result.
2004 /// If not, this returns null.
2005 static Value *SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2006 unsigned MaxRecurse) {
2007 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2008 return C;
2009
2010 // X & poison -> poison
2011 if (isa<PoisonValue>(Op1))
2012 return Op1;
2013
2014 // X & undef -> 0
2015 if (Q.isUndefValue(Op1))
2016 return Constant::getNullValue(Op0->getType());
2017
2018 // X & X = X
2019 if (Op0 == Op1)
2020 return Op0;
2021
2022 // X & 0 = 0
2023 if (match(Op1, m_Zero()))
2024 return Constant::getNullValue(Op0->getType());
2025
2026 // X & -1 = X
2027 if (match(Op1, m_AllOnes()))
2028 return Op0;
2029
2030 // A & ~A = ~A & A = 0
2031 if (match(Op0, m_Not(m_Specific(Op1))) ||
2032 match(Op1, m_Not(m_Specific(Op0))))
2033 return Constant::getNullValue(Op0->getType());
2034
2035 // (A | ?) & A = A
2036 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2037 return Op1;
2038
2039 // A & (A | ?) = A
2040 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2041 return Op0;
2042
2043 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2044 return V;
2045
2046 // A mask that only clears known zeros of a shifted value is a no-op.
2047 Value *X;
2048 const APInt *Mask;
2049 const APInt *ShAmt;
2050 if (match(Op1, m_APInt(Mask))) {
2051 // If all bits in the inverted and shifted mask are clear:
2052 // and (shl X, ShAmt), Mask --> shl X, ShAmt
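       // e.g., and (shl i32 %x, 8), 0xFFFFFF00 --> shl i32 %x, 8, because
       // (~0xFFFFFF00) >> 8 == 0xFF >> 8 == 0.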
2053 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2054 (~(*Mask)).lshr(*ShAmt).isNullValue())
2055 return Op0;
2056
2057 // If all bits in the inverted and shifted mask are clear:
2058 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2059 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2060 (~(*Mask)).shl(*ShAmt).isNullValue())
2061 return Op0;
2062 }
2063
2064 // If we have a multiplication overflow check that is being 'and'ed with a
2065 // check that one of the multipliers is not zero, we can omit the 'and', and
2066 // only keep the overflow check.
2067 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2068 return Op1;
2069 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
2070 return Op0;
2071
2072 // A & (-A) = A if A is a power of two or zero.
2073 if (match(Op0, m_Neg(m_Specific(Op1))) ||
2074 match(Op1, m_Neg(m_Specific(Op0)))) {
2075 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2076 Q.DT))
2077 return Op0;
2078 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2079 Q.DT))
2080 return Op1;
2081 }
2082
2083 // This is a similar pattern used for checking if a value is a power-of-2:
2084 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2085 // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
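       // e.g., A == 8: (8 - 1) & 8 == 0b0111 & 0b1000 == 0.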
2086 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2087 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2088 return Constant::getNullValue(Op1->getType());
2089 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2090 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2091 return Constant::getNullValue(Op0->getType());
2092
2093 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2094 return V;
2095
2096 // Try some generic simplifications for associative operations.
2097 if (Value *V = SimplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q,
2098 MaxRecurse))
2099 return V;
2100
2101 // And distributes over Or. Try some generic simplifications based on this.
2102 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2103 Instruction::Or, Q, MaxRecurse))
2104 return V;
2105
2106 // And distributes over Xor. Try some generic simplifications based on this.
2107 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2108 Instruction::Xor, Q, MaxRecurse))
2109 return V;
2110
2111 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2112 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2113 // A & (A && B) -> A && B
2114 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2115 return Op1;
2116 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2117 return Op0;
2118 }
2119 // If the operation is with the result of a select instruction, check
2120 // whether operating on either branch of the select always yields the same
2121 // value.
2122 if (Value *V = ThreadBinOpOverSelect(Instruction::And, Op0, Op1, Q,
2123 MaxRecurse))
2124 return V;
2125 }
2126
2127 // If the operation is with the result of a phi instruction, check whether
2128 // operating on all incoming values of the phi always yields the same value.
2129 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2130 if (Value *V = ThreadBinOpOverPHI(Instruction::And, Op0, Op1, Q,
2131 MaxRecurse))
2132 return V;
2133
2134 // Assuming the effective width of Y is not larger than A, i.e. all bits
2135 // from X and Y are disjoint in (X << A) | Y,
2136 // if the mask of this AND op covers all bits of X or Y, while it covers
2137 // no bits from the other, we can bypass this AND op. E.g.,
2138 // ((X << A) | Y) & Mask -> Y,
2139 // if Mask = ((1 << effective_width_of(Y)) - 1)
2140 // ((X << A) | Y) & Mask -> X << A,
2141 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2142 // SimplifyDemandedBits in InstCombine can optimize the general case.
2143 // This pattern aims to help other passes for a common case.
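       // e.g., ((X << 8) | Y) & 0xFF --> Y, and ((X << 8) | Y) & 0xFFFFFF00 -->
       // X << 8, when the effective width of Y is at most 8.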
2144 Value *Y, *XShifted;
2145 if (match(Op1, m_APInt(Mask)) &&
2146 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2147 m_Value(XShifted)),
2148 m_Value(Y)))) {
2149 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2150 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2151 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2152 const unsigned EffWidthY = Width - YKnown.countMinLeadingZeros();
2153 if (EffWidthY <= ShftCnt) {
2154 const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI,
2155 Q.DT);
2156 const unsigned EffWidthX = Width - XKnown.countMinLeadingZeros();
2157 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2158 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2159 // If the mask is extracting all bits from X or Y as is, we can skip
2160 // this AND op.
2161 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2162 return Y;
2163 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2164 return XShifted;
2165 }
2166 }
2167
2168 return nullptr;
2169 }
2170
2171 Value *llvm::SimplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2172 return ::SimplifyAndInst(Op0, Op1, Q, RecursionLimit);
2173 }
2174
2175 /// Given operands for an Or, see if we can fold the result.
2176 /// If not, this returns null.
2177 static Value *SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2178 unsigned MaxRecurse) {
2179 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2180 return C;
2181
2182 // X | poison -> poison
2183 if (isa<PoisonValue>(Op1))
2184 return Op1;
2185
2186 // X | undef -> -1
2187 // X | -1 = -1
2188 // Do not return Op1 because it may contain undef elements if it's a vector.
2189 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2190 return Constant::getAllOnesValue(Op0->getType());
2191
2192 // X | X = X
2193 // X | 0 = X
2194 if (Op0 == Op1 || match(Op1, m_Zero()))
2195 return Op0;
2196
2197 // A | ~A = ~A | A = -1
2198 if (match(Op0, m_Not(m_Specific(Op1))) ||
2199 match(Op1, m_Not(m_Specific(Op0))))
2200 return Constant::getAllOnesValue(Op0->getType());
2201
2202 // (A & ?) | A = A
2203 if (match(Op0, m_c_And(m_Specific(Op1), m_Value())))
2204 return Op1;
2205
2206 // A | (A & ?) = A
2207 if (match(Op1, m_c_And(m_Specific(Op0), m_Value())))
2208 return Op0;
2209
2210 // ~(A & ?) | A = -1
2211 if (match(Op0, m_Not(m_c_And(m_Specific(Op1), m_Value()))))
2212 return Constant::getAllOnesValue(Op1->getType());
2213
2214 // A | ~(A & ?) = -1
2215 if (match(Op1, m_Not(m_c_And(m_Specific(Op0), m_Value()))))
2216 return Constant::getAllOnesValue(Op0->getType());
2217
2218 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2219 return V;
2220
2221 Value *A, *B, *NotA;
2222 // (A & ~B) | (A ^ B) -> (A ^ B)
2223 // (~B & A) | (A ^ B) -> (A ^ B)
2224 // (A & ~B) | (B ^ A) -> (B ^ A)
2225 // (~B & A) | (B ^ A) -> (B ^ A)
2226 if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
2227 (match(Op0, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2228 match(Op0, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2229 return Op1;
2230
2231 // Commute the 'or' operands.
2232 // (A ^ B) | (A & ~B) -> (A ^ B)
2233 // (A ^ B) | (~B & A) -> (A ^ B)
2234 // (B ^ A) | (A & ~B) -> (B ^ A)
2235 // (B ^ A) | (~B & A) -> (B ^ A)
2236 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2237 (match(Op1, m_c_And(m_Specific(A), m_Not(m_Specific(B)))) ||
2238 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B)))))
2239 return Op0;
2240
2241 // (A & B) | (~A ^ B) -> (~A ^ B)
2242 // (B & A) | (~A ^ B) -> (~A ^ B)
2243 // (A & B) | (B ^ ~A) -> (B ^ ~A)
2244 // (B & A) | (B ^ ~A) -> (B ^ ~A)
2245 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
2246 (match(Op1, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2247 match(Op1, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2248 return Op1;
2249
2250 // Commute the 'or' operands.
2251 // (~A ^ B) | (A & B) -> (~A ^ B)
2252 // (~A ^ B) | (B & A) -> (~A ^ B)
2253 // (B ^ ~A) | (A & B) -> (B ^ ~A)
2254 // (B ^ ~A) | (B & A) -> (B ^ ~A)
2255 if (match(Op1, m_And(m_Value(A), m_Value(B))) &&
2256 (match(Op0, m_c_Xor(m_Specific(A), m_Not(m_Specific(B)))) ||
2257 match(Op0, m_c_Xor(m_Not(m_Specific(A)), m_Specific(B)))))
2258 return Op0;
2259
2260 // (~A & B) | ~(A | B) --> ~A
2261 // (~A & B) | ~(B | A) --> ~A
2262 // (B & ~A) | ~(A | B) --> ~A
2263 // (B & ~A) | ~(B | A) --> ~A
2264 if (match(Op0, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2265 m_Value(B))) &&
2266 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2267 return NotA;
2268
2269 // Commute the 'or' operands.
2270 // ~(A | B) | (~A & B) --> ~A
2271 // ~(B | A) | (~A & B) --> ~A
2272 // ~(A | B) | (B & ~A) --> ~A
2273 // ~(B | A) | (B & ~A) --> ~A
2274 if (match(Op1, m_c_And(m_CombineAnd(m_Value(NotA), m_Not(m_Value(A))),
2275 m_Value(B))) &&
2276 match(Op0, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2277 return NotA;
2278
2279 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2280 return V;
2281
2282   // If we have a multiplication overflow check that is being 'or'ed with a
2283   // check that one of the multipliers is not zero, we can omit the 'or', and
2284   // only keep the overflow check.
2285 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2286 return Op1;
2287 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2288 return Op0;
2289
2290 // Try some generic simplifications for associative operations.
2291 if (Value *V = SimplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q,
2292 MaxRecurse))
2293 return V;
2294
2295 // Or distributes over And. Try some generic simplifications based on this.
2296 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2297 Instruction::And, Q, MaxRecurse))
2298 return V;
2299
2300 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2301 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2302 // A | (A || B) -> A || B
2303 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2304 return Op1;
2305 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2306 return Op0;
2307 }
2308 // If the operation is with the result of a select instruction, check
2309 // whether operating on either branch of the select always yields the same
2310 // value.
2311 if (Value *V = ThreadBinOpOverSelect(Instruction::Or, Op0, Op1, Q,
2312 MaxRecurse))
2313 return V;
2314 }
2315
2316 // (A & C1)|(B & C2)
2317 const APInt *C1, *C2;
2318 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2319 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2320 if (*C1 == ~*C2) {
2321 // (A & C1)|(B & C2)
2322 // If we have: ((V + N) & C1) | (V & C2)
2323 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2324 // replace with V+N.
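         // e.g., C1 == 0xF0, C2 == 0x0F, N == 16: ((V + 16) & 0xF0) | (V & 0x0F)
         // == V + 16, because adding 16 cannot change the low nibble.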
2325 Value *N;
2326 if (C2->isMask() && // C2 == 0+1+
2327 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2328 // Add commutes, try both ways.
2329 if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2330 return A;
2331 }
2332 // Or commutes, try both ways.
2333 if (C1->isMask() &&
2334 match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2335 // Add commutes, try both ways.
2336 if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2337 return B;
2338 }
2339 }
2340 }
2341
2342 // If the operation is with the result of a phi instruction, check whether
2343 // operating on all incoming values of the phi always yields the same value.
2344 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2345 if (Value *V = ThreadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2346 return V;
2347
2348 return nullptr;
2349 }
2350
2351 Value *llvm::SimplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2352 return ::SimplifyOrInst(Op0, Op1, Q, RecursionLimit);
2353 }
2354
2355 /// Given operands for a Xor, see if we can fold the result.
2356 /// If not, this returns null.
2357 static Value *SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2358 unsigned MaxRecurse) {
2359 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2360 return C;
2361
2362 // A ^ undef -> undef
2363 if (Q.isUndefValue(Op1))
2364 return Op1;
2365
2366 // A ^ 0 = A
2367 if (match(Op1, m_Zero()))
2368 return Op0;
2369
2370 // A ^ A = 0
2371 if (Op0 == Op1)
2372 return Constant::getNullValue(Op0->getType());
2373
2374 // A ^ ~A = ~A ^ A = -1
2375 if (match(Op0, m_Not(m_Specific(Op1))) ||
2376 match(Op1, m_Not(m_Specific(Op0))))
2377 return Constant::getAllOnesValue(Op0->getType());
2378
2379 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2380 return V;
2381
2382 // Try some generic simplifications for associative operations.
2383 if (Value *V = SimplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q,
2384 MaxRecurse))
2385 return V;
2386
2387 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2388 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2389 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2390 // only if B and C are equal. If B and C are equal then (since we assume
2391 // that operands have already been simplified) "select(cond, B, C)" should
2392 // have been simplified to the common value of B and C already. Analysing
2393 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2394 // for threading over phi nodes.
2395
2396 return nullptr;
2397 }
2398
2399 Value *llvm::SimplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2400 return ::SimplifyXorInst(Op0, Op1, Q, RecursionLimit);
2401 }
2402
2404 static Type *GetCompareTy(Value *Op) {
2405 return CmpInst::makeCmpResultType(Op->getType());
2406 }
2407
2408 /// Rummage around inside V looking for something equivalent to the comparison
2409 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2410 /// Helper function for analyzing max/min idioms.
2411 static Value *ExtractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2412 Value *LHS, Value *RHS) {
2413 SelectInst *SI = dyn_cast<SelectInst>(V);
2414 if (!SI)
2415 return nullptr;
2416 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2417 if (!Cmp)
2418 return nullptr;
2419 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2420 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2421 return Cmp;
2422 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2423 LHS == CmpRHS && RHS == CmpLHS)
2424 return Cmp;
2425 return nullptr;
2426 }
2427
2428 // A significant optimization not implemented here is assuming that alloca
2429 // addresses are not equal to incoming argument values. They don't *alias*,
2430 // as we say, but that doesn't mean they aren't equal, so we take a
2431 // conservative approach.
2432 //
2433 // This is inspired in part by C++11 5.10p1:
2434 // "Two pointers of the same type compare equal if and only if they are both
2435 // null, both point to the same function, or both represent the same
2436 // address."
2437 //
2438 // This is pretty permissive.
2439 //
2440 // It's also partly due to C11 6.5.9p6:
2441 // "Two pointers compare equal if and only if both are null pointers, both are
2442 // pointers to the same object (including a pointer to an object and a
2443 // subobject at its beginning) or function, both are pointers to one past the
2444 // last element of the same array object, or one is a pointer to one past the
2445 // end of one array object and the other is a pointer to the start of a
2446 // different array object that happens to immediately follow the first array
2447 // object in the address space."
2448 //
2449 // C11's version is more restrictive, however there's no reason why an argument
2450 // couldn't be a one-past-the-end value for a stack object in the caller and be
2451 // equal to the beginning of a stack object in the callee.
2452 //
2453 // If the C and C++ standards are ever made sufficiently restrictive in this
2454 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2455 // this optimization.
2456 static Constant *
2457 computePointerICmp(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
2458 const SimplifyQuery &Q) {
2459 const DataLayout &DL = Q.DL;
2460 const TargetLibraryInfo *TLI = Q.TLI;
2461 const DominatorTree *DT = Q.DT;
2462 const Instruction *CxtI = Q.CxtI;
2463 const InstrInfoQuery &IIQ = Q.IIQ;
2464
2465 // First, skip past any trivial no-ops.
2466 LHS = LHS->stripPointerCasts();
2467 RHS = RHS->stripPointerCasts();
2468
2469 // A non-null pointer is not equal to a null pointer.
2470 if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2471 llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2472 IIQ.UseInstrInfo))
2473 return ConstantInt::get(GetCompareTy(LHS),
2474 !CmpInst::isTrueWhenEqual(Pred));
2475
2476 // We can only fold certain predicates on pointer comparisons.
2477 switch (Pred) {
2478 default:
2479 return nullptr;
2480
2481   // Equality comparisons are easy to fold.
2482 case CmpInst::ICMP_EQ:
2483 case CmpInst::ICMP_NE:
2484 break;
2485
2486 // We can only handle unsigned relational comparisons because 'inbounds' on
2487 // a GEP only protects against unsigned wrapping.
2488 case CmpInst::ICMP_UGT:
2489 case CmpInst::ICMP_UGE:
2490 case CmpInst::ICMP_ULT:
2491 case CmpInst::ICMP_ULE:
2492 // However, we have to switch them to their signed variants to handle
2493 // negative indices from the base pointer.
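         // e.g., comparing (gep inbounds %p, i64 -4) with %p reduces below to
         // comparing the offsets -4 and 0; the signed compare -4 <s 0 gives the
         // intended result, while an unsigned compare of the offsets would not.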
2494 Pred = ICmpInst::getSignedPredicate(Pred);
2495 break;
2496 }
2497
2498 // Strip off any constant offsets so that we can reason about them.
2499 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2500 // here and compare base addresses like AliasAnalysis does, however there are
2501 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2502 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2503 // doesn't need to guarantee pointer inequality when it says NoAlias.
2504 Constant *LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
2505 Constant *RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
2506
2507 // If LHS and RHS are related via constant offsets to the same base
2508 // value, we can replace it with an icmp which just compares the offsets.
2509 if (LHS == RHS)
2510 return ConstantExpr::getICmp(Pred, LHSOffset, RHSOffset);
2511
2512 // Various optimizations for (in)equality comparisons.
2513 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2514 // Different non-empty allocations that exist at the same time have
2515 // different addresses (if the program can tell). Global variables always
2516 // exist, so they always exist during the lifetime of each other and all
2517 // allocas. Two different allocas usually have different addresses...
2518 //
2519 // However, if there's an @llvm.stackrestore dynamically in between two
2520 // allocas, they may have the same address. It's tempting to reduce the
2521 // scope of the problem by only looking at *static* allocas here. That would
2522 // cover the majority of allocas while significantly reducing the likelihood
2523 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2524 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2525 // an entry block. Also, if we have a block that's not attached to a
2526 // function, we can't tell if it's "static" under the current definition.
2527 // Theoretically, this problem could be fixed by creating a new kind of
2528 // instruction kind specifically for static allocas. Such a new instruction
2529 // could be required to be at the top of the entry block, thus preventing it
2530 // from being subject to a @llvm.stackrestore. Instcombine could even
2531 // convert regular allocas into these special allocas. It'd be nifty.
2532 // However, until then, this problem remains open.
2533 //
2534 // So, we'll assume that two non-empty allocas have different addresses
2535 // for now.
2536 //
2537 // With all that, if the offsets are within the bounds of their allocations
2538 // (and not one-past-the-end! so we can't use inbounds!), and their
2539 // allocations aren't the same, the pointers are not equal.
2540 //
2541 // Note that it's not necessary to check for LHS being a global variable
2542 // address, due to canonicalization and constant folding.
2543 if (isa<AllocaInst>(LHS) &&
2544 (isa<AllocaInst>(RHS) || isa<GlobalVariable>(RHS))) {
2545 ConstantInt *LHSOffsetCI = dyn_cast<ConstantInt>(LHSOffset);
2546 ConstantInt *RHSOffsetCI = dyn_cast<ConstantInt>(RHSOffset);
2547 uint64_t LHSSize, RHSSize;
2548 ObjectSizeOpts Opts;
2549 Opts.NullIsUnknownSize =
2550 NullPointerIsDefined(cast<AllocaInst>(LHS)->getFunction());
2551 if (LHSOffsetCI && RHSOffsetCI &&
2552 getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2553 getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2554 const APInt &LHSOffsetValue = LHSOffsetCI->getValue();
2555 const APInt &RHSOffsetValue = RHSOffsetCI->getValue();
2556 if (!LHSOffsetValue.isNegative() &&
2557 !RHSOffsetValue.isNegative() &&
2558 LHSOffsetValue.ult(LHSSize) &&
2559 RHSOffsetValue.ult(RHSSize)) {
2560 return ConstantInt::get(GetCompareTy(LHS),
2561 !CmpInst::isTrueWhenEqual(Pred));
2562 }
2563 }
2564
2565 // Repeat the above check but this time without depending on DataLayout
2566 // or being able to compute a precise size.
2567 if (!cast<PointerType>(LHS->getType())->isEmptyTy() &&
2568 !cast<PointerType>(RHS->getType())->isEmptyTy() &&
2569 LHSOffset->isNullValue() &&
2570 RHSOffset->isNullValue())
2571 return ConstantInt::get(GetCompareTy(LHS),
2572 !CmpInst::isTrueWhenEqual(Pred));
2573 }
2574
2575     // Even if a non-inbounds GEP occurs along the path we can still optimize
2576 // equality comparisons concerning the result. We avoid walking the whole
2577 // chain again by starting where the last calls to
2578 // stripAndComputeConstantOffsets left off and accumulate the offsets.
2579 Constant *LHSNoBound = stripAndComputeConstantOffsets(DL, LHS, true);
2580 Constant *RHSNoBound = stripAndComputeConstantOffsets(DL, RHS, true);
2581 if (LHS == RHS)
2582 return ConstantExpr::getICmp(Pred,
2583 ConstantExpr::getAdd(LHSOffset, LHSNoBound),
2584 ConstantExpr::getAdd(RHSOffset, RHSNoBound));
2585
2586 // If one side of the equality comparison must come from a noalias call
2587 // (meaning a system memory allocation function), and the other side must
2588 // come from a pointer that cannot overlap with dynamically-allocated
2589 // memory within the lifetime of the current function (allocas, byval
2590 // arguments, globals), then determine the comparison result here.
2591 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2592 getUnderlyingObjects(LHS, LHSUObjs);
2593 getUnderlyingObjects(RHS, RHSUObjs);
2594
2595 // Is the set of underlying objects all noalias calls?
2596 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2597 return all_of(Objects, isNoAliasCall);
2598 };
2599
2600 // Is the set of underlying objects all things which must be disjoint from
2601     // noalias calls? For allocas, we consider only static ones (dynamic
2602 // allocas might be transformed into calls to malloc not simultaneously
2603 // live with the compared-to allocation). For globals, we exclude symbols
2604     // that might be resolved lazily to symbols in another dynamically-loaded
2605 // library (and, thus, could be malloc'ed by the implementation).
2606 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2607 return all_of(Objects, [](const Value *V) {
2608 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2609 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2610 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2611 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2612 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2613 !GV->isThreadLocal();
2614 if (const Argument *A = dyn_cast<Argument>(V))
2615 return A->hasByValAttr();
2616 return false;
2617 });
2618 };
2619
2620 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2621 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2622 return ConstantInt::get(GetCompareTy(LHS),
2623 !CmpInst::isTrueWhenEqual(Pred));
2624
2625 // Fold comparisons for non-escaping pointer even if the allocation call
2626 // cannot be elided. We cannot fold malloc comparison to null. Also, the
2627 // dynamic allocation call could be either of the operands.
2628 Value *MI = nullptr;
2629 if (isAllocLikeFn(LHS, TLI) &&
2630 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2631 MI = LHS;
2632 else if (isAllocLikeFn(RHS, TLI) &&
2633 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2634 MI = RHS;
2635 // FIXME: We should also fold the compare when the pointer escapes, but the
2636     // compare dominates the pointer escape.
2637 if (MI && !PointerMayBeCaptured(MI, true, true))
2638 return ConstantInt::get(GetCompareTy(LHS),
2639 CmpInst::isFalseWhenEqual(Pred));
2640 }
2641
2642 // Otherwise, fail.
2643 return nullptr;
2644 }
2645
2646 /// Fold an icmp when its operands have i1 scalar type.
2647 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2648 Value *RHS, const SimplifyQuery &Q) {
2649 Type *ITy = GetCompareTy(LHS); // The return type.
2650 Type *OpTy = LHS->getType(); // The operand type.
2651 if (!OpTy->isIntOrIntVectorTy(1))
2652 return nullptr;
2653
2654 // A boolean compared to true/false can be simplified in 14 out of the 20
2655 // (10 predicates * 2 constants) possible combinations. Cases not handled here
2656 // require a 'not' of the LHS, so those must be transformed in InstCombine.
2657 if (match(RHS, m_Zero())) {
2658 switch (Pred) {
2659 case CmpInst::ICMP_NE: // X != 0 -> X
2660 case CmpInst::ICMP_UGT: // X >u 0 -> X
2661 case CmpInst::ICMP_SLT: // X <s 0 -> X
2662 return LHS;
2663
2664 case CmpInst::ICMP_ULT: // X <u 0 -> false
2665 case CmpInst::ICMP_SGT: // X >s 0 -> false
2666 return getFalse(ITy);
2667
2668 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2669 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2670 return getTrue(ITy);
2671
2672 default: break;
2673 }
2674 } else if (match(RHS, m_One())) {
2675 switch (Pred) {
2676 case CmpInst::ICMP_EQ: // X == 1 -> X
2677 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2678 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2679 return LHS;
2680
2681 case CmpInst::ICMP_UGT: // X >u 1 -> false
2682 case CmpInst::ICMP_SLT: // X <s -1 -> false
2683 return getFalse(ITy);
2684
2685 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2686 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2687 return getTrue(ITy);
2688
2689 default: break;
2690 }
2691 }
2692
2693 switch (Pred) {
2694 default:
2695 break;
2696 case ICmpInst::ICMP_UGE:
2697 if (isImpliedCondition(RHS, LHS, Q.DL).getValueOr(false))
2698 return getTrue(ITy);
2699 break;
2700 case ICmpInst::ICMP_SGE:
2701 /// For signed comparison, the values for an i1 are 0 and -1
2702 /// respectively. This maps into a truth table of:
2703 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2704 /// 0 | 0 | 1 (0 >= 0) | 1
2705 /// 0 | 1 | 1 (0 >= -1) | 1
2706 /// 1 | 0 | 0 (-1 >= 0) | 0
2707 /// 1 | 1 | 1 (-1 >= -1) | 1
2708 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2709 return getTrue(ITy);
2710 break;
2711 case ICmpInst::ICMP_ULE:
2712 if (isImpliedCondition(LHS, RHS, Q.DL).getValueOr(false))
2713 return getTrue(ITy);
2714 break;
2715 }
2716
2717 return nullptr;
2718 }
2719
2720 /// Try hard to fold icmp with zero RHS because this is a common case.
2721 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2722 Value *RHS, const SimplifyQuery &Q) {
2723 if (!match(RHS, m_Zero()))
2724 return nullptr;
2725
2726 Type *ITy = GetCompareTy(LHS); // The return type.
2727 switch (Pred) {
2728 default:
2729 llvm_unreachable("Unknown ICmp predicate!");
2730 case ICmpInst::ICMP_ULT:
2731 return getFalse(ITy);
2732 case ICmpInst::ICMP_UGE:
2733 return getTrue(ITy);
2734 case ICmpInst::ICMP_EQ:
2735 case ICmpInst::ICMP_ULE:
2736 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2737 return getFalse(ITy);
2738 break;
2739 case ICmpInst::ICMP_NE:
2740 case ICmpInst::ICMP_UGT:
2741 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2742 return getTrue(ITy);
2743 break;
2744 case ICmpInst::ICMP_SLT: {
2745 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2746 if (LHSKnown.isNegative())
2747 return getTrue(ITy);
2748 if (LHSKnown.isNonNegative())
2749 return getFalse(ITy);
2750 break;
2751 }
2752 case ICmpInst::ICMP_SLE: {
2753 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2754 if (LHSKnown.isNegative())
2755 return getTrue(ITy);
2756 if (LHSKnown.isNonNegative() &&
2757 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2758 return getFalse(ITy);
2759 break;
2760 }
2761 case ICmpInst::ICMP_SGE: {
2762 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2763 if (LHSKnown.isNegative())
2764 return getFalse(ITy);
2765 if (LHSKnown.isNonNegative())
2766 return getTrue(ITy);
2767 break;
2768 }
2769 case ICmpInst::ICMP_SGT: {
2770 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2771 if (LHSKnown.isNegative())
2772 return getFalse(ITy);
2773 if (LHSKnown.isNonNegative() &&
2774 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2775 return getTrue(ITy);
2776 break;
2777 }
2778 }
2779
2780 return nullptr;
2781 }
2782
2783 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
2784 Value *RHS, const InstrInfoQuery &IIQ) {
2785 Type *ITy = GetCompareTy(RHS); // The return type.
2786
2787 Value *X;
2788 // Sign-bit checks can be optimized to true/false after unsigned
2789 // floating-point casts:
2790 // icmp slt (bitcast (uitofp X)), 0 --> false
2791 // icmp sgt (bitcast (uitofp X)), -1 --> true
2792 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
2793 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
2794 return ConstantInt::getFalse(ITy);
2795 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
2796 return ConstantInt::getTrue(ITy);
2797 }
2798
2799 const APInt *C;
2800 if (!match(RHS, m_APIntAllowUndef(C)))
2801 return nullptr;
2802
2803 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
2804 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
2805 if (RHS_CR.isEmptySet())
2806 return ConstantInt::getFalse(ITy);
2807 if (RHS_CR.isFullSet())
2808 return ConstantInt::getTrue(ITy);
2809
2810 ConstantRange LHS_CR = computeConstantRange(LHS, IIQ.UseInstrInfo);
2811 if (!LHS_CR.isFullSet()) {
2812 if (RHS_CR.contains(LHS_CR))
2813 return ConstantInt::getTrue(ITy);
2814 if (RHS_CR.inverse().contains(LHS_CR))
2815 return ConstantInt::getFalse(ITy);
2816 }
2817
2818 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
2819 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
2820 const APInt *MulC;
2821 if (ICmpInst::isEquality(Pred) &&
2822 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2823 *MulC != 0 && C->urem(*MulC) != 0) ||
2824 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
2825 *MulC != 0 && C->srem(*MulC) != 0)))
2826 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
2827
2828 return nullptr;
2829 }
2830
2831 static Value *simplifyICmpWithBinOpOnLHS(
2832 CmpInst::Predicate Pred, BinaryOperator *LBO, Value *RHS,
2833 const SimplifyQuery &Q, unsigned MaxRecurse) {
2834 Type *ITy = GetCompareTy(RHS); // The return type.
2835
2836 Value *Y = nullptr;
2837 // icmp pred (or X, Y), X
2838 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
2839 if (Pred == ICmpInst::ICMP_ULT)
2840 return getFalse(ITy);
2841 if (Pred == ICmpInst::ICMP_UGE)
2842 return getTrue(ITy);
2843
2844 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
2845 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2846 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2847 if (RHSKnown.isNonNegative() && YKnown.isNegative())
2848 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
2849 if (RHSKnown.isNegative() || YKnown.isNonNegative())
2850 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
2851 }
2852 }
2853
2854 // icmp pred (and X, Y), X
2855 if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
2856 if (Pred == ICmpInst::ICMP_UGT)
2857 return getFalse(ITy);
2858 if (Pred == ICmpInst::ICMP_ULE)
2859 return getTrue(ITy);
2860 }
2861
2862 // icmp pred (urem X, Y), Y
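// A (non-poison) urem result is always in [0, Y), so equality and unsigned
// orderings against Y fold outright; the signed forms additionally need Y to
// be known non-negative for that range to stay in signed order.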
2863 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
2864 switch (Pred) {
2865 default:
2866 break;
2867 case ICmpInst::ICMP_SGT:
2868 case ICmpInst::ICMP_SGE: {
2869 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2870 if (!Known.isNonNegative())
2871 break;
2872 LLVM_FALLTHROUGH;
2873 }
2874 case ICmpInst::ICMP_EQ:
2875 case ICmpInst::ICMP_UGT:
2876 case ICmpInst::ICMP_UGE:
2877 return getFalse(ITy);
2878 case ICmpInst::ICMP_SLT:
2879 case ICmpInst::ICMP_SLE: {
2880 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2881 if (!Known.isNonNegative())
2882 break;
2883 LLVM_FALLTHROUGH;
2884 }
2885 case ICmpInst::ICMP_NE:
2886 case ICmpInst::ICMP_ULT:
2887 case ICmpInst::ICMP_ULE:
2888 return getTrue(ITy);
2889 }
2890 }
2891
2892 // icmp pred (urem X, Y), X
2893 if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
2894 if (Pred == ICmpInst::ICMP_ULE)
2895 return getTrue(ITy);
2896 if (Pred == ICmpInst::ICMP_UGT)
2897 return getFalse(ITy);
2898 }
2899
2900 // x >> y <=u x
2901 // x udiv y <=u x.
2902 if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
2903 match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
2904 // icmp pred (X op Y), X
2905 if (Pred == ICmpInst::ICMP_UGT)
2906 return getFalse(ITy);
2907 if (Pred == ICmpInst::ICMP_ULE)
2908 return getTrue(ITy);
2909 }
2910
2911 // (x*C1)/C2 <= x for C1 <= C2.
2912 // This holds even if the multiplication overflows: Assume that x != 0 and
2913 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
2914 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
2915 //
2916 // Additionally, the multiplication or the division might be represented
2917 // as a shift:
2918 // (x*C1)>>C2 <= x for C1 <= 2**C2.
2919 // (x<<C1)/C2 <= x for 2**C1 <= C2.
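// As a concrete instance of the overflow argument in i8 (M = 256): with
// x = 100, C1 = 3, C2 = 4, the product wraps to 44, and 44/4 = 11 <= 100.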
2920 const APInt *C1, *C2;
2921 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2922 C1->ule(*C2)) ||
2923 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2924 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
2925 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
2926 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
2927 if (Pred == ICmpInst::ICMP_UGT)
2928 return getFalse(ITy);
2929 if (Pred == ICmpInst::ICMP_ULE)
2930 return getTrue(ITy);
2931 }
2932
2933 return nullptr;
2934 }
2935
2937 // If only one of the icmp's operands has NSW flags, try to prove that:
2938 //
2939 // icmp slt (x + C1), (x +nsw C2)
2940 //
2941 // is equivalent to:
2942 //
2943 // icmp slt C1, C2
2944 //
2945 // which is true if x + C2 has the NSW flags set and:
2946 // *) C1 < C2 && C1 >= 0, or
2947 // *) C2 < C1 && C1 <= 0.
2948 //
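// For example, "icmp slt (add %x, 1), (add nsw %x, 2)" folds to true:
// C1 = 1 is non-negative and 1 <s 2.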
2949 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
2950 Value *RHS) {
2951 // TODO: Only icmp slt is supported for now.
2952 if (Pred != CmpInst::ICMP_SLT)
2953 return false;
2954
2955 // Canonicalize nsw add as RHS.
2956 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
2957 std::swap(LHS, RHS);
2958 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
2959 return false;
2960
2961 Value *X;
2962 const APInt *C1, *C2;
2963 if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
2964 !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
2965 return false;
2966
2967 return (C1->slt(*C2) && C1->isNonNegative()) ||
2968 (C2->slt(*C1) && C1->isNonPositive());
2969 }
2970
2972 /// TODO: A large part of this logic is duplicated in InstCombine's
2973 /// foldICmpBinOp(). We should be able to share that and avoid the code
2974 /// duplication.
2975 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
2976 Value *RHS, const SimplifyQuery &Q,
2977 unsigned MaxRecurse) {
2978 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
2979 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
2980 if (MaxRecurse && (LBO || RBO)) {
2981 // Analyze the case when either LHS or RHS is an add instruction.
2982 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
2983 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
2984 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
2985 if (LBO && LBO->getOpcode() == Instruction::Add) {
2986 A = LBO->getOperand(0);
2987 B = LBO->getOperand(1);
2988 NoLHSWrapProblem =
2989 ICmpInst::isEquality(Pred) ||
2990 (CmpInst::isUnsigned(Pred) &&
2991 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
2992 (CmpInst::isSigned(Pred) &&
2993 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
2994 }
2995 if (RBO && RBO->getOpcode() == Instruction::Add) {
2996 C = RBO->getOperand(0);
2997 D = RBO->getOperand(1);
2998 NoRHSWrapProblem =
2999 ICmpInst::isEquality(Pred) ||
3000 (CmpInst::isUnsigned(Pred) &&
3001 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3002 (CmpInst::isSigned(Pred) &&
3003 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3004 }
3005
3006 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3007 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3008 if (Value *V = SimplifyICmpInst(Pred, A == RHS ? B : A,
3009 Constant::getNullValue(RHS->getType()), Q,
3010 MaxRecurse - 1))
3011 return V;
3012
3013 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3014 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3015 if (Value *V =
3016 SimplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3017 C == LHS ? D : C, Q, MaxRecurse - 1))
3018 return V;
3019
3020 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3021 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3022 trySimplifyICmpWithAdds(Pred, LHS, RHS);
3023 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3024 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3025 Value *Y, *Z;
3026 if (A == C) {
3027 // C + B == C + D -> B == D
3028 Y = B;
3029 Z = D;
3030 } else if (A == D) {
3031 // D + B == C + D -> B == C
3032 Y = B;
3033 Z = C;
3034 } else if (B == C) {
3035 // A + C == C + D -> A == D
3036 Y = A;
3037 Z = D;
3038 } else {
3039 assert(B == D);
3040 // A + D == C + D -> A == C
3041 Y = A;
3042 Z = C;
3043 }
3044 if (Value *V = SimplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3045 return V;
3046 }
3047 }
3048
3049 if (LBO)
3050 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3051 return V;
3052
3053 if (RBO)
3054 if (Value *V = simplifyICmpWithBinOpOnLHS(
3055 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3056 return V;
3057
3058 // 0 - (zext X) pred C
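// A zext is non-negative and its negation cannot wrap in the wider type, so
// "0 - (zext X)" always lies in [-(2**N - 1), 0] for an N-bit X; signed
// comparisons against a non-negative constant can then fold.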
3059 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3060 const APInt *C;
3061 if (match(RHS, m_APInt(C))) {
3062 if (C->isStrictlyPositive()) {
3063 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3064 return ConstantInt::getTrue(GetCompareTy(RHS));
3065 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3066 return ConstantInt::getFalse(GetCompareTy(RHS));
3067 }
3068 if (C->isNonNegative()) {
3069 if (Pred == ICmpInst::ICMP_SLE)
3070 return ConstantInt::getTrue(GetCompareTy(RHS));
3071 if (Pred == ICmpInst::ICMP_SGT)
3072 return ConstantInt::getFalse(GetCompareTy(RHS));
3073 }
3074 }
3075 }
3076
3077 // If C2 is a power-of-2 and C is not:
3078 // (C2 << X) == C --> false
3079 // (C2 << X) != C --> true
3080 const APInt *C;
3081 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3082 match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3083 // C2 << X can equal zero in some circumstances.
3084 // This simplification might be unsafe if C is zero.
3085 //
3086 // We know it is safe if:
3087 // - The shift is nsw. We can't shift out the one bit.
3088 // - The shift is nuw. We can't shift out the one bit.
3089 // - C2 is one.
3090 // - C isn't zero.
3091 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3092 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3093 match(LHS, m_Shl(m_One(), m_Value())) || !C->isNullValue()) {
3094 if (Pred == ICmpInst::ICMP_EQ)
3095 return ConstantInt::getFalse(GetCompareTy(RHS));
3096 if (Pred == ICmpInst::ICMP_NE)
3097 return ConstantInt::getTrue(GetCompareTy(RHS));
3098 }
3099 }
3100
3101 // TODO: This is overly constrained. LHS can be any power-of-2.
3102 // (1 << X) >u 0x8000 --> false
3103 // (1 << X) <=u 0x8000 --> true
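// (1 << X) is a power of two no greater than the sign mask whenever X is in
// bounds (an oversized shift would be poison), so it can never be
// unsigned-greater than the sign mask.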
3104 if (match(LHS, m_Shl(m_One(), m_Value())) && match(RHS, m_SignMask())) {
3105 if (Pred == ICmpInst::ICMP_UGT)
3106 return ConstantInt::getFalse(GetCompareTy(RHS));
3107 if (Pred == ICmpInst::ICMP_ULE)
3108 return ConstantInt::getTrue(GetCompareTy(RHS));
3109 }
3110
3111 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
3112 LBO->getOperand(1) == RBO->getOperand(1)) {
3113 switch (LBO->getOpcode()) {
3114 default:
3115 break;
3116 case Instruction::UDiv:
3117 case Instruction::LShr:
3118 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3119 !Q.IIQ.isExact(RBO))
3120 break;
3121 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3122 RBO->getOperand(0), Q, MaxRecurse - 1))
3123 return V;
3124 break;
3125 case Instruction::SDiv:
3126 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3127 !Q.IIQ.isExact(RBO))
3128 break;
3129 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3130 RBO->getOperand(0), Q, MaxRecurse - 1))
3131 return V;
3132 break;
3133 case Instruction::AShr:
3134 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3135 break;
3136 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3137 RBO->getOperand(0), Q, MaxRecurse - 1))
3138 return V;
3139 break;
3140 case Instruction::Shl: {
3141 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3142 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3143 if (!NUW && !NSW)
3144 break;
3145 if (!NSW && ICmpInst::isSigned(Pred))
3146 break;
3147 if (Value *V = SimplifyICmpInst(Pred, LBO->getOperand(0),
3148 RBO->getOperand(0), Q, MaxRecurse - 1))
3149 return V;
3150 break;
3151 }
3152 }
3153 }
3154 return nullptr;
3155 }
3156
3157 /// Simplify integer comparisons where at least one operand of the compare
3158 /// matches an integer min/max idiom.
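/// For example, "icmp sge (smax A, B), A" folds to true and
/// "icmp ult (umax A, B), A" folds to false.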
3159 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3160 Value *RHS, const SimplifyQuery &Q,
3161 unsigned MaxRecurse) {
3162 Type *ITy = GetCompareTy(LHS); // The return type.
3163 Value *A, *B;
3164 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3165 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3166
3167 // Signed variants on "max(a,b)>=a -> true".
3168 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3169 if (A != RHS)
3170 std::swap(A, B); // smax(A, B) pred A.
3171 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3172 // We analyze this as smax(A, B) pred A.
3173 P = Pred;
3174 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3175 (A == LHS || B == LHS)) {
3176 if (A != LHS)
3177 std::swap(A, B); // A pred smax(A, B).
3178 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3179 // We analyze this as smax(A, B) swapped-pred A.
3180 P = CmpInst::getSwappedPredicate(Pred);
3181 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3182 (A == RHS || B == RHS)) {
3183 if (A != RHS)
3184 std::swap(A, B); // smin(A, B) pred A.
3185 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3186 // We analyze this as smax(-A, -B) swapped-pred -A.
3187 // Note that we do not need to actually form -A or -B thanks to EqP.
3188 P = CmpInst::getSwappedPredicate(Pred);
3189 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3190 (A == LHS || B == LHS)) {
3191 if (A != LHS)
3192 std::swap(A, B); // A pred smin(A, B).
3193 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3194 // We analyze this as smax(-A, -B) pred -A.
3195 // Note that we do not need to actually form -A or -B thanks to EqP.
3196 P = Pred;
3197 }
3198 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3199 // Cases correspond to "max(A, B) p A".
3200 switch (P) {
3201 default:
3202 break;
3203 case CmpInst::ICMP_EQ:
3204 case CmpInst::ICMP_SLE:
3205 // Equivalent to "A EqP B". This may be the same as the condition tested
3206 // in the max/min; if so, we can just return that.
3207 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3208 return V;
3209 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3210 return V;
3211 // Otherwise, see if "A EqP B" simplifies.
3212 if (MaxRecurse)
3213 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3214 return V;
3215 break;
3216 case CmpInst::ICMP_NE:
3217 case CmpInst::ICMP_SGT: {
3218 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3219 // Equivalent to "A InvEqP B". This may be the same as the condition
3220 // tested in the max/min; if so, we can just return that.
3221 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3222 return V;
3223 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3224 return V;
3225 // Otherwise, see if "A InvEqP B" simplifies.
3226 if (MaxRecurse)
3227 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3228 return V;
3229 break;
3230 }
3231 case CmpInst::ICMP_SGE:
3232 // Always true.
3233 return getTrue(ITy);
3234 case CmpInst::ICMP_SLT:
3235 // Always false.
3236 return getFalse(ITy);
3237 }
3238 }
3239
3240 // Unsigned variants on "max(a,b)>=a -> true".
3241 P = CmpInst::BAD_ICMP_PREDICATE;
3242 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3243 if (A != RHS)
3244 std::swap(A, B); // umax(A, B) pred A.
3245 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3246 // We analyze this as umax(A, B) pred A.
3247 P = Pred;
3248 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3249 (A == LHS || B == LHS)) {
3250 if (A != LHS)
3251 std::swap(A, B); // A pred umax(A, B).
3252 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3253 // We analyze this as umax(A, B) swapped-pred A.
3254 P = CmpInst::getSwappedPredicate(Pred);
3255 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3256 (A == RHS || B == RHS)) {
3257 if (A != RHS)
3258 std::swap(A, B); // umin(A, B) pred A.
3259 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3260 // We analyze this as umax(-A, -B) swapped-pred -A.
3261 // Note that we do not need to actually form -A or -B thanks to EqP.
3262 P = CmpInst::getSwappedPredicate(Pred);
3263 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3264 (A == LHS || B == LHS)) {
3265 if (A != LHS)
3266 std::swap(A, B); // A pred umin(A, B).
3267 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3268 // We analyze this as umax(-A, -B) pred -A.
3269 // Note that we do not need to actually form -A or -B thanks to EqP.
3270 P = Pred;
3271 }
3272 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3273 // Cases correspond to "max(A, B) p A".
3274 switch (P) {
3275 default:
3276 break;
3277 case CmpInst::ICMP_EQ:
3278 case CmpInst::ICMP_ULE:
3279 // Equivalent to "A EqP B". This may be the same as the condition tested
3280 // in the max/min; if so, we can just return that.
3281 if (Value *V = ExtractEquivalentCondition(LHS, EqP, A, B))
3282 return V;
3283 if (Value *V = ExtractEquivalentCondition(RHS, EqP, A, B))
3284 return V;
3285 // Otherwise, see if "A EqP B" simplifies.
3286 if (MaxRecurse)
3287 if (Value *V = SimplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3288 return V;
3289 break;
3290 case CmpInst::ICMP_NE:
3291 case CmpInst::ICMP_UGT: {
3292 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3293 // Equivalent to "A InvEqP B". This may be the same as the condition
3294 // tested in the max/min; if so, we can just return that.
3295 if (Value *V = ExtractEquivalentCondition(LHS, InvEqP, A, B))
3296 return V;
3297 if (Value *V = ExtractEquivalentCondition(RHS, InvEqP, A, B))
3298 return V;
3299 // Otherwise, see if "A InvEqP B" simplifies.
3300 if (MaxRecurse)
3301 if (Value *V = SimplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3302 return V;
3303 break;
3304 }
3305 case CmpInst::ICMP_UGE:
3306 return getTrue(ITy);
3307 case CmpInst::ICMP_ULT:
3308 return getFalse(ITy);
3309 }
3310 }
3311
3312 // Comparing one max and one min that share a common operand?
3313 // Canonicalize min operand to RHS.
3314 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3315 match(LHS, m_SMin(m_Value(), m_Value()))) {
3316 std::swap(LHS, RHS);
3317 Pred = ICmpInst::getSwappedPredicate(Pred);
3318 }
3319
3320 Value *C, *D;
3321 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3322 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3323 (A == C || A == D || B == C || B == D)) {
3324 // smax(A, B) >=s smin(A, D) --> true
3325 if (Pred == CmpInst::ICMP_SGE)
3326 return getTrue(ITy);
3327 // smax(A, B) <s smin(A, D) --> false
3328 if (Pred == CmpInst::ICMP_SLT)
3329 return getFalse(ITy);
3330 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3331 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3332 (A == C || A == D || B == C || B == D)) {
3333 // umax(A, B) >=u umin(A, D) --> true
3334 if (Pred == CmpInst::ICMP_UGE)
3335 return getTrue(ITy);
3336 // umax(A, B) <u umin(A, D) --> false
3337 if (Pred == CmpInst::ICMP_ULT)
3338 return getFalse(ITy);
3339 }
3340
3341 return nullptr;
3342 }
3343
3344 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3345 Value *LHS, Value *RHS,
3346 const SimplifyQuery &Q) {
3347 // Gracefully handle instructions that have not been inserted yet.
3348 if (!Q.AC || !Q.CxtI || !Q.CxtI->getParent())
3349 return nullptr;
3350
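// Walk the assumes that mention either operand of this compare. If a
// dominating assumed condition implies the compare (or its negation), fold
// to the implied constant; e.g. a dominating "llvm.assume(icmp ult %x, 10)"
// can fold a later "icmp ult %x, 20" to true.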
3351 for (Value *AssumeBaseOp : {LHS, RHS}) {
3352 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3353 if (!AssumeVH)
3354 continue;
3355
3356 CallInst *Assume = cast<CallInst>(AssumeVH);
3357 if (Optional<bool> Imp =
3358 isImpliedCondition(Assume->getArgOperand(0), Predicate, LHS, RHS,
3359 Q.DL))
3360 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3361 return ConstantInt::get(GetCompareTy(LHS), *Imp);
3362 }
3363 }
3364
3365 return nullptr;
3366 }
3367
3368 /// Given operands for an ICmpInst, see if we can fold the result.
3369 /// If not, this returns null.
3370 static Value *SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3371 const SimplifyQuery &Q, unsigned MaxRecurse) {
3372 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3373 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3374
3375 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3376 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3377 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3378
3379 // If we have a constant, make sure it is on the RHS.
3380 std::swap(LHS, RHS);
3381 Pred = CmpInst::getSwappedPredicate(Pred);
3382 }
3383 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3384
3385 Type *ITy = GetCompareTy(LHS); // The return type.
3386
3387 // icmp poison, X -> poison
3388 if (isa<PoisonValue>(RHS))
3389 return PoisonValue::get(ITy);
3390
3391 // For EQ and NE, we can always pick a value for the undef to make the
3392 // predicate pass or fail, so we can return undef.
3393 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3394 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3395 return UndefValue::get(ITy);
3396
3397 // icmp X, X -> true/false
3398 // icmp X, undef -> true/false because undef could be X.
3399 if (LHS == RHS || Q.isUndefValue(RHS))
3400 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3401
3402 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3403 return V;
3404
3405 // TODO: Sink/common this with other potentially expensive calls that use
3406 // ValueTracking? See comment below for isKnownNonEqual().
3407 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3408 return V;
3409
3410 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3411 return V;
3412
3413 // If both operands have range metadata, use the metadata
3414 // to simplify the comparison.
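// For example, if LHS has !range [0, 8) and RHS has !range [8, 16), every
// possible LHS value is below every possible RHS value, so "icmp ult" folds
// to true.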
3415 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3416 auto RHS_Instr = cast<Instruction>(RHS);
3417 auto LHS_Instr = cast<Instruction>(LHS);
3418
3419 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3420 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3421 auto RHS_CR = getConstantRangeFromMetadata(
3422 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3423 auto LHS_CR = getConstantRangeFromMetadata(
3424 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3425
3426 if (LHS_CR.icmp(Pred, RHS_CR))
3427 return ConstantInt::getTrue(RHS->getContext());
3428
3429 if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3430 return ConstantInt::getFalse(RHS->getContext());
3431 }
3432 }
3433
3434 // Compare of cast, for example (zext X) != 0 -> X != 0
3435 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3436 Instruction *LI = cast<CastInst>(LHS);
3437 Value *SrcOp = LI->getOperand(0);
3438 Type *SrcTy = SrcOp->getType();
3439 Type *DstTy = LI->getType();
3440
3441 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3442 // if the integer type is the same size as the pointer type.
3443 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3444 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3445 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3446 // Transfer the cast to the constant.
3447 if (Value *V = SimplifyICmpInst(Pred, SrcOp,
3448 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3449 Q, MaxRecurse-1))
3450 return V;
3451 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3452 if (RI->getOperand(0)->getType() == SrcTy)
3453 // Compare without the cast.
3454 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3455 Q, MaxRecurse-1))
3456 return V;
3457 }
3458 }
3459
3460 if (isa<ZExtInst>(LHS)) {
3461 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3462 // same type.
3463 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3464 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3465 // Compare X and Y. Note that signed predicates become unsigned.
3466 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3467 SrcOp, RI->getOperand(0), Q,
3468 MaxRecurse-1))
3469 return V;
3470 }
3471 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3472 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3473 if (SrcOp == RI->getOperand(0)) {
3474 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3475 return ConstantInt::getTrue(ITy);
3476 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3477 return ConstantInt::getFalse(ITy);
3478 }
3479 }
3480 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3481 // too. If not, then try to deduce the result of the comparison.
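// For example, "icmp ugt (zext i8 %x to i32), 300" folds to false: 300 does
// not survive the i8 round-trip, so the zext result (at most 255) is always
// unsigned-less than it.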
3482 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3483 // Compute the constant that results from truncating to SrcTy and then
3484 // re-extending to DstTy.
3485 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3486 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3487
3488 // If the re-extended constant didn't change then this is effectively
3489 // also a case of comparing two zero-extended values.
3490 if (RExt == CI && MaxRecurse)
3491 if (Value *V = SimplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3492 SrcOp, Trunc, Q, MaxRecurse-1))
3493 return V;
3494
3495 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3496 // there. Use this to work out the result of the comparison.
3497 if (RExt != CI) {
3498 switch (Pred) {
3499 default: llvm_unreachable("Unknown ICmp predicate!");
3500 // LHS <u RHS.
3501 case ICmpInst::ICMP_EQ:
3502 case ICmpInst::ICMP_UGT:
3503 case ICmpInst::ICMP_UGE:
3504 return ConstantInt::getFalse(CI->getContext());
3505
3506 case ICmpInst::ICMP_NE:
3507 case ICmpInst::ICMP_ULT:
3508 case ICmpInst::ICMP_ULE:
3509 return ConstantInt::getTrue(CI->getContext());
3510
3511 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3512 // is non-negative then LHS <s RHS.
3513 case ICmpInst::ICMP_SGT:
3514 case ICmpInst::ICMP_SGE:
3515 return CI->getValue().isNegative() ?
3516 ConstantInt::getTrue(CI->getContext()) :
3517 ConstantInt::getFalse(CI->getContext());
3518
3519 case ICmpInst::ICMP_SLT:
3520 case ICmpInst::ICMP_SLE:
3521 return CI->getValue().isNegative() ?
3522 ConstantInt::getFalse(CI->getContext()) :
3523 ConstantInt::getTrue(CI->getContext());
3524 }
3525 }
3526 }
3527 }
3528
3529 if (isa<SExtInst>(LHS)) {
3530 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3531 // same type.
3532 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3533 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3534 // Compare X and Y. Note that the predicate does not change.
3535 if (Value *V = SimplifyICmpInst(Pred, SrcOp, RI->getOperand(0),
3536 Q, MaxRecurse-1))
3537 return V;
3538 }
3539 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3540 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3541 if (SrcOp == RI->getOperand(0)) {
3542 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3543 return ConstantInt::getTrue(ITy);
3544 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3545 return ConstantInt::getFalse(ITy);
3546 }
3547 }
3548 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3549 // too. If not, then try to deduce the result of the comparison.
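// For example, "icmp slt (sext i8 %x to i32), 1000" folds to true: 1000
// does not survive the i8 round-trip and is non-negative, so every sext
// result (at most 127) is signed-less than it.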
3550 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3551 // Compute the constant that results from truncating to SrcTy and then
3552 // re-extending to DstTy.
3553 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3554 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3555
3556 // If the re-extended constant didn't change then this is effectively
3557 // also a case of comparing two sign-extended values.
3558 if (RExt == CI && MaxRecurse)
3559 if (Value *V = SimplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse-1))
3560 return V;
3561
3562 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3563 // bits there. Use this to work out the result of the comparison.
3564 if (RExt != CI) {
3565 switch (Pred) {
3566 default: llvm_unreachable("Unknown ICmp predicate!");
3567 case ICmpInst::ICMP_EQ:
3568 return ConstantInt::getFalse(CI->getContext());
3569 case ICmpInst::ICMP_NE:
3570 return ConstantInt::getTrue(CI->getContext());
3571
3572 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3573 // LHS >s RHS.
3574 case ICmpInst::ICMP_SGT:
3575 case ICmpInst::ICMP_SGE:
3576 return CI->getValue().isNegative() ?
3577 ConstantInt::getTrue(CI->getContext()) :
3578 ConstantInt::getFalse(CI->getContext());
3579 case ICmpInst::ICMP_SLT:
3580 case ICmpInst::ICMP_SLE:
3581 return CI->getValue().isNegative() ?
3582 ConstantInt::getFalse(CI->getContext()) :
3583 ConstantInt::getTrue(CI->getContext());
3584
3585 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3586 // LHS >u RHS.
3587 case ICmpInst::ICMP_UGT:
3588 case ICmpInst::ICMP_UGE:
3589 // Comparison is true iff the LHS <s 0.
3590 if (MaxRecurse)
3591 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3592 Constant::getNullValue(SrcTy),
3593 Q, MaxRecurse-1))
3594 return V;
3595 break;
3596 case ICmpInst::ICMP_ULT:
3597 case ICmpInst::ICMP_ULE:
3598 // Comparison is true iff the LHS >=s 0.
3599 if (MaxRecurse)
3600 if (Value *V = SimplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3601 Constant::getNullValue(SrcTy),
3602 Q, MaxRecurse-1))
3603 return V;
3604 break;
3605 }
3606 }
3607 }
3608 }
3609 }
3610
3611 // icmp eq|ne X, Y -> false|true if X != Y
3612 // This is potentially expensive, and we have already computed known bits
3613 // for compares with 0 above, so only try this for a non-zero compare.
3614 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3615 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3616 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3617 }
3618
3619 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3620 return V;
3621
3622 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3623 return V;
3624
3625 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3626 return V;
3627
3628 // Simplify comparisons of related pointers using a powerful, recursive
3629 // GEP-walk when we have target data available.
3630 if (LHS->getType()->isPointerTy())
3631 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
3632 return C;
3633 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3634 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3635 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3636 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3637 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3638 Q.DL.getTypeSizeInBits(CRHS->getType()))
3639 if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
3640 CRHS->getPointerOperand(), Q))
3641 return C;
3642
3643 if (GetElementPtrInst *GLHS = dyn_cast<GetElementPtrInst>(LHS)) {
3644 if (GEPOperator *GRHS = dyn_cast<GEPOperator>(RHS)) {
3645 if (GLHS->getPointerOperand() == GRHS->getPointerOperand() &&
3646 GLHS->hasAllConstantIndices() && GRHS->hasAllConstantIndices() &&
3647 (ICmpInst::isEquality(Pred) ||
3648 (GLHS->isInBounds() && GRHS->isInBounds() &&
3649 Pred == ICmpInst::getSignedPredicate(Pred)))) {
3650 // The bases are equal and the indices are constant. Build a constant
3651 // expression GEP with the same indices and a null base pointer to see
3652 // what constant folding can make out of it.
3653 Constant *Null = Constant::getNullValue(GLHS->getPointerOperandType());
3654 SmallVector<Value *, 4> IndicesLHS(GLHS->indices());
3655 Constant *NewLHS = ConstantExpr::getGetElementPtr(
3656 GLHS->getSourceElementType(), Null, IndicesLHS);
3657
3658 SmallVector<Value *, 4> IndicesRHS(GRHS->idx_begin(), GRHS->idx_end());
3659 Constant *NewRHS = ConstantExpr::getGetElementPtr(
3660 GLHS->getSourceElementType(), Null, IndicesRHS);
3661 Constant *NewICmp = ConstantExpr::getICmp(Pred, NewLHS, NewRHS);
3662 return ConstantFoldConstant(NewICmp, Q.DL);
3663 }
3664 }
3665 }
3666
3667 // If the comparison is with the result of a select instruction, check whether
3668 // comparing with either branch of the select always yields the same value.
3669 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3670 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3671 return V;
3672
3673 // If the comparison is with the result of a phi instruction, check whether
3674 // doing the compare with each incoming phi value yields a common result.
3675 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3676 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3677 return V;
3678
3679 return nullptr;
3680 }
3681
3682 Value *llvm::SimplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3683 const SimplifyQuery &Q) {
3684 return ::SimplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3685 }
3686
3687 /// Given operands for an FCmpInst, see if we can fold the result.
3688 /// If not, this returns null.
3689 static Value *SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3690 FastMathFlags FMF, const SimplifyQuery &Q,
3691 unsigned MaxRecurse) {
3692 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3693 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3694
3695 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3696 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3697 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3698
3699 // If we have a constant, make sure it is on the RHS.
3700 std::swap(LHS, RHS);
3701 Pred = CmpInst::getSwappedPredicate(Pred);
3702 }
3703
3704 // Fold trivial predicates.
3705 Type *RetTy = GetCompareTy(LHS);
3706 if (Pred == FCmpInst::FCMP_FALSE)
3707 return getFalse(RetTy);
3708 if (Pred == FCmpInst::FCMP_TRUE)
3709 return getTrue(RetTy);
3710
3711 // Fold (un)ordered comparison if we can determine there are no NaNs.
3712 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
3713 if (FMF.noNaNs() ||
3714 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
3715 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
3716
3717 // NaN is unordered; NaN is not ordered.
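// For example, "fcmp ueq %x, NaN" is always true and "fcmp oeq %x, NaN" is
// always false, whatever %x is.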
3718 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3719 "Comparison must be either ordered or unordered");
3720 if (match(RHS, m_NaN()))
3721 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3722
3723 // fcmp pred x, poison and fcmp pred poison, x
3724 // fold to poison
3725 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
3726 return PoisonValue::get(RetTy);
3727
3728 // fcmp pred x, undef and fcmp pred undef, x
3729 // fold to true if unordered, false if ordered
3730 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
3731 // Choosing NaN for the undef will always make unordered comparison succeed
3732 // and ordered comparison fail.
3733 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3734 }
3735
3736 // fcmp x,x -> true/false. Not all compares are foldable.
3737 if (LHS == RHS) {
3738 if (CmpInst::isTrueWhenEqual(Pred))
3739 return getTrue(RetTy);
3740 if (CmpInst::isFalseWhenEqual(Pred))
3741 return getFalse(RetTy);
3742 }
3743
3744 // Handle fcmp with constant RHS.
3745 // TODO: Use match with a specific FP value, so these work with vectors with
3746 // undef lanes.
3747 const APFloat *C;
3748 if (match(RHS, m_APFloat(C))) {
3749 // Check whether the constant is an infinity.
3750 if (C->isInfinity()) {
3751 if (C->isNegative()) {
3752 switch (Pred) {
3753 case FCmpInst::FCMP_OLT:
3754 // No value is ordered and less than negative infinity.
3755 return getFalse(RetTy);
3756 case FCmpInst::FCMP_UGE:
3757 // All values are either unordered with -inf or at least -inf.
3758 return getTrue(RetTy);
3759 default:
3760 break;
3761 }
3762 } else {
3763 switch (Pred) {
3764 case FCmpInst::FCMP_OGT:
3765 // No value is ordered and greater than infinity.
3766 return getFalse(RetTy);
3767 case FCmpInst::FCMP_ULE:
3768 // All values are either unordered with +inf or at most +inf.
3769 return getTrue(RetTy);
3770 default:
3771 break;
3772 }
3773 }
3774
3775 // LHS == Inf
3776 if (Pred == FCmpInst::FCMP_OEQ && isKnownNeverInfinity(LHS, Q.TLI))
3777 return getFalse(RetTy);
3778 // LHS != Inf
3779 if (Pred == FCmpInst::FCMP_UNE && isKnownNeverInfinity(LHS, Q.TLI))
3780 return getTrue(RetTy);
3781 // LHS == Inf || LHS == NaN
3782 if (Pred == FCmpInst::FCMP_UEQ && isKnownNeverInfinity(LHS, Q.TLI) &&
3783 isKnownNeverNaN(LHS, Q.TLI))
3784 return getFalse(RetTy);
3785 // LHS != Inf && LHS != NaN
3786 if (Pred == FCmpInst::FCMP_ONE && isKnownNeverInfinity(LHS, Q.TLI) &&
3787 isKnownNeverNaN(LHS, Q.TLI))
3788 return getTrue(RetTy);
3789 }
3790 if (C->isNegative() && !C->isNegZero()) {
3791 assert(!C->isNaN() && "Unexpected NaN constant!");
3792 // TODO: We can catch more cases by using a range check rather than
3793 // relying on CannotBeOrderedLessThanZero.
3794 switch (Pred) {
3795 case FCmpInst::FCMP_UGE:
3796 case FCmpInst::FCMP_UGT:
3797 case FCmpInst::FCMP_UNE:
3798 // (X >= 0) implies (X > C) when (C < 0)
3799 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3800 return getTrue(RetTy);
3801 break;
3802 case FCmpInst::FCMP_OEQ:
3803 case FCmpInst::FCMP_OLE:
3804 case FCmpInst::FCMP_OLT:
3805 // (X >= 0) implies !(X < C) when (C < 0)
3806 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3807 return getFalse(RetTy);
3808 break;
3809 default:
3810 break;
3811 }
3812 }
3813
3814 // Check comparison of [minnum/maxnum with constant] with other constant.
3815 const APFloat *C2;
3816 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
3817 *C2 < *C) ||
3818 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
3819 *C2 > *C)) {
3820 bool IsMaxNum =
3821 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
3822 // The ordered relationship and minnum/maxnum guarantee that we do not
3823 // have NaN constants, so ordered/unordered preds are handled the same.
3824 switch (Pred) {
3825 case FCmpInst::FCMP_OEQ: case FCmpInst::FCMP_UEQ:
3826 // minnum(X, LesserC) == C --> false
3827 // maxnum(X, GreaterC) == C --> false
3828 return getFalse(RetTy);
3829 case FCmpInst::FCMP_ONE: case FCmpInst::FCMP_UNE:
3830 // minnum(X, LesserC) != C --> true
3831 // maxnum(X, GreaterC) != C --> true
3832 return getTrue(RetTy);
3833 case FCmpInst::FCMP_OGE: case FCmpInst::FCMP_UGE:
3834 case FCmpInst::FCMP_OGT: case FCmpInst::FCMP_UGT:
3835 // minnum(X, LesserC) >= C --> false
3836 // minnum(X, LesserC) > C --> false
3837 // maxnum(X, GreaterC) >= C --> true
3838 // maxnum(X, GreaterC) > C --> true
3839 return ConstantInt::get(RetTy, IsMaxNum);
3840 case FCmpInst::FCMP_OLE: case FCmpInst::FCMP_ULE:
3841 case FCmpInst::FCMP_OLT: case FCmpInst::FCMP_ULT:
3842 // minnum(X, LesserC) <= C --> true
3843 // minnum(X, LesserC) < C --> true
3844 // maxnum(X, GreaterC) <= C --> false
3845 // maxnum(X, GreaterC) < C --> false
3846 return ConstantInt::get(RetTy, !IsMaxNum);
3847 default:
3848 // TRUE/FALSE/ORD/UNO should be handled before this.
3849 llvm_unreachable("Unexpected fcmp predicate");
3850 }
3851 }
3852 }
3853
3854 if (match(RHS, m_AnyZeroFP())) {
3855 switch (Pred) {
3856 case FCmpInst::FCMP_OGE:
3857 case FCmpInst::FCMP_ULT:
3858 // Positive or zero X >= 0.0 --> true
3859 // Positive or zero X < 0.0 --> false
3860 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) &&
3861 CannotBeOrderedLessThanZero(LHS, Q.TLI))
3862 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
3863 break;
3864 case FCmpInst::FCMP_UGE:
3865 case FCmpInst::FCMP_OLT:
3866 // Positive or zero or nan X >= 0.0 --> true
3867 // Positive or zero or nan X < 0.0 --> false
3868 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
3869 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
3870 break;
3871 default:
3872 break;
3873 }
3874 }
3875
3876 // If the comparison is with the result of a select instruction, check whether
3877 // comparing with either branch of the select always yields the same value.
3878 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3879 if (Value *V = ThreadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3880 return V;
3881
3882 // If the comparison is with the result of a phi instruction, check whether
3883 // doing the compare with each incoming phi value yields a common result.
3884 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3885 if (Value *V = ThreadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3886 return V;
3887
3888 return nullptr;
3889 }
3890
3891 Value *llvm::SimplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3892 FastMathFlags FMF, const SimplifyQuery &Q) {
3893 return ::SimplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
3894 }
3895
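/// See if V simplifies when its operand Op is replaced with RepOp. If
/// AllowRefinement is false, only transforms that cannot refine the result
/// (e.g. fold a potentially-poison value to a constant) are applied;
/// otherwise the generic simplify and constant-fold queries below are used.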
3896 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
3897 const SimplifyQuery &Q,
3898 bool AllowRefinement,
3899 unsigned MaxRecurse) {
3900 assert(!Op->getType()->isVectorTy() && "This is not safe for vectors");
3901
3902 // Trivial replacement.
3903 if (V == Op)
3904 return RepOp;
3905
3906 // We cannot replace a constant, and shouldn't even try.
3907 if (isa<Constant>(Op))
3908 return nullptr;
3909
3910 auto *I = dyn_cast<Instruction>(V);
3911 if (!I || !is_contained(I->operands(), Op))
3912 return nullptr;
3913
3914 // Replace Op with RepOp in instruction operands.
3915 SmallVector<Value *, 8> NewOps(I->getNumOperands());
3916 transform(I->operands(), NewOps.begin(),
3917 [&](Value *V) { return V == Op ? RepOp : V; });
3918
3919 if (!AllowRefinement) {
3920 // General InstSimplify functions may refine the result, e.g. by returning
3921 // a constant for a potentially poison value. To avoid this, implement only
3922 // a few non-refining but profitable transforms here.
3923
3924 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
3925 unsigned Opcode = BO->getOpcode();
3926 // id op x -> x, x op id -> x
3927 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
3928 return NewOps[1];
3929 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
3930 /* RHS */ true))
3931 return NewOps[0];
3932
3933 // x & x -> x, x | x -> x
3934 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
3935 NewOps[0] == NewOps[1])
3936 return NewOps[0];
3937 }
3938
3939 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
3940 // getelementptr x, 0 -> x
3941 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()) &&
3942 !GEP->isInBounds())
3943 return NewOps[0];
3944 }
3945 } else if (MaxRecurse) {
3946 // The simplification queries below may return the original value. Consider:
3947 // %div = udiv i32 %arg, %arg2
3948 // %mul = mul nsw i32 %div, %arg2
3949 // %cmp = icmp eq i32 %mul, %arg
3950 // %sel = select i1 %cmp, i32 %div, i32 undef
3951 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
3952 // simplifies back to %arg. This can only happen because %mul does not
3953 // dominate %div. To ensure a consistent return value contract, we make sure
3954 // that this case returns nullptr as well.
3955 auto PreventSelfSimplify = [V](Value *Simplified) {
3956 return Simplified != V ? Simplified : nullptr;
3957 };
3958
3959 if (auto *B = dyn_cast<BinaryOperator>(I))
3960 return PreventSelfSimplify(SimplifyBinOp(B->getOpcode(), NewOps[0],
3961 NewOps[1], Q, MaxRecurse - 1));
3962
3963 if (CmpInst *C = dyn_cast<CmpInst>(I))
3964 return PreventSelfSimplify(SimplifyCmpInst(C->getPredicate(), NewOps[0],
3965 NewOps[1], Q, MaxRecurse - 1));
3966
3967 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
3968 return PreventSelfSimplify(SimplifyGEPInst(GEP->getSourceElementType(),
3969 NewOps, Q, MaxRecurse - 1));
3970
3971 if (isa<SelectInst>(I))
3972 return PreventSelfSimplify(
3973 SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q,
3974 MaxRecurse - 1));
3975 // TODO: We could hand off more cases to instsimplify here.
3976 }
3977
3978 // If all operands are constant after substituting Op for RepOp then we can
3979 // constant fold the instruction.
3980 SmallVector<Constant *, 8> ConstOps;
3981 for (Value *NewOp : NewOps) {
3982 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
3983 ConstOps.push_back(ConstOp);
3984 else
3985 return nullptr;
3986 }
3987
3988 // Consider:
3989 // %cmp = icmp eq i32 %x, 2147483647
3990 // %add = add nsw i32 %x, 1
3991 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
3992 //
3993 // We can't replace %sel with %add unless we strip away the flags (which
3994 // will be done in InstCombine).
3995 // TODO: This may be unsound, because it only catches some forms of
3996 // refinement.
3997 if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
3998 return nullptr;
3999
4000 if (CmpInst *C = dyn_cast<CmpInst>(I))
4001 return ConstantFoldCompareInstOperands(C->getPredicate(), ConstOps[0],
4002 ConstOps[1], Q.DL, Q.TLI);
4003
4004 if (LoadInst *LI = dyn_cast<LoadInst>(I))
4005 if (!LI->isVolatile())
4006 return ConstantFoldLoadFromConstPtr(ConstOps[0], LI->getType(), Q.DL);
4007
4008 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4009 }
4010
4011 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4012 const SimplifyQuery &Q,
4013 bool AllowRefinement) {
4014 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
4015 RecursionLimit);
4016 }
4017
4018 /// Try to simplify a select instruction when its condition operand is an
4019 /// integer comparison where one operand of the compare is a constant.
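/// For example, "select ((X & 8) == 0), (X & ~8), X" always yields X: when
/// bit 3 of X is clear the two arms are equal, and when it is set the false
/// arm X is chosen.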
4020 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4021 const APInt *Y, bool TrueWhenUnset) {
4022 const APInt *C;
4023
4024 // (X & Y) == 0 ? X & ~Y : X --> X
4025 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4026 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4027 *Y == ~*C)
4028 return TrueWhenUnset ? FalseVal : TrueVal;
4029
4030 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4031 // (X & Y) != 0 ? X : X & ~Y --> X
4032 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4033 *Y == ~*C)
4034 return TrueWhenUnset ? FalseVal : TrueVal;
4035
4036 if (Y->isPowerOf2()) {
4037 // (X & Y) == 0 ? X | Y : X --> X | Y
4038 // (X & Y) != 0 ? X | Y : X --> X
4039 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4040 *Y == *C)
4041 return TrueWhenUnset ? TrueVal : FalseVal;
4042
4043 // (X & Y) == 0 ? X : X | Y --> X
4044 // (X & Y) != 0 ? X : X | Y --> X | Y
4045 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4046 *Y == *C)
4047 return TrueWhenUnset ? TrueVal : FalseVal;
4048 }
4049
4050 return nullptr;
4051 }
4052
4053 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4054 /// eq/ne.
4055 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4056 ICmpInst::Predicate Pred,
4057 Value *TrueVal, Value *FalseVal) {
4058 Value *X;
4059 APInt Mask;
4060 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4061 return nullptr;
4062
4063 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4064 Pred == ICmpInst::ICMP_EQ);
4065 }
4066
4067 /// Try to simplify a select instruction when its condition operand is an
4068 /// integer comparison.
4069 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4070 Value *FalseVal, const SimplifyQuery &Q,
4071 unsigned MaxRecurse) {
4072 ICmpInst::Predicate Pred;
4073 Value *CmpLHS, *CmpRHS;
4074 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4075 return nullptr;
4076
4077 // Canonicalize ne to eq predicate.
4078 if (Pred == ICmpInst::ICMP_NE) {
4079 Pred = ICmpInst::ICMP_EQ;
4080 std::swap(TrueVal, FalseVal);
4081 }
4082
4083 // Check for integer min/max with a limit constant:
4084 // X > MIN_INT ? X : MIN_INT --> X
4085 // X < MAX_INT ? X : MAX_INT --> X
4086 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4087 Value *X, *Y;
4088 SelectPatternFlavor SPF =
4089 matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4090 X, Y).Flavor;
4091 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4092 APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4093 X->getType()->getScalarSizeInBits());
4094 if (match(Y, m_SpecificInt(LimitC)))
4095 return X;
4096 }
4097 }
4098
4099 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4100 Value *X;
4101 const APInt *Y;
4102 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4103 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4104 /*TrueWhenUnset=*/true))
4105 return V;
4106
4107 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4108 Value *ShAmt;
4109 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4110 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4111 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4112 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4113 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4114 return X;
4115
4116 // Test for a zero-shift-guard-op around rotates. These are used to
4117 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4118 // intrinsics do not have that problem.
4119 // We do not allow this transform for the general funnel shift case because
4120 // that would not preserve the poison safety of the original code.
4121 auto isRotate =
4122 m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4123 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4124 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4125 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4126 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4127 Pred == ICmpInst::ICMP_EQ)
4128 return FalseVal;
4129
4130 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4131 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4132 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4133 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4134 return FalseVal;
4135 if (match(TrueVal,
4136 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4137 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4138 return FalseVal;
4139 }
4140
4141 // Check for other compares that behave like bit test.
4142 if (Value *V = simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred,
4143 TrueVal, FalseVal))
4144 return V;
4145
4146 // If we have a scalar equality comparison, then we know the value in one of
4147 // the arms of the select. See if substituting this value into the arm and
4148 // simplifying the result yields the same value as the other arm.
4149 // Note that the equivalence/replacement opportunity does not hold for vectors
4150 // because each element of a vector select is chosen independently.
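// For example, in "select (icmp eq i32 %x, 5), (add i32 %x, 1), i32 6",
// substituting 5 for %x in the true arm gives 6, which matches the false
// arm, so the select simplifies to 6.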
4151 if (Pred == ICmpInst::ICMP_EQ && !CondVal->getType()->isVectorTy()) {
4152 if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4153 /* AllowRefinement */ false, MaxRecurse) ==
4154 TrueVal ||
4155 simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
4156 /* AllowRefinement */ false, MaxRecurse) ==
4157 TrueVal)
4158 return FalseVal;
4159 if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4160 /* AllowRefinement */ true, MaxRecurse) ==
4161 FalseVal ||
4162 simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
4163 /* AllowRefinement */ true, MaxRecurse) ==
4164 FalseVal)
4165 return FalseVal;
4166 }
4167
4168 return nullptr;
4169 }
4170
4171 /// Try to simplify a select instruction when its condition operand is a
4172 /// floating-point comparison.
4173 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4174 const SimplifyQuery &Q) {
4175 FCmpInst::Predicate Pred;
4176 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4177 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4178 return nullptr;
4179
4180 // This transform is safe if we do not have (do not care about) -0.0 or if
4181 // at least one operand is known to not be -0.0. Otherwise, the select can
4182 // change the sign of a zero operand.
  bool HasNoSignedZeros = Q.CxtI && isa<FPMathOperator>(Q.CxtI) &&
                          Q.CxtI->hasNoSignedZeros();
  const APFloat *C;
  if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
      (match(F, m_APFloat(C)) && C->isNonZero())) {
    // (T == F) ? T : F --> F
    // (F == T) ? T : F --> F
    if (Pred == FCmpInst::FCMP_OEQ)
      return F;

    // (T != F) ? T : F --> T
    // (F != T) ? T : F --> T
    if (Pred == FCmpInst::FCMP_UNE)
      return T;
  }

  return nullptr;
}

/// Given operands for a SelectInst, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                                 const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (auto *CondC = dyn_cast<Constant>(Cond)) {
    if (auto *TrueC = dyn_cast<Constant>(TrueVal))
      if (auto *FalseC = dyn_cast<Constant>(FalseVal))
        return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);

    // select poison, X, Y -> poison
    if (isa<PoisonValue>(CondC))
      return PoisonValue::get(TrueVal->getType());

    // select undef, X, Y -> X or Y
    if (Q.isUndefValue(CondC))
      return isa<Constant>(FalseVal) ? FalseVal : TrueVal;

    // select true, X, Y --> X
    // select false, X, Y --> Y
    // For vectors, allow undef/poison elements in the condition to match the
    // defined elements, so we can eliminate the select.
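    // For example (a sketch): select <2 x i1> <i1 true, i1 undef>, X, Y --> X,
    // since the undef condition element may be chosen to be true as well.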
    if (match(CondC, m_One()))
      return TrueVal;
    if (match(CondC, m_Zero()))
      return FalseVal;
  }

  // select i1 Cond, i1 true, i1 false --> i1 Cond
  assert(Cond->getType()->isIntOrIntVectorTy(1) &&
         "Select must have bool or bool vector condition");
  assert(TrueVal->getType() == FalseVal->getType() &&
         "Select must have same types for true/false ops");
  if (Cond->getType() == TrueVal->getType() &&
      match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
    return Cond;

  // select ?, X, X -> X
  if (TrueVal == FalseVal)
    return TrueVal;

  // If the true or false value is poison, we can fold to the other value.
  // If the true or false value is undef, we can fold to the other value as
  // long as the other value isn't poison.
  // select ?, poison, X -> X
  // select ?, undef, X -> X
  if (isa<PoisonValue>(TrueVal) ||
      (Q.isUndefValue(TrueVal) &&
       isGuaranteedNotToBePoison(FalseVal, Q.AC, Q.CxtI, Q.DT)))
    return FalseVal;
  // select ?, X, poison -> X
  // select ?, X, undef -> X
  if (isa<PoisonValue>(FalseVal) ||
      (Q.isUndefValue(FalseVal) &&
       isGuaranteedNotToBePoison(TrueVal, Q.AC, Q.CxtI, Q.DT)))
    return TrueVal;

  // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
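  // For example (a sketch): select ?, <i32 1, i32 undef>, <i32 1, i32 2>
  // --> <i32 1, i32 2>: lane 0 matches on both sides, and in lane 1 the
  // defined element is the safe choice.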
  Constant *TrueC, *FalseC;
  if (isa<FixedVectorType>(TrueVal->getType()) &&
      match(TrueVal, m_Constant(TrueC)) &&
      match(FalseVal, m_Constant(FalseC))) {
    unsigned NumElts =
        cast<FixedVectorType>(TrueC->getType())->getNumElements();
    SmallVector<Constant *, 16> NewC;
    for (unsigned i = 0; i != NumElts; ++i) {
      // Bail out on incomplete vector constants.
      Constant *TEltC = TrueC->getAggregateElement(i);
      Constant *FEltC = FalseC->getAggregateElement(i);
      if (!TEltC || !FEltC)
        break;

      // If the elements match (undef or not), that value is the result. If
      // only one element is undef, choose the defined element as the safe
      // result.
      if (TEltC == FEltC)
        NewC.push_back(TEltC);
      else if (isa<PoisonValue>(TEltC) ||
               (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
        NewC.push_back(FEltC);
      else if (isa<PoisonValue>(FEltC) ||
               (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
        NewC.push_back(TEltC);
      else
        break;
    }
    if (NewC.size() == NumElts)
      return ConstantVector::get(NewC);
  }

  if (Value *V =
          simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
    return V;

  if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
    return V;

  if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
    return V;

  Optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
  if (Imp)
    return *Imp ? TrueVal : FalseVal;

  return nullptr;
}

Value *llvm::SimplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
                                const SimplifyQuery &Q) {
  return ::SimplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
}

/// Given operands for a GetElementPtrInst, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
                              const SimplifyQuery &Q, unsigned) {
  // The address space of the GEP pointer operand.
  unsigned AS =
      cast<PointerType>(Ops[0]->getType()->getScalarType())->getAddressSpace();

  // getelementptr P -> P.
  if (Ops.size() == 1)
    return Ops[0];

  // Compute the (pointer) type returned by the GEP instruction.
  Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Ops.slice(1));
  Type *GEPTy = PointerType::get(LastType, AS);
  for (Value *Op : Ops) {
    // If one of the operands is a vector, the result type is a vector of
    // pointers. All vector operands must have the same number of elements.
    if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
      GEPTy = VectorType::get(GEPTy, VT->getElementCount());
      break;
    }
  }

  // getelementptr poison, idx -> poison
  // getelementptr baseptr, poison -> poison
  if (any_of(Ops, [](const auto *V) { return isa<PoisonValue>(V); }))
    return PoisonValue::get(GEPTy);

  if (Q.isUndefValue(Ops[0]))
    return UndefValue::get(GEPTy);

  bool IsScalableVec =
      isa<ScalableVectorType>(SrcTy) || any_of(Ops, [](const Value *V) {
        return isa<ScalableVectorType>(V->getType());
      });

  if (Ops.size() == 2) {
    // getelementptr P, 0 -> P.
    if (match(Ops[1], m_Zero()) && Ops[0]->getType() == GEPTy)
      return Ops[0];

    Type *Ty = SrcTy;
    if (!IsScalableVec && Ty->isSized()) {
      Value *P;
      uint64_t C;
      uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
      // getelementptr P, N -> P if P points to a type of zero size.
      if (TyAllocSize == 0 && Ops[0]->getType() == GEPTy)
        return Ops[0];

      // The following transforms are only safe if the ptrtoint cast
      // doesn't truncate the pointers.
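      // For example (a sketch):
      //   %pi = ptrtoint i8* %p to i64
      //   %vi = ptrtoint i8* %v to i64
      //   %d  = sub i64 %pi, %vi
      //   %g  = getelementptr i8, i8* %v, i64 %d  ; --> %p
      // assuming %p and %v share the same underlying object and the index
      // type is pointer-sized.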
      if (Ops[1]->getType()->getScalarSizeInBits() ==
          Q.DL.getPointerSizeInBits(AS)) {
        auto CanSimplify = [GEPTy, &P, V = Ops[0]]() -> bool {
          return P->getType() == GEPTy &&
                 getUnderlyingObject(P) == getUnderlyingObject(V);
        };
        // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
        if (TyAllocSize == 1 &&
            match(Ops[1], m_Sub(m_PtrToInt(m_Value(P)),
                                m_PtrToInt(m_Specific(Ops[0])))) &&
            CanSimplify())
          return P;

        // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
        // size 1 << C.
        if (match(Ops[1], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
                                       m_PtrToInt(m_Specific(Ops[0]))),
                                 m_ConstantInt(C))) &&
            TyAllocSize == 1ULL << C && CanSimplify())
          return P;

        // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
        // size C.
        if (match(Ops[1], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
                                       m_PtrToInt(m_Specific(Ops[0]))),
                                 m_SpecificInt(TyAllocSize))) &&
            CanSimplify())
          return P;
      }
    }
  }

  if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
      all_of(Ops.slice(1).drop_back(1),
             [](Value *Idx) { return match(Idx, m_Zero()); })) {
    unsigned IdxWidth =
        Q.DL.getIndexSizeInBits(Ops[0]->getType()->getPointerAddressSpace());
    if (Q.DL.getTypeSizeInBits(Ops.back()->getType()) == IdxWidth) {
      APInt BasePtrOffset(IdxWidth, 0);
      Value *StrippedBasePtr =
          Ops[0]->stripAndAccumulateInBoundsConstantOffsets(Q.DL,
                                                            BasePtrOffset);

      // Avoid creating inttoptr of zero here: while LLVM's treatment of
      // inttoptr is generally conservative, this particular case is folded to
      // a null pointer, which will have incorrect provenance.

      // gep (gep V, C), (sub 0, V) -> C
      if (match(Ops.back(),
                m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
          !BasePtrOffset.isNullValue()) {
        auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
        return ConstantExpr::getIntToPtr(CI, GEPTy);
      }
      // gep (gep V, C), (xor V, -1) -> C-1
      if (match(Ops.back(),
                m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
          !BasePtrOffset.isOneValue()) {
        auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
        return ConstantExpr::getIntToPtr(CI, GEPTy);
      }
    }
  }

  // Check to see if this is constant foldable.
  if (!all_of(Ops, [](Value *V) { return isa<Constant>(V); }))
    return nullptr;

  auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ops[0]),
                                            Ops.slice(1));
  return ConstantFoldConstant(CE, Q.DL);
}

Value *llvm::SimplifyGEPInst(Type *SrcTy, ArrayRef<Value *> Ops,
                             const SimplifyQuery &Q) {
  return ::SimplifyGEPInst(SrcTy, Ops, Q, RecursionLimit);
}

/// Given operands for an InsertValueInst, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyInsertValueInst(Value *Agg, Value *Val,
                                      ArrayRef<unsigned> Idxs,
                                      const SimplifyQuery &Q, unsigned) {
  if (Constant *CAgg = dyn_cast<Constant>(Agg))
    if (Constant *CVal = dyn_cast<Constant>(Val))
      return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);

  // insertvalue x, undef, n -> x
  if (Q.isUndefValue(Val))
    return Agg;

  // insertvalue x, (extractvalue y, n), n
  if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
    if (EV->getAggregateOperand()->getType() == Agg->getType() &&
        EV->getIndices() == Idxs) {
      // insertvalue undef, (extractvalue y, n), n -> y
      if (Q.isUndefValue(Agg))
        return EV->getAggregateOperand();

      // insertvalue y, (extractvalue y, n), n -> y
      if (Agg == EV->getAggregateOperand())
        return Agg;
    }

  return nullptr;
}

Value *llvm::SimplifyInsertValueInst(Value *Agg, Value *Val,
                                     ArrayRef<unsigned> Idxs,
                                     const SimplifyQuery &Q) {
  return ::SimplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
}

Value *llvm::SimplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
                                       const SimplifyQuery &Q) {
  // Try to constant fold.
  auto *VecC = dyn_cast<Constant>(Vec);
  auto *ValC = dyn_cast<Constant>(Val);
  auto *IdxC = dyn_cast<Constant>(Idx);
  if (VecC && ValC && IdxC)
    return ConstantExpr::getInsertElement(VecC, ValC, IdxC);

  // For fixed-length vector, fold into poison if index is out of bounds.
  if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
    if (isa<FixedVectorType>(Vec->getType()) &&
        CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
      return PoisonValue::get(Vec->getType());
  }

  // If index is undef, it might be out of bounds (see above case).
  if (Q.isUndefValue(Idx))
    return PoisonValue::get(Vec->getType());

  // If the scalar is poison, or it is undef and there is no risk of
  // propagating poison from the vector value, simplify to the vector value.
  if (isa<PoisonValue>(Val) ||
      (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
    return Vec;

  // If we are extracting a value from a vector, then inserting it into the
  // same place, that's the input vector:
  // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
  if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
    return Vec;

  return nullptr;
}

/// Given operands for an ExtractValueInst, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                       const SimplifyQuery &, unsigned) {
  if (auto *CAgg = dyn_cast<Constant>(Agg))
    return ConstantFoldExtractValueInstruction(CAgg, Idxs);

  // extractvalue x, (insertvalue y, elt, n), n -> elt
  unsigned NumIdxs = Idxs.size();
  for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
       IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
    ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
    unsigned NumInsertValueIdxs = InsertValueIdxs.size();
    unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
    if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
        Idxs.slice(0, NumCommonIdxs)) {
      if (NumIdxs == NumInsertValueIdxs)
        return IVI->getInsertedValueOperand();
      break;
    }
  }

  return nullptr;
}

Value *llvm::SimplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
                                      const SimplifyQuery &Q) {
  return ::SimplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
}

/// Given operands for an ExtractElementInst, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyExtractElementInst(Value *Vec, Value *Idx,
                                         const SimplifyQuery &Q, unsigned) {
  auto *VecVTy = cast<VectorType>(Vec->getType());
  if (auto *CVec = dyn_cast<Constant>(Vec)) {
    if (auto *CIdx = dyn_cast<Constant>(Idx))
      return ConstantExpr::getExtractElement(CVec, CIdx);

    if (Q.isUndefValue(Vec))
      return UndefValue::get(VecVTy->getElementType());
  }

  // An undef extract index can be arbitrarily chosen to be an out-of-range
  // index value, which would result in the instruction being poison.
  if (Q.isUndefValue(Idx))
    return PoisonValue::get(VecVTy->getElementType());

  // If extracting a specified index from the vector, see if we can recursively
  // find a previously computed scalar that was inserted into the vector.
  if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
    // For fixed-length vector, fold into poison if index is out of bounds.
    unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
    if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
      return PoisonValue::get(VecVTy->getElementType());
    // Handle case where an element is extracted from a splat.
    if (IdxC->getValue().ult(MinNumElts))
      if (auto *Splat = getSplatValue(Vec))
        return Splat;
    if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
      return Elt;
  } else {
    // The index is not relevant if our vector is a splat.
    if (Value *Splat = getSplatValue(Vec))
      return Splat;
  }
  return nullptr;
}

Value *llvm::SimplifyExtractElementInst(Value *Vec, Value *Idx,
                                        const SimplifyQuery &Q) {
  return ::SimplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
}

/// See if we can fold the given phi. If not, returns null.
static Value *SimplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
                              const SimplifyQuery &Q) {
  // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
  //          here, because the PHI we may succeed simplifying to was not
  //          def-reachable from the original PHI!

  // If all of the PHI's incoming values are the same then replace the PHI node
  // with the common value.
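  // For example: phi i32 [ %x, %bb0 ], [ %x, %bb1 ] --> %x.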
  Value *CommonValue = nullptr;
  bool HasUndefInput = false;
  for (Value *Incoming : IncomingValues) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PN) continue;
    if (Q.isUndefValue(Incoming)) {
      // Remember that we saw an undef value, but otherwise ignore it.
      HasUndefInput = true;
      continue;
    }
    if (CommonValue && Incoming != CommonValue)
      return nullptr; // Not the same, bail out.
    CommonValue = Incoming;
  }

  // If CommonValue is null then all of the incoming values were either undef
  // or equal to the phi node itself.
  if (!CommonValue)
    return UndefValue::get(PN->getType());

  // If we have a PHI node like phi(X, undef, X), where X is defined by some
  // instruction, we cannot return X as the result of the PHI node unless it
  // dominates the PHI block.
  if (HasUndefInput)
    return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;

  return CommonValue;
}

static Value *SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (auto *C = dyn_cast<Constant>(Op))
    return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);

  if (auto *CI = dyn_cast<CastInst>(Op)) {
    auto *Src = CI->getOperand(0);
    Type *SrcTy = Src->getType();
    Type *MidTy = CI->getType();
    Type *DstTy = Ty;
    if (Src->getType() == Ty) {
      auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
      auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
      Type *SrcIntPtrTy =
          SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
      Type *MidIntPtrTy =
          MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
      Type *DstIntPtrTy =
          DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
      if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
                                         SrcIntPtrTy, MidIntPtrTy,
                                         DstIntPtrTy) == Instruction::BitCast)
        return Src;
    }
  }

  // bitcast x -> x
  if (CastOpc == Instruction::BitCast)
    if (Op->getType() == Ty)
      return Op;

  return nullptr;
}

Value *llvm::SimplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
                              const SimplifyQuery &Q) {
  return ::SimplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
}

/// For the given destination element of a shuffle, peek through shuffles to
/// match a root vector source operand that contains that element in the same
/// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
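/// For example (a sketch):
///   %s0 = shufflevector <2 x i32> %x, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
///   %s1 = shufflevector <2 x i32> %s0, <2 x i32> undef, <2 x i32> <i32 1, i32 0>
/// Each lane of %s1 maps back to the same lane of %x, so %s1 simplifies to %x.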
static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
                                   int MaskVal, Value *RootVec,
                                   unsigned MaxRecurse) {
  if (!MaxRecurse--)
    return nullptr;

  // Bail out if any mask value is undefined. That kind of shuffle may be
  // simplified further based on demanded bits or other folds.
  if (MaskVal == -1)
    return nullptr;

  // The mask value chooses which source operand we need to look at next.
  int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
  int RootElt = MaskVal;
  Value *SourceOp = Op0;
  if (MaskVal >= InVecNumElts) {
    RootElt = MaskVal - InVecNumElts;
    SourceOp = Op1;
  }

  // If the source operand is a shuffle itself, look through it to find the
  // matching root vector.
  if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
    return foldIdentityShuffles(
        DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
        SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
  }

  // TODO: Look through bitcasts? What if the bitcast changes the vector element
  // size?

  // The source operand is not a shuffle. Initialize the root vector value for
  // this shuffle if that has not been done yet.
  if (!RootVec)
    RootVec = SourceOp;

  // Give up as soon as a source operand does not match the existing root value.
  if (RootVec != SourceOp)
    return nullptr;

  // The element must be coming from the same lane in the source vector
  // (although it may have crossed lanes in intermediate shuffles).
  if (RootElt != DestElt)
    return nullptr;

  return RootVec;
}

static Value *SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
                                        ArrayRef<int> Mask, Type *RetTy,
                                        const SimplifyQuery &Q,
                                        unsigned MaxRecurse) {
  if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
    return UndefValue::get(RetTy);

  auto *InVecTy = cast<VectorType>(Op0->getType());
  unsigned MaskNumElts = Mask.size();
  ElementCount InVecEltCount = InVecTy->getElementCount();

  bool Scalable = InVecEltCount.isScalable();

  SmallVector<int, 32> Indices;
  Indices.assign(Mask.begin(), Mask.end());

  // Canonicalization: If mask does not select elements from an input vector,
  // replace that input vector with poison.
  if (!Scalable) {
    bool MaskSelects0 = false, MaskSelects1 = false;
    unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
    for (unsigned i = 0; i != MaskNumElts; ++i) {
      if (Indices[i] == -1)
        continue;
      if ((unsigned)Indices[i] < InVecNumElts)
        MaskSelects0 = true;
      else
        MaskSelects1 = true;
    }
    if (!MaskSelects0)
      Op0 = PoisonValue::get(InVecTy);
    if (!MaskSelects1)
      Op1 = PoisonValue::get(InVecTy);
  }

  auto *Op0Const = dyn_cast<Constant>(Op0);
  auto *Op1Const = dyn_cast<Constant>(Op1);

  // If all operands are constant, constant fold the shuffle. This
  // transformation depends on the value of the mask, which is not known at
  // compile time for scalable vectors.
  if (Op0Const && Op1Const)
    return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);

  // Canonicalization: if only one input vector is constant, it shall be the
  // second one. This transformation depends on the value of the mask, which
  // is not known at compile time for scalable vectors.
  if (!Scalable && Op0Const && !Op1Const) {
    std::swap(Op0, Op1);
    ShuffleVectorInst::commuteShuffleMask(Indices,
                                          InVecEltCount.getKnownMinValue());
  }

  // A splat of an inserted scalar constant becomes a vector constant:
  // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
  // NOTE: We may have commuted above, so analyze the updated Indices, not the
  // original mask constant.
  // NOTE: This transformation depends on the value of the mask, which is not
  // known at compile time for scalable vectors.
  Constant *C;
  ConstantInt *IndexC;
  if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
                                          m_ConstantInt(IndexC)))) {
    // Match a splat shuffle mask of the insert index allowing undef elements.
    int InsertIndex = IndexC->getZExtValue();
    if (all_of(Indices, [InsertIndex](int MaskElt) {
          return MaskElt == InsertIndex || MaskElt == -1;
        })) {
      assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");

      // Shuffle mask undefs become undefined constant result elements.
      SmallVector<Constant *, 16> VecC(MaskNumElts, C);
      for (unsigned i = 0; i != MaskNumElts; ++i)
        if (Indices[i] == -1)
          VecC[i] = UndefValue::get(C->getType());
      return ConstantVector::get(VecC);
    }
  }

  // A shuffle of a splat is always the splat itself. Legal if the shuffle's
  // value type is the same as the input vectors' type.
  if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
    if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
        is_splat(OpShuf->getShuffleMask()))
      return Op0;

  // All remaining transformations depend on the value of the mask, which is
  // not known at compile time for scalable vectors.
  if (Scalable)
    return nullptr;

  // Don't fold a shuffle with undef mask elements. This may get folded in a
  // better way using demanded bits or other analysis.
  // TODO: Should we allow this?
  if (is_contained(Indices, -1))
    return nullptr;

  // Check if every element of this shuffle can be mapped back to the
  // corresponding element of a single root vector. If so, we don't need this
  // shuffle. This handles simple identity shuffles as well as chains of
  // shuffles that may widen/narrow and/or move elements across lanes and back.
  Value *RootVec = nullptr;
  for (unsigned i = 0; i != MaskNumElts; ++i) {
    // Note that recursion is limited for each vector element, so if any element
    // exceeds the limit, this will fail to simplify.
    RootVec =
        foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);

    // We can't replace a widening/narrowing shuffle with one of its operands.
    if (!RootVec || RootVec->getType() != RetTy)
      return nullptr;
  }
  return RootVec;
}

/// Given operands for a ShuffleVectorInst, fold the result or return null.
Value *llvm::SimplifyShuffleVectorInst(Value *Op0, Value *Op1,
                                       ArrayRef<int> Mask, Type *RetTy,
                                       const SimplifyQuery &Q) {
  return ::SimplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
}

static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
                              const SimplifyQuery &Q) {
  if (auto *C = dyn_cast<Constant>(Op))
    return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
  return nullptr;
}

/// Given the operand for an FNeg, see if we can fold the result. If not, this
/// returns null.
static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
    return C;

  Value *X;
  // fneg (fneg X) ==> X
  if (match(Op, m_FNeg(m_Value(X))))
    return X;

  return nullptr;
}

Value *llvm::SimplifyFNegInst(Value *Op, FastMathFlags FMF,
                              const SimplifyQuery &Q) {
  return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
}

static Constant *propagateNaN(Constant *In) {
  // If the input is a vector with undef elements, just return a default NaN.
  if (!In->isNaN())
    return ConstantFP::getNaN(In->getType());

  // Propagate the existing NaN constant when possible.
  // TODO: Should we quiet a signaling NaN?
  return In;
}

/// Perform folds that are common to any floating-point operation. This implies
/// transforms based on poison/undef/NaN because the operation itself makes no
/// difference to the result.
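/// For example, "fadd nnan %x, undef" folds to poison: the undef operand may
/// be chosen to be NaN, and 'nnan' makes a NaN operand produce poison.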
static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  // Poison is independent of anything else. It always propagates from an
  // operand to a math result.
  if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
    return PoisonValue::get(Ops[0]->getType());

  for (Value *V : Ops) {
    bool IsNan = match(V, m_NaN());
    bool IsInf = match(V, m_Inf());
    bool IsUndef = Q.isUndefValue(V);

    // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
    // (an undef operand can be chosen to be NaN/Inf), then the result of
    // this operation is poison.
    if (FMF.noNaNs() && (IsNan || IsUndef))
      return PoisonValue::get(V->getType());
    if (FMF.noInfs() && (IsInf || IsUndef))
      return PoisonValue::get(V->getType());

    if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
      if (IsUndef || IsNan)
        return propagateNaN(cast<Constant>(V));
    } else if (ExBehavior != fp::ebStrict) {
      if (IsNan)
        return propagateNaN(cast<Constant>(V));
    }
  }
  return nullptr;
}

/// Given operands for an FAdd, see if we can fold the result. If not, this
/// returns null.
static Value *
SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned MaxRecurse,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // fadd X, -0 ==> X
  if (match(Op1, m_NegZeroFP()))
    return Op0;

  // fadd X, 0 ==> X, when we know X is not -0
  if (match(Op1, m_PosZeroFP()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;

  // With nnan: -X + X --> 0.0 (and commuted variant)
  // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
  // Negative zeros are allowed because we always end up with positive zero:
  // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
  // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
  // X =  0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
  // X =  0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
  if (FMF.noNaNs()) {
    if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
        match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
      return ConstantFP::getNullValue(Op0->getType());

    if (match(Op0, m_FNeg(m_Specific(Op1))) ||
        match(Op1, m_FNeg(m_Specific(Op0))))
      return ConstantFP::getNullValue(Op0->getType());
  }

  // (X - Y) + Y --> X
  // Y + (X - Y) --> X
  Value *X;
  if (FMF.noSignedZeros() && FMF.allowReassoc() &&
      (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
       match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
    return X;

  return nullptr;
}

/// Given operands for an FSub, see if we can fold the result. If not, this
/// returns null.
static Value *
SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned MaxRecurse,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // fsub X, +0 ==> X
  if (match(Op1, m_PosZeroFP()))
    return Op0;

  // fsub X, -0 ==> X, when we know X is not -0
  if (match(Op1, m_NegZeroFP()) &&
      (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
    return Op0;

  // fsub -0.0, (fsub -0.0, X) ==> X
  // fsub -0.0, (fneg X) ==> X
  Value *X;
  if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
    return X;

  // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
  // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
  if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
      (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
       match(Op1, m_FNeg(m_Value(X)))))
    return X;

  // fsub nnan x, x ==> 0.0
  if (FMF.noNaNs() && Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Y - (Y - X) --> X
  // (X + Y) - Y --> X
  if (FMF.noSignedZeros() && FMF.allowReassoc() &&
      (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
       match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
    return X;

  return nullptr;
}

static Value *SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q, unsigned MaxRecurse,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // fmul X, 1.0 ==> X
  if (match(Op1, m_FPOne()))
    return Op0;

  // fmul 1.0, X ==> X
  if (match(Op0, m_FPOne()))
    return Op1;

  // fmul nnan nsz X, 0 ==> 0
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op1, m_AnyZeroFP()))
    return ConstantFP::getNullValue(Op0->getType());

  // fmul nnan nsz 0, X ==> 0
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
    return ConstantFP::getNullValue(Op1->getType());

  // sqrt(X) * sqrt(X) --> X, if we can:
  // 1. Remove the intermediate rounding (reassociate).
  // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
  // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
  Value *X;
  if (Op0 == Op1 && match(Op0, m_Intrinsic<Intrinsic::sqrt>(m_Value(X))) &&
      FMF.allowReassoc() && FMF.noNaNs() && FMF.noSignedZeros())
    return X;

  return nullptr;
}

/// Given the operands for an FMul, see if we can fold the result.
static Value *
SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned MaxRecurse,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
      return C;

  // Now apply simplifications that do not require rounding.
  return SimplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
}

Value *llvm::SimplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::SimplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

Value *llvm::SimplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::SimplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

Value *llvm::SimplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::SimplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

Value *llvm::SimplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
                             const SimplifyQuery &Q,
                             fp::ExceptionBehavior ExBehavior,
                             RoundingMode Rounding) {
  return ::SimplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                           Rounding);
}

static Value *
SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // X / 1.0 -> X
  if (match(Op1, m_FPOne()))
    return Op0;

  // 0 / X -> 0
  // Requires that NaNs are off (X could be zero) and signed zeroes are
  // ignored (X could be positive or negative, so the output sign is unknown).
  if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
    return ConstantFP::getNullValue(Op0->getType());

  if (FMF.noNaNs()) {
    // X / X -> 1.0 is legal when NaNs are ignored.
    // We can ignore infinities because INF/INF is NaN.
    if (Op0 == Op1)
      return ConstantFP::get(Op0->getType(), 1.0);

    // (X * Y) / Y --> X if we can reassociate to the above form.
    Value *X;
    if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
      return X;

    // -X / X -> -1.0 and
    //  X / -X -> -1.0 are legal when NaNs are ignored.
    // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
    if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
        match(Op1, m_FNegNSZ(m_Specific(Op0))))
      return ConstantFP::get(Op0->getType(), -1.0);
  }

  return nullptr;
}

Value *llvm::SimplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::SimplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

static Value *
SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                 const SimplifyQuery &Q, unsigned,
                 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
                 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
  if (isDefaultFPEnvironment(ExBehavior, Rounding))
    if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
      return C;

  if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
    return C;

  if (!isDefaultFPEnvironment(ExBehavior, Rounding))
    return nullptr;

  // Unlike fdiv, the result of frem always matches the sign of the dividend.
  // The constant match may include undef elements in a vector, so return a
  // full zero constant as the result.
  if (FMF.noNaNs()) {
    // +0 % X -> 0
    if (match(Op0, m_PosZeroFP()))
      return ConstantFP::getNullValue(Op0->getType());
    // -0 % X -> -0
    if (match(Op0, m_NegZeroFP()))
      return ConstantFP::getNegativeZero(Op0->getType());
  }

  return nullptr;
}

Value *llvm::SimplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
                              const SimplifyQuery &Q,
                              fp::ExceptionBehavior ExBehavior,
                              RoundingMode Rounding) {
  return ::SimplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
                            Rounding);
}

//=== Helper functions for higher up the class hierarchy.

/// Given the operand for a UnaryOperator, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
                           unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FNeg:
    return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

/// Given the operand for a UnaryOperator, see if we can fold the result.
/// If not, this returns null.
/// Try to use FastMathFlags when folding the result.
static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
                             const FastMathFlags &FMF, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FNeg:
    return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
  default:
    return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
  }
}

Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
  return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
}

Value *llvm::SimplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
                          const SimplifyQuery &Q) {
  return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
}

/// Given operands for a BinaryOperator, see if we can fold the result.
/// If not, this returns null.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const SimplifyQuery &Q, unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::Add:
    return SimplifyAddInst(LHS, RHS, false, false, Q, MaxRecurse);
  case Instruction::Sub:
    return SimplifySubInst(LHS, RHS, false, false, Q, MaxRecurse);
  case Instruction::Mul:
    return SimplifyMulInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::SDiv:
    return SimplifySDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::UDiv:
    return SimplifyUDivInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::SRem:
    return SimplifySRemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::URem:
    return SimplifyURemInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Shl:
    return SimplifyShlInst(LHS, RHS, false, false, Q, MaxRecurse);
  case Instruction::LShr:
    return SimplifyLShrInst(LHS, RHS, false, Q, MaxRecurse);
  case Instruction::AShr:
    return SimplifyAShrInst(LHS, RHS, false, Q, MaxRecurse);
  case Instruction::And:
    return SimplifyAndInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Or:
    return SimplifyOrInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::Xor:
    return SimplifyXorInst(LHS, RHS, Q, MaxRecurse);
  case Instruction::FAdd:
    return SimplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FSub:
    return SimplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FMul:
    return SimplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FDiv:
    return SimplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  case Instruction::FRem:
    return SimplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
  default:
    llvm_unreachable("Unexpected opcode");
  }
}

/// Given operands for a BinaryOperator, see if we can fold the result.
/// If not, this returns null.
/// Try to use FastMathFlags when folding the result.
static Value *SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                            const FastMathFlags &FMF, const SimplifyQuery &Q,
                            unsigned MaxRecurse) {
  switch (Opcode) {
  case Instruction::FAdd:
    return SimplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FSub:
    return SimplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FMul:
    return SimplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
  case Instruction::FDiv:
    return SimplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
  default:
    return SimplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
  }
}

Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           const SimplifyQuery &Q) {
  return ::SimplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
}

Value *llvm::SimplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
                           FastMathFlags FMF, const SimplifyQuery &Q) {
  return ::SimplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
}

/// Given operands for a CmpInst, see if we can fold the result.
static Value *SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
    return SimplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
  return SimplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
}

Value *llvm::SimplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                             const SimplifyQuery &Q) {
  return ::SimplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
}

static bool IsIdempotent(Intrinsic::ID ID) {
  switch (ID) {
  default: return false;

  // Unary idempotent: f(f(x)) = f(x)
  case Intrinsic::fabs:
  case Intrinsic::floor:
  case Intrinsic::ceil:
  case Intrinsic::trunc:
  case Intrinsic::rint:
  case Intrinsic::nearbyint:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::canonicalize:
    return true;
  }
}

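/// Try to fold a load of a relative pointer, as used by @llvm.load.relative:
/// the 32-bit value at Ptr+Offset is expected to be the constant difference
/// (Target - Ptr) for some symbol Target, in which case the whole "load the
/// offset and add it back to the base" dance can be replaced by Target
/// itself. (A summary sketch of the pattern; see the checks below for the
/// exact constraints.)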
static Value *SimplifyRelativeLoad(Constant *Ptr, Constant *Offset,
                                   const DataLayout &DL) {
  GlobalValue *PtrSym;
  APInt PtrOffset;
  if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
    return nullptr;

  Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
  Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
  Type *Int32PtrTy = Int32Ty->getPointerTo();
  Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());

  auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
  if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
    return nullptr;

  uint64_t OffsetInt = OffsetConstInt->getSExtValue();
  if (OffsetInt % 4 != 0)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
      ConstantInt::get(Int64Ty, OffsetInt / 4));
  Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
  if (!Loaded)
    return nullptr;

  auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
  if (!LoadedCE)
    return nullptr;

  if (LoadedCE->getOpcode() == Instruction::Trunc) {
    LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
    if (!LoadedCE)
      return nullptr;
  }

  if (LoadedCE->getOpcode() != Instruction::Sub)
    return nullptr;

  auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
  if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
    return nullptr;
  auto *LoadedLHSPtr = LoadedLHS->getOperand(0);

  Constant *LoadedRHS = LoadedCE->getOperand(1);
  GlobalValue *LoadedRHSSym;
  APInt LoadedRHSOffset;
  if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
                                  DL) ||
      PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
    return nullptr;

  return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
}

static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
                                     const SimplifyQuery &Q) {
  // Idempotent functions return the same result when called repeatedly.
  Intrinsic::ID IID = F->getIntrinsicID();
  if (IsIdempotent(IID))
    if (auto *II = dyn_cast<IntrinsicInst>(Op0))
      if (II->getIntrinsicID() == IID)
        return II;

  Value *X;
  switch (IID) {
  case Intrinsic::fabs:
    if (SignBitMustBeZero(Op0, Q.TLI))
      return Op0;
    break;
  case Intrinsic::bswap:
    // bswap(bswap(x)) -> x
    if (match(Op0, m_BSwap(m_Value(X))))
      return X;
    break;
  case Intrinsic::bitreverse:
    // bitreverse(bitreverse(x)) -> x
    if (match(Op0, m_BitReverse(m_Value(X))))
      return X;
    break;
  case Intrinsic::ctpop: {
    // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
    // ctpop(and X, 1) --> and X, 1
    unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
    if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
                          Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
      return Op0;
    break;
  }
  case Intrinsic::exp:
    // exp(log(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
      return X;
    break;
  case Intrinsic::exp2:
    // exp2(log2(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
      return X;
    break;
  case Intrinsic::log:
    // log(exp(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
      return X;
    break;
  case Intrinsic::log2:
    // log2(exp2(x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
         match(Op0,
               m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
      return X;
    break;
  case Intrinsic::log10:
    // log10(pow(10.0, x)) -> x
    if (Q.CxtI->hasAllowReassoc() &&
        match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X))))
      return X;
    break;
  case Intrinsic::floor:
  case Intrinsic::trunc:
  case Intrinsic::ceil:
  case Intrinsic::round:
  case Intrinsic::roundeven:
  case Intrinsic::nearbyint:
  case Intrinsic::rint: {
    // floor (sitofp x) -> sitofp x
    // floor (uitofp x) -> uitofp x
    //
    // Converting from int always results in a finite integral number or
    // infinity. For either of those inputs, these rounding functions always
    // return the same value, so the rounding can be eliminated.
    if (match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
      return Op0;
    break;
  }
  case Intrinsic::experimental_vector_reverse:
    // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
    if (match(Op0,
              m_Intrinsic<Intrinsic::experimental_vector_reverse>(m_Value(X))))
      return X;
    break;
  default:
    break;
  }

  return nullptr;
}

static APInt getMaxMinLimit(Intrinsic::ID IID, unsigned BitWidth) {
  switch (IID) {
  case Intrinsic::smax: return APInt::getSignedMaxValue(BitWidth);
  case Intrinsic::smin: return APInt::getSignedMinValue(BitWidth);
  case Intrinsic::umax: return APInt::getMaxValue(BitWidth);
  case Intrinsic::umin: return APInt::getMinValue(BitWidth);
  default: llvm_unreachable("Unexpected intrinsic");
  }
}

static ICmpInst::Predicate getMaxMinPredicate(Intrinsic::ID IID) {
  switch (IID) {
  case Intrinsic::smax: return ICmpInst::ICMP_SGE;
  case Intrinsic::smin: return ICmpInst::ICMP_SLE;
  case Intrinsic::umax: return ICmpInst::ICMP_UGE;
  case Intrinsic::umin: return ICmpInst::ICMP_ULE;
  default: llvm_unreachable("Unexpected intrinsic");
  }
}

/// Given a min/max intrinsic, see if it can be removed based on having an
/// operand that is another min/max intrinsic with shared operand(s). The
/// caller is expected to swap the operand arguments to handle commutation.
static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
  Value *X, *Y;
  if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
    return nullptr;

  auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
  if (!MM0)
    return nullptr;
  Intrinsic::ID IID0 = MM0->getIntrinsicID();

  if (Op1 == X || Op1 == Y ||
      match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
    // max (max X, Y), X --> max X, Y
    if (IID0 == IID)
      return MM0;
    // max (min X, Y), X --> X
    if (IID0 == getInverseMinMaxIntrinsic(IID))
      return Op1;
  }
  return nullptr;
}

static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
                                      const SimplifyQuery &Q) {
  Intrinsic::ID IID = F->getIntrinsicID();
  Type *ReturnType = F->getReturnType();
  unsigned BitWidth = ReturnType->getScalarSizeInBits();
  switch (IID) {
  case Intrinsic::abs:
    // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
    // It is always ok to pick the earlier abs. We'll just lose nsw if it's
    // only set on the outer abs.
    if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
      return Op0;
    break;

  case Intrinsic::cttz: {
    // cttz(shl 1, X) --> X: a 1 shifted left by X has exactly X trailing
    // zeros.
    Value *X;
    if (match(Op0, m_Shl(m_One(), m_Value(X))))
      return X;
    break;
  }
  case Intrinsic::ctlz: {
    // ctlz(lshr C, X) --> X when C is negative (sign bit set): the shift
    // leaves exactly X leading zeros.
    Value *X;
    if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
      return X;
    // ctlz(ashr C, X) --> 0 when C is negative: the sign bit stays set.
    if (match(Op0, m_AShr(m_Negative(), m_Value())))
      return Constant::getNullValue(ReturnType);
    break;
  }
  case Intrinsic::smax:
  case Intrinsic::smin:
  case Intrinsic::umax:
  case Intrinsic::umin: {
    // If the arguments are the same, this is a no-op.
    if (Op0 == Op1)
      return Op0;

    // Canonicalize constant operand as Op1.
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);

    // Assume undef is the limit value.
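    // For example: umax(%x, undef) --> -1 (the unsigned max) and
    // smin(%x, undef) --> SIGNED_MIN, since undef may be chosen to be the
    // limit value itself.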
    if (Q.isUndefValue(Op1))
      return ConstantInt::get(ReturnType, getMaxMinLimit(IID, BitWidth));

    const APInt *C;
    if (match(Op1, m_APIntAllowUndef(C))) {
      // Clamp to limit value. For example:
      // umax(i8 %x, i8 255) --> 255
      if (*C == getMaxMinLimit(IID, BitWidth))
        return ConstantInt::get(ReturnType, *C);

      // If the constant op is the opposite of the limit value, the other must
      // be larger/smaller or equal. For example:
      // umin(i8 %x, i8 255) --> %x
      if (*C == getMaxMinLimit(getInverseMinMaxIntrinsic(IID), BitWidth))
        return Op0;

      // Remove nested call if constant operands allow it. Example:
      // max (max X, 7), 5 -> max X, 7
      auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
      if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
        // TODO: loosen undef/splat restrictions for vector constants.
        Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
        const APInt *InnerC;
        if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
            ((IID == Intrinsic::smax && InnerC->sge(*C)) ||
             (IID == Intrinsic::smin && InnerC->sle(*C)) ||
             (IID == Intrinsic::umax && InnerC->uge(*C)) ||
             (IID == Intrinsic::umin && InnerC->ule(*C))))
          return Op0;
      }
    }

    if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
      return V;
    if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
      return V;

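    // If the comparison implied by the intrinsic is known to hold, the min/max
    // collapses to the corresponding operand. For example:
    // umin(%x, or %x, 3) --> %x, because %x u<= (%x | 3)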
    ICmpInst::Predicate Pred = getMaxMinPredicate(IID);
    if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
      return Op0;
    if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
      return Op1;

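    // A dominating branch condition can also decide the comparison. For
    // example, in a block guarded by (%x s< %y), smin(%x, %y) --> %x.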
    if (Optional<bool> Imp =
            isImpliedByDomCondition(Pred, Op0, Op1, Q.CxtI, Q.DL))
      return *Imp ? Op0 : Op1;
    if (Optional<bool> Imp =
            isImpliedByDomCondition(Pred, Op1, Op0, Q.CxtI, Q.DL))
      return *Imp ? Op1 : Op0;

    break;
  }
  case Intrinsic::usub_with_overflow:
  case Intrinsic::ssub_with_overflow:
    // X - X -> { 0, false }
    // X - undef -> { 0, false }
    // undef - X -> { 0, false }
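    // Undef may be chosen equal to the other operand, so the difference is
    // zero and cannot overflow.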
    if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
    break;
  case Intrinsic::uadd_with_overflow:
  case Intrinsic::sadd_with_overflow:
    // X + undef -> { -1, false }
    // undef + X -> { -1, false }
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
      return ConstantStruct::get(
          cast<StructType>(ReturnType),
          {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
           Constant::getNullValue(ReturnType->getStructElementType(1))});
    }
    break;
  case Intrinsic::umul_with_overflow:
  case Intrinsic::smul_with_overflow:
    // 0 * X -> { 0, false }
    // X * 0 -> { 0, false }
    if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);
    // undef * X -> { 0, false }
    // X * undef -> { 0, false }
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
    break;
  case Intrinsic::uadd_sat:
    // sat(MAX + X) -> MAX
    // sat(X + MAX) -> MAX
    if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
      return Constant::getAllOnesValue(ReturnType);
    LLVM_FALLTHROUGH;
  case Intrinsic::sadd_sat:
    // sat(X + undef) -> -1
    // sat(undef + X) -> -1
    // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
    // For signed: Assume undef is ~X, in which case X + ~X = -1.
    if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getAllOnesValue(ReturnType);

    // X + 0 -> X
    if (match(Op1, m_Zero()))
      return Op0;
    // 0 + X -> X
    if (match(Op0, m_Zero()))
      return Op1;
    break;
  case Intrinsic::usub_sat:
    // sat(0 - X) -> 0, sat(X - MAX) -> 0
    if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
      return Constant::getNullValue(ReturnType);
    LLVM_FALLTHROUGH;
  case Intrinsic::ssub_sat:
    // X - X -> 0, X - undef -> 0, undef - X -> 0
    if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);
    // X - 0 -> X
    if (match(Op1, m_Zero()))
      return Op0;
    break;
  case Intrinsic::load_relative:
    if (auto *C0 = dyn_cast<Constant>(Op0))
      if (auto *C1 = dyn_cast<Constant>(Op1))
        return SimplifyRelativeLoad(C0, C1, Q.DL);
    break;
  case Intrinsic::powi:
    if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
      // powi(x, 0) -> 1.0
      if (Power->isZero())
        return ConstantFP::get(Op0->getType(), 1.0);
      // powi(x, 1) -> x
      if (Power->isOne())
        return Op0;
    }
    break;
  case Intrinsic::copysign:
    // copysign X, X --> X
    if (Op0 == Op1)
      return Op0;
    // copysign -X, X --> X
    // copysign X, -X --> -X
    if (match(Op0, m_FNeg(m_Specific(Op1))) ||
        match(Op1, m_FNeg(m_Specific(Op0))))
      return Op1;
    break;
  case Intrinsic::maxnum:
  case Intrinsic::minnum:
  case Intrinsic::maximum:
  case Intrinsic::minimum: {
    // If the arguments are the same, this is a no-op.
    if (Op0 == Op1)
      return Op0;

    // Canonicalize constant operand as Op1.
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);

    // If an argument is undef, return the other argument.
    if (Q.isUndefValue(Op1))
      return Op0;

    bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
    bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;

    // minnum(X, nan) -> X
    // maxnum(X, nan) -> X
    // minimum(X, nan) -> nan
    // maximum(X, nan) -> nan
    if (match(Op1, m_NaN()))
      return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;

    // In the following folds, inf can be replaced with the largest finite
    // float, if the ninf flag is set.
    const APFloat *C;
    if (match(Op1, m_APFloat(C)) &&
        (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
      // minnum(X, -inf) -> -inf
      // maxnum(X, +inf) -> +inf
      // minimum(X, -inf) -> -inf if nnan
      // maximum(X, +inf) -> +inf if nnan
      if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
        return ConstantFP::get(ReturnType, *C);

      // minnum(X, +inf) -> X if nnan
      // maxnum(X, -inf) -> X if nnan
      // minimum(X, +inf) -> X
      // maximum(X, -inf) -> X
      if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
        return Op0;
    }

    // Min/max of the same operation with common operand:
    // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
    if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
      if (M0->getIntrinsicID() == IID &&
          (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
        return Op0;
    if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
      if (M1->getIntrinsicID() == IID &&
          (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
        return Op1;

    break;
  }
  case Intrinsic::experimental_vector_extract: {
    Type *ReturnType = F->getReturnType();

    // (extract_vector (insert_vector _, X, 0), 0) -> X
    unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
    Value *X = nullptr;
    if (match(Op0, m_Intrinsic<Intrinsic::experimental_vector_insert>(
                       m_Value(), m_Value(X), m_Zero())) &&
        IdxN == 0 && X->getType() == ReturnType)
      return X;

    break;
  }
  default:
    break;
  }

  return nullptr;
}

static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
  // Intrinsics with no operands have some kind of side effect. Don't simplify.
  unsigned NumOperands = Call->getNumArgOperands();
  if (!NumOperands)
    return nullptr;

  Function *F = cast<Function>(Call->getCalledFunction());
  Intrinsic::ID IID = F->getIntrinsicID();
  if (NumOperands == 1)
    return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q);

  if (NumOperands == 2)
    return simplifyBinaryIntrinsic(F, Call->getArgOperand(0),
                                   Call->getArgOperand(1), Q);

  // Handle intrinsics with 3 or more arguments.
  switch (IID) {
  case Intrinsic::masked_load:
  case Intrinsic::masked_gather: {
    Value *MaskArg = Call->getArgOperand(2);
    Value *PassthruArg = Call->getArgOperand(3);
    // If the mask is all zeros or undef, the "passthru" argument is the
    // result.
    if (maskIsAllZeroOrUndef(MaskArg))
      return PassthruArg;
    return nullptr;
  }
  case Intrinsic::fshl:
  case Intrinsic::fshr: {
    Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1),
          *ShAmtArg = Call->getArgOperand(2);

    // If both operands are undef, the result is undef.
    if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
      return UndefValue::get(F->getReturnType());

    // If shift amount is undef, assume it is zero.
    if (Q.isUndefValue(ShAmtArg))
      return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);

    const APInt *ShAmtC;
    if (match(ShAmtArg, m_APInt(ShAmtC))) {
      // If there's effectively no shift, return the first (fshl) or second
      // (fshr) argument.
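      // The shift amount operates modulo the bitwidth, so for i32, e.g.:
      // fshl(%x, %y, 32) --> %x and fshr(%x, %y, 32) --> %y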
      APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
      if (ShAmtC->urem(BitWidth).isNullValue())
        return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
    }
    return nullptr;
  }
  case Intrinsic::experimental_constrained_fma: {
    Value *Op0 = Call->getArgOperand(0);
    Value *Op1 = Call->getArgOperand(1);
    Value *Op2 = Call->getArgOperand(2);
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q,
                                FPI->getExceptionBehavior().getValue(),
                                FPI->getRoundingMode().getValue()))
      return V;
    return nullptr;
  }
  case Intrinsic::fma:
  case Intrinsic::fmuladd: {
    Value *Op0 = Call->getArgOperand(0);
    Value *Op1 = Call->getArgOperand(1);
    Value *Op2 = Call->getArgOperand(2);
    if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q, fp::ebIgnore,
                                RoundingMode::NearestTiesToEven))
      return V;
    return nullptr;
  }
  case Intrinsic::smul_fix:
  case Intrinsic::smul_fix_sat: {
    Value *Op0 = Call->getArgOperand(0);
    Value *Op1 = Call->getArgOperand(1);
    Value *Op2 = Call->getArgOperand(2);
    Type *ReturnType = F->getReturnType();

    // Canonicalize constant operand as Op1 (ConstantFolding handles the case
    // when both Op0 and Op1 are constant so we do not care about that special
    // case here).
    if (isa<Constant>(Op0))
      std::swap(Op0, Op1);

    // X * 0 -> 0
    if (match(Op1, m_Zero()))
      return Constant::getNullValue(ReturnType);

    // X * undef -> 0
    if (Q.isUndefValue(Op1))
      return Constant::getNullValue(ReturnType);

    // X * (1 << Scale) -> X
    APInt ScaledOne =
        APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
                            cast<ConstantInt>(Op2)->getZExtValue());
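    // Require a non-negative constant: if the bit lands in the sign position,
    // (1 << Scale) is negative and no longer represents a fixed-point 1.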
    if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
      return Op0;

    return nullptr;
  }
  case Intrinsic::experimental_vector_insert: {
    Value *Vec = Call->getArgOperand(0);
    Value *SubVec = Call->getArgOperand(1);
    Value *Idx = Call->getArgOperand(2);
    Type *ReturnType = F->getReturnType();

    // (insert_vector Y, (extract_vector X, 0), 0) -> X
    // where: Y is X, or Y is undef
    unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
    Value *X = nullptr;
    if (match(SubVec, m_Intrinsic<Intrinsic::experimental_vector_extract>(
                          m_Value(X), m_Zero())) &&
        (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
        X->getType() == ReturnType)
      return X;

    return nullptr;
  }
  case Intrinsic::experimental_constrained_fadd: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return SimplifyFAddInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
                            FPI->getFastMathFlags(), Q,
                            FPI->getExceptionBehavior().getValue(),
                            FPI->getRoundingMode().getValue());
  }
  case Intrinsic::experimental_constrained_fsub: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return SimplifyFSubInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
                            FPI->getFastMathFlags(), Q,
                            FPI->getExceptionBehavior().getValue(),
                            FPI->getRoundingMode().getValue());
  }
  case Intrinsic::experimental_constrained_fmul: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return SimplifyFMulInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
                            FPI->getFastMathFlags(), Q,
                            FPI->getExceptionBehavior().getValue(),
                            FPI->getRoundingMode().getValue());
  }
  case Intrinsic::experimental_constrained_fdiv: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return SimplifyFDivInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
                            FPI->getFastMathFlags(), Q,
                            FPI->getExceptionBehavior().getValue(),
                            FPI->getRoundingMode().getValue());
  }
  case Intrinsic::experimental_constrained_frem: {
    auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
    return SimplifyFRemInst(FPI->getArgOperand(0), FPI->getArgOperand(1),
                            FPI->getFastMathFlags(), Q,
                            FPI->getExceptionBehavior().getValue(),
                            FPI->getRoundingMode().getValue());
  }
  default:
    return nullptr;
  }
}

static Value *tryConstantFoldCall(CallBase *Call, const SimplifyQuery &Q) {
  auto *F = dyn_cast<Function>(Call->getCalledOperand());
  if (!F || !canConstantFoldCallTo(Call, F))
    return nullptr;

  SmallVector<Constant *, 4> ConstantArgs;
  unsigned NumArgs = Call->getNumArgOperands();
  ConstantArgs.reserve(NumArgs);
  for (auto &Arg : Call->args()) {
    Constant *C = dyn_cast<Constant>(&Arg);
    if (!C) {
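      // Some intrinsics take metadata arguments; metadata never folds to a
      // Constant, but it does not prevent folding the rest of the call.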
      if (isa<MetadataAsValue>(Arg.get()))
        continue;
      return nullptr;
    }
    ConstantArgs.push_back(C);
  }

  return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
}

Value *llvm::SimplifyCall(CallBase *Call, const SimplifyQuery &Q) {
  // musttail calls can only be simplified if they are also DCEd.
  // As we can't guarantee this here, don't simplify them.
  if (Call->isMustTailCall())
    return nullptr;

  // call undef -> poison
  // call null -> poison
  Value *Callee = Call->getCalledOperand();
  if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
    return PoisonValue::get(Call->getType());

  if (Value *V = tryConstantFoldCall(Call, Q))
    return V;

  auto *F = dyn_cast<Function>(Callee);
  if (F && F->isIntrinsic())
    if (Value *Ret = simplifyIntrinsic(Call, Q))
      return Ret;

  return nullptr;
}

/// Given operands for a Freeze, see if we can fold the result.
static Value *SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  // Use a utility function defined in ValueTracking.
  if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
    return Op0;
  // We have room for improvement.
  return nullptr;
}

Value *llvm::SimplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
  return ::SimplifyFreezeInst(Op0, Q);
}

static Constant *ConstructLoadOperandConstant(Value *Op) {
  SmallVector<Value *, 4> Worklist;
  // Invalid IR in unreachable code may contain self-referential values. Don't
  // infinitely loop.
  SmallPtrSet<Value *, 4> Visited;
  Worklist.push_back(Op);
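  // First walk up the chain of bitcasts, all-constant-index GEPs, and
  // launder/strip.invariant.group calls until a plain Constant is reached.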
  while (true) {
    Value *CurOp = Worklist.back();
    if (!Visited.insert(CurOp).second)
      return nullptr;
    if (isa<Constant>(CurOp))
      break;
    if (auto *BC = dyn_cast<BitCastOperator>(CurOp)) {
      Worklist.push_back(BC->getOperand(0));
    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
      for (unsigned I = 1; I != GEP->getNumOperands(); ++I) {
        if (!isa<Constant>(GEP->getOperand(I)))
          return nullptr;
      }
      Worklist.push_back(GEP->getOperand(0));
    } else if (auto *II = dyn_cast<IntrinsicInst>(CurOp)) {
      if (II->isLaunderOrStripInvariantGroup())
        Worklist.push_back(II->getOperand(0));
      else
        return nullptr;
    } else {
      return nullptr;
    }
  }

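  // Then replay the chain bottom-up, rebuilding each step as a constant
  // expression.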
  Constant *NewOp = cast<Constant>(Worklist.pop_back_val());
  while (!Worklist.empty()) {
    Value *CurOp = Worklist.pop_back_val();
    if (isa<BitCastOperator>(CurOp)) {
      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
    } else if (auto *GEP = dyn_cast<GEPOperator>(CurOp)) {
      SmallVector<Constant *> Idxs;
      Idxs.reserve(GEP->getNumOperands() - 1);
      for (unsigned I = 1, E = GEP->getNumOperands(); I != E; ++I) {
        Idxs.push_back(cast<Constant>(GEP->getOperand(I)));
      }
      NewOp = ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), NewOp,
                                             Idxs, GEP->isInBounds(),
                                             GEP->getInRangeIndex());
    } else {
      assert(isa<IntrinsicInst>(CurOp) &&
             cast<IntrinsicInst>(CurOp)->isLaunderOrStripInvariantGroup() &&
             "expected invariant group intrinsic");
      NewOp = ConstantExpr::getBitCast(NewOp, CurOp->getType());
    }
  }
  return NewOp;
}

static Value *SimplifyLoadInst(LoadInst *LI, Value *PtrOp,
                               const SimplifyQuery &Q) {
  if (LI->isVolatile())
    return nullptr;

  // Try to make the load operand a constant, specifically handle
  // invariant.group intrinsics.
  auto *PtrOpC = dyn_cast<Constant>(PtrOp);
  if (!PtrOpC)
    PtrOpC = ConstructLoadOperandConstant(PtrOp);

  if (PtrOpC)
    return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);

  return nullptr;
}

/// See if we can compute a simplified version of this instruction.
/// If not, this returns null.
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              OptimizationRemarkEmitter *ORE) {
  const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
  Value *Result = nullptr;

  switch (I->getOpcode()) {
  default:
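    // Opcodes without a dedicated simplifier below can still be constant
    // folded when every operand is a Constant.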
    if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
      SmallVector<Constant *, 8> NewConstOps(NewOps.size());
      transform(NewOps, NewConstOps.begin(),
                [](Value *V) { return cast<Constant>(V); });
      Result = ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
    }
    break;
  case Instruction::FNeg:
    Result = SimplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q);
    break;
  case Instruction::FAdd:
    Result = SimplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
    break;
  case Instruction::Add:
    Result = SimplifyAddInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::FSub:
    Result = SimplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
    break;
  case Instruction::Sub:
    Result = SimplifySubInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::FMul:
    Result = SimplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
    break;
  case Instruction::Mul:
    Result = SimplifyMulInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::SDiv:
    Result = SimplifySDivInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::UDiv:
    Result = SimplifyUDivInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::FDiv:
    Result = SimplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
    break;
  case Instruction::SRem:
    Result = SimplifySRemInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::URem:
    Result = SimplifyURemInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::FRem:
    Result = SimplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
    break;
  case Instruction::Shl:
    Result = SimplifyShlInst(
        NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
        Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::LShr:
    Result = SimplifyLShrInst(NewOps[0], NewOps[1],
                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::AShr:
    Result = SimplifyAShrInst(NewOps[0], NewOps[1],
                              Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
    break;
  case Instruction::And:
    Result = SimplifyAndInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::Or:
    Result = SimplifyOrInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::Xor:
    Result = SimplifyXorInst(NewOps[0], NewOps[1], Q);
    break;
  case Instruction::ICmp:
    Result = SimplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
                              NewOps[1], Q);
    break;
  case Instruction::FCmp:
    Result = SimplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
                              NewOps[1], I->getFastMathFlags(), Q);
    break;
  case Instruction::Select:
    Result = SimplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q);
    break;
  case Instruction::GetElementPtr: {
    Result = SimplifyGEPInst(cast<GetElementPtrInst>(I)->getSourceElementType(),
                             NewOps, Q);
    break;
  }
  case Instruction::InsertValue: {
    InsertValueInst *IV = cast<InsertValueInst>(I);
    Result = SimplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q);
    break;
  }
  case Instruction::InsertElement: {
    Result = SimplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
    break;
  }
  case Instruction::ExtractValue: {
    auto *EVI = cast<ExtractValueInst>(I);
    Result = SimplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q);
    break;
  }
  case Instruction::ExtractElement: {
    Result = SimplifyExtractElementInst(NewOps[0], NewOps[1], Q);
    break;
  }
  case Instruction::ShuffleVector: {
    auto *SVI = cast<ShuffleVectorInst>(I);
    Result = SimplifyShuffleVectorInst(
        NewOps[0], NewOps[1], SVI->getShuffleMask(), SVI->getType(), Q);
    break;
  }
  case Instruction::PHI:
    Result = SimplifyPHINode(cast<PHINode>(I), NewOps, Q);
    break;
  case Instruction::Call: {
    // TODO: Use NewOps
    Result = SimplifyCall(cast<CallInst>(I), Q);
    break;
  }
  case Instruction::Freeze:
    Result = llvm::SimplifyFreezeInst(NewOps[0], Q);
    break;
#define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
#include "llvm/IR/Instruction.def"
#undef HANDLE_CAST_INST
    Result = SimplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q);
    break;
  case Instruction::Alloca:
    // Allocas have no simplifications and cannot be constant folded.
    Result = nullptr;
    break;
  case Instruction::Load:
    Result = SimplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
    break;
  }

  // If called on unreachable code, the above logic may report that the
  // instruction simplified to itself. Make life easier for users by
  // detecting that case here, returning a safe value instead.
  return Result == I ? UndefValue::get(I->getType()) : Result;
}

Value *llvm::SimplifyInstructionWithOperands(Instruction *I,
                                             ArrayRef<Value *> NewOps,
                                             const SimplifyQuery &SQ,
                                             OptimizationRemarkEmitter *ORE) {
  assert(NewOps.size() == I->getNumOperands() &&
         "Number of operands should match the instruction!");
  return ::simplifyInstructionWithOperands(I, NewOps, SQ, ORE);
}

Value *llvm::SimplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
                                 OptimizationRemarkEmitter *ORE) {
  SmallVector<Value *, 8> Ops(I->operands());
  return ::simplifyInstructionWithOperands(I, Ops, SQ, ORE);
}

/// Implementation of recursive simplification through an instruction's
/// uses.
///
/// This is the common implementation of the recursive simplification routines.
/// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
/// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
/// instructions to process and attempt to simplify it using
/// InstructionSimplify. Recursively visited users which could not be
/// simplified themselves are added to the optional UnsimplifiedUsers set for
/// further processing by the caller.
///
/// This routine returns 'true' only when *it* simplifies something. The passed
/// in simplified value does not count toward this.
static bool replaceAndRecursivelySimplifyImpl(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
  bool Simplified = false;
  SmallSetVector<Instruction *, 8> Worklist;
  const DataLayout &DL = I->getModule()->getDataLayout();

  // If we have an explicit value to collapse to, do that round of the
  // simplification loop by hand initially.
  if (SimpleV) {
    for (User *U : I->users())
      if (U != I)
        Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  } else {
    Worklist.insert(I);
  }

  // Note that we must test the size on each iteration, the worklist can grow.
  for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
    I = Worklist[Idx];

    // See if this instruction simplifies.
    SimpleV = SimplifyInstruction(I, {DL, TLI, DT, AC});
    if (!SimpleV) {
      if (UnsimplifiedUsers)
        UnsimplifiedUsers->insert(I);
      continue;
    }

    Simplified = true;

    // Stash away all the uses of the old instruction so we can check them for
    // recursive simplifications after a RAUW. This is cheaper than checking
    // all uses of SimpleV on the recursive step in most cases.
    for (User *U : I->users())
      Worklist.insert(cast<Instruction>(U));

    // Replace the instruction with its simplified value.
    I->replaceAllUsesWith(SimpleV);

    // Gracefully handle edge cases where the instruction is not wired into any
    // parent block.
    if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
        !I->mayHaveSideEffects())
      I->eraseFromParent();
  }
  return Simplified;
}

bool llvm::replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
                                           UnsimplifiedUsers);
}

namespace llvm {
const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
  auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
  auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
  auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}

const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
                                         const DataLayout &DL) {
  return {DL, &AR.TLI, &AR.DT, &AR.AC};
}

template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
                                         Function &F) {
  auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
  auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
  auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
                                                  Function &);
} // namespace llvm