//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: This is usually true and assuming it simplifies the logic (if
// they have not been simplified then results are correct but maybe suboptimal).
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/InstructionSimplify.h"

#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/AliasAnalysis.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CaptureTracking.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/InstSimplifyFolder.h"
#include "llvm/Analysis/LoopAnalysisManager.h"
#include "llvm/Analysis/MemoryBuiltins.h"
#include "llvm/Analysis/OverflowInstAnalysis.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include <algorithm>
#include <optional>
using namespace llvm;
using namespace llvm::PatternMatch;

#define DEBUG_TYPE "instsimplify"

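// Cap on the depth of recursive simplification, so that compile time stays
// bounded even for long chains of foldable instructions.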
enum { RecursionLimit = 3 };

STATISTIC(NumExpand, "Number of expansions");
STATISTIC(NumReassoc, "Number of reassociations");

static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
                             const SimplifyQuery &, unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
                            unsigned);
static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
                            const SimplifyQuery &, unsigned);
static Value *simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse);
static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
                              unsigned);
static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
                               unsigned);
static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
                              const SimplifyQuery &, unsigned);
static Value *simplifySelectInst(Value *, Value *, Value *,
                                 const SimplifyQuery &, unsigned);
static Value *simplifyInstructionWithOperands(Instruction *I,
                                              ArrayRef<Value *> NewOps,
                                              const SimplifyQuery &SQ,
                                              unsigned MaxRecurse);

static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
                                     Value *FalseVal) {
  BinaryOperator::BinaryOps BinOpCode;
  if (auto *BO = dyn_cast<BinaryOperator>(Cond))
    BinOpCode = BO->getOpcode();
  else
    return nullptr;

  CmpInst::Predicate ExpectedPred, Pred1, Pred2;
  if (BinOpCode == BinaryOperator::Or) {
    ExpectedPred = ICmpInst::ICMP_NE;
  } else if (BinOpCode == BinaryOperator::And) {
    ExpectedPred = ICmpInst::ICMP_EQ;
  } else
    return nullptr;

  // %A = icmp eq %TV, %FV
  // %B = icmp eq %X, %Y (and one of these is a select operand)
  // %C = and %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %FV

  // %A = icmp ne %TV, %FV
  // %B = icmp ne %X, %Y (and one of these is a select operand)
  // %C = or %A, %B
  // %D = select %C, %TV, %FV
  // -->
  // %TV
  Value *X, *Y;
  if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
                                      m_Specific(FalseVal)),
                             m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
      Pred1 != Pred2 || Pred1 != ExpectedPred)
    return nullptr;

  if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
    return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;

  return nullptr;
}

/// For a boolean type or a vector of boolean type, return false or a vector
/// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }

/// For a boolean type or a vector of boolean type, return true or a vector
/// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }

/// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
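/// This also accepts the swapped form, e.g. "icmp sgt %a, %b" is treated as
/// equivalent to "icmp slt %b, %a".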
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
                          Value *RHS) {
  CmpInst *Cmp = dyn_cast<CmpInst>(V);
  if (!Cmp)
    return false;
  CmpInst::Predicate CPred = Cmp->getPredicate();
  Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
  if (CPred == Pred && CLHS == LHS && CRHS == RHS)
    return true;
  return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
         CRHS == LHS;
}

/// Simplify comparison with true or false branch of select:
///  %sel = select i1 %cond, i32 %tv, i32 %fv
///  %cmp = icmp sle i32 %sel, %rhs
/// Compose new comparison by substituting %sel with either %tv or %fv
/// and see if it simplifies.
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
                                 Value *RHS, Value *Cond,
                                 const SimplifyQuery &Q, unsigned MaxRecurse,
                                 Constant *TrueOrFalse) {
  Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  if (SimplifiedCmp == Cond) {
    // %cmp simplified to the select condition (%cond).
    return TrueOrFalse;
  } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
    // It didn't simplify. However, if the composed comparison is equivalent
    // to the select condition (%cond), then we can replace it.
    return TrueOrFalse;
  }
  return SimplifiedCmp;
}

/// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
                                     Value *RHS, Value *Cond,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getTrue(Cond->getType()));
}

/// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
                                      Value *RHS, Value *Cond,
                                      const SimplifyQuery &Q,
                                      unsigned MaxRecurse) {
  return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
                            getFalse(Cond->getType()));
}

/// We know comparison with both branches of select can be simplified, but they
/// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
                                               Value *Cond,
                                               const SimplifyQuery &Q,
                                               unsigned MaxRecurse) {
  // If the false value simplified to false, then the result of the compare
  // is equal to "Cond && TCmp". This also catches the case when the false
  // value simplified to false and the true value to true, returning "Cond".
  // Folding select to and/or isn't poison-safe in general; impliesPoison
  // checks whether folding it does not convert a well-defined value into
  // poison.
  if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
    if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
      return V;
  // If the true value simplified to true, then the result of the compare
  // is equal to "Cond || FCmp".
  if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
    if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
      return V;
  // Finally, if the false value simplified to true and the true value to
  // false, then the result of the compare is equal to "!Cond".
  if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
    if (Value *V = simplifyXorInst(
            Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
      return V;
  return nullptr;
}

/// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
  Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    // Arguments and constants dominate all instructions.
    return true;

  // If we have a DominatorTree then do a precise test.
  if (DT)
    return DT->dominates(I, P);

  // Otherwise, if the instruction is in the entry block and is not an invoke,
  // then it obviously dominates all phi nodes.
  if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
      !isa<CallBrInst>(I))
    return true;

  return false;
}

/// Try to simplify a binary operator of form "V op OtherOp" where V is
/// "(B0 opex B1)" by distributing 'op' across 'opex' as
/// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
                          Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  auto *B = dyn_cast<BinaryOperator>(V);
  if (!B || B->getOpcode() != OpcodeToExpand)
    return nullptr;
  Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
  Value *L =
      simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!L)
    return nullptr;
  Value *R =
      simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
  if (!R)
    return nullptr;

  // Does the expanded pair of binops simplify to the existing binop?
  if ((L == B0 && R == B1) ||
      (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
    ++NumExpand;
    return B;
  }

  // Otherwise, return "L op' R" if it simplifies.
  Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
  if (!S)
    return nullptr;

  ++NumExpand;
  return S;
}

/// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
/// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
                                     Value *R,
                                     Instruction::BinaryOps OpcodeToExpand,
                                     const SimplifyQuery &Q,
                                     unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
    return V;
  if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
    return V;
  return nullptr;
}

/// Generic simplifications for associative binary operations.
/// Returns the simpler value, or null if none was found.
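/// For example, "(X + 7) + (-7)" reassociates to "X + (7 + (-7))"; the inner
/// add folds to 0 and the whole expression simplifies to X.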
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
                                       Value *LHS, Value *RHS,
                                       const SimplifyQuery &Q,
                                       unsigned MaxRecurse) {
  assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");

  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
  BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);

  // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "B op C" simplify?
    if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
      // It does! Return "A op V" if it simplifies or is already available.
      // If V equals B then "A op V" is just the LHS.
      if (V == B)
        return LHS;
      // Otherwise return "A op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "A op B" simplify?
    if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
      // It does! Return "V op C" if it simplifies or is already available.
      // If V equals B then "V op C" is just the RHS.
      if (V == B)
        return RHS;
      // Otherwise return "V op C" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // The remaining transforms require commutativity as well as associativity.
  if (!Instruction::isCommutative(Opcode))
    return nullptr;

  // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
  if (Op0 && Op0->getOpcode() == Opcode) {
    Value *A = Op0->getOperand(0);
    Value *B = Op0->getOperand(1);
    Value *C = RHS;

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "V op B" if it simplifies or is already available.
      // If V equals A then "V op B" is just the LHS.
      if (V == A)
        return LHS;
      // Otherwise return "V op B" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
  if (Op1 && Op1->getOpcode() == Opcode) {
    Value *A = LHS;
    Value *B = Op1->getOperand(0);
    Value *C = Op1->getOperand(1);

    // Does "C op A" simplify?
    if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
      // It does! Return "B op V" if it simplifies or is already available.
      // If V equals C then "B op V" is just the RHS.
      if (V == C)
        return RHS;
      // Otherwise return "B op V" if it simplifies.
      if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
        ++NumReassoc;
        return W;
      }
    }
  }

  return nullptr;
}

/// In the case of a binary operation with a select instruction as an operand,
/// try to simplify the binop by seeing whether evaluating it on both branches
/// of the select results in the same value. Returns the common value if so,
/// otherwise returns null.
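/// For example, "mul (select %c, %x, %y), 0" evaluates to 0 on both branches
/// of the select, so the multiply simplifies to 0.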
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
                                    Value *RHS, const SimplifyQuery &Q,
                                    unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  SelectInst *SI;
  if (isa<SelectInst>(LHS)) {
    SI = cast<SelectInst>(LHS);
  } else {
    assert(isa<SelectInst>(RHS) && "No select instruction operand!");
    SI = cast<SelectInst>(RHS);
  }

  // Evaluate the BinOp on the true and false branches of the select.
  Value *TV;
  Value *FV;
  if (SI == LHS) {
    TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
  } else {
    TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
    FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
  }

  // If they simplified to the same value, then return the common value.
  // If they both failed to simplify then return null.
  if (TV == FV)
    return TV;

  // If one branch simplified to undef, return the other one.
  if (TV && Q.isUndefValue(TV))
    return FV;
  if (FV && Q.isUndefValue(FV))
    return TV;

  // If applying the operation did not change the true and false select values,
  // then the result of the binop is the select itself.
  if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
    return SI;

  // If one branch simplified and the other did not, and the simplified
  // value is equal to the unsimplified one, return the simplified value.
  // For example, select (cond, X, X & Z) & Z -> X & Z.
  if ((FV && !TV) || (TV && !FV)) {
    // Check that the simplified value has the form "X op Y" where "op" is the
    // same as the original operation.
    Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
    if (Simplified && Simplified->getOpcode() == unsigned(Opcode) &&
        !Simplified->hasPoisonGeneratingFlags()) {
      // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
      // We already know that "op" is the same as for the simplified value. See
      // if the operands match too. If so, return the simplified value.
      Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
      Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
      Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
      if (Simplified->getOperand(0) == UnsimplifiedLHS &&
          Simplified->getOperand(1) == UnsimplifiedRHS)
        return Simplified;
      if (Simplified->isCommutative() &&
          Simplified->getOperand(1) == UnsimplifiedLHS &&
          Simplified->getOperand(0) == UnsimplifiedRHS)
        return Simplified;
    }
  }

  return nullptr;
}

/// In the case of a comparison with a select instruction, try to simplify the
/// comparison by seeing whether both branches of the select result in the same
/// value. Returns the common value if so, otherwise returns null.
/// For example, if we have:
///  %tmp = select i1 %cmp, i32 1, i32 2
///  %cmp1 = icmp sle i32 %tmp, 3
/// We can simplify %cmp1 to true, because both branches of select are
/// less than 3. We compose new comparison by substituting %tmp with both
/// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
                                  Value *RHS, const SimplifyQuery &Q,
                                  unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the select is on the LHS.
  if (!isa<SelectInst>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
  SelectInst *SI = cast<SelectInst>(LHS);
  Value *Cond = SI->getCondition();
  Value *TV = SI->getTrueValue();
  Value *FV = SI->getFalseValue();

  // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
  // Does "cmp TV, RHS" simplify?
  Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
  if (!TCmp)
    return nullptr;

  // Does "cmp FV, RHS" simplify?
  Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
  if (!FCmp)
    return nullptr;

  // If both sides simplified to the same value, then use it as the result of
  // the original comparison.
  if (TCmp == FCmp)
    return TCmp;

  // The remaining cases only make sense if the select condition has the same
  // type as the result of the comparison, so bail out if this is not so.
  if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
    return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);

  return nullptr;
}

/// In the case of a binary operation with an operand that is a PHI instruction,
/// try to simplify the binop by seeing whether evaluating it on the incoming
/// phi values yields the same result for every value. If so returns the common
/// value, otherwise returns null.
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
                                 Value *RHS, const SimplifyQuery &Q,
                                 unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  PHINode *PI;
  if (isa<PHINode>(LHS)) {
    PI = cast<PHINode>(LHS);
    // Bail out if RHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(RHS, PI, Q.DT))
      return nullptr;
  } else {
    assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
    PI = cast<PHINode>(RHS);
    // Bail out if LHS and the phi may be mutually interdependent due to a loop.
    if (!valueDominatesPHI(LHS, PI, Q.DT))
      return nullptr;
  }

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (Use &Incoming : PI->incoming_values()) {
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    Instruction *InTI = PI->getIncomingBlock(Incoming)->getTerminator();
    Value *V = PI == LHS
                   ? simplifyBinOp(Opcode, Incoming, RHS,
                                   Q.getWithInstruction(InTI), MaxRecurse)
                   : simplifyBinOp(Opcode, LHS, Incoming,
                                   Q.getWithInstruction(InTI), MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than it did for a previous incoming value, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

/// In the case of a comparison with a PHI instruction, try to simplify the
/// comparison by seeing whether comparing with all of the incoming phi values
/// yields the same result every time. If so returns the common result,
/// otherwise returns null.
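/// For example, if %p = phi i32 [ 1, %a ], [ 2, %b ], then "icmp ult %p, 4"
/// simplifies to true for every incoming value, so the whole compare is true.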
static Value *threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return nullptr;

  // Make sure the phi is on the LHS.
  if (!isa<PHINode>(LHS)) {
    std::swap(LHS, RHS);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
  PHINode *PI = cast<PHINode>(LHS);

  // Bail out if RHS and the phi may be mutually interdependent due to a loop.
  if (!valueDominatesPHI(RHS, PI, Q.DT))
    return nullptr;

  // Evaluate the BinOp on the incoming phi values.
  Value *CommonValue = nullptr;
  for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
    Value *Incoming = PI->getIncomingValue(u);
    Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
    // If the incoming value is the phi node itself, it can safely be skipped.
    if (Incoming == PI)
      continue;
    // Change the context instruction to the "edge" that flows into the phi.
    // This is important because that is where incoming is actually "evaluated"
    // even though it is used later somewhere else.
    Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
                               MaxRecurse);
    // If the operation failed to simplify, or simplified to a different value
    // than it did for a previous incoming value, then give up.
    if (!V || (CommonValue && V != CommonValue))
      return nullptr;
    CommonValue = V;
  }

  return CommonValue;
}

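/// If both operands are constants, try to constant-fold the binary operation;
/// otherwise, canonicalize a lone constant operand to the RHS of commutative
/// operations so later matching only has to look there.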
static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
                                       Value *&Op0, Value *&Op1,
                                       const SimplifyQuery &Q) {
  if (auto *CLHS = dyn_cast<Constant>(Op0)) {
    if (auto *CRHS = dyn_cast<Constant>(Op1)) {
      switch (Opcode) {
      default:
        break;
      case Instruction::FAdd:
      case Instruction::FSub:
      case Instruction::FMul:
      case Instruction::FDiv:
      case Instruction::FRem:
        if (Q.CxtI != nullptr)
          return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
      }
      return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
    }

    // Canonicalize the constant to the RHS if this is a commutative operation.
    if (Instruction::isCommutative(Opcode))
      std::swap(Op0, Op1);
  }
  return nullptr;
}

/// Given operands for an Add, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
    return C;

  // X + poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X + undef -> undef
  if (Q.isUndefValue(Op1))
    return Op1;

  // X + 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // If two operands are negative, return 0.
  if (isKnownNegation(Op0, Op1))
    return Constant::getNullValue(Op0->getType());

  // X + (Y - X) -> Y
  // (Y - X) + X -> Y
  // Eg: X + -X -> 0
  Value *Y = nullptr;
  if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
      match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
    return Y;

  // X + ~X -> -1 since ~X = -X-1
  Type *Ty = Op0->getType();
  if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
    return Constant::getAllOnesValue(Ty);

  // add nsw/nuw (xor Y, signmask), signmask --> Y
  // The no-wrapping add guarantees that the top bit will be set by the add.
  // Therefore, the xor must be clearing the already set sign bit of Y.
  if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
      match(Op0, m_Xor(m_Value(Y), m_SignMask())))
    return Y;

  // add nuw %x, -1 -> -1, because %x can only be 0.
  if (IsNUW && match(Op1, m_AllOnes()))
    return Op1; // Which is -1.

  // i1 add -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
    return V;

  // Threading Add over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A + select(cond, B, C)" means evaluating
  // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  return nullptr;
}

Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Query) {
  return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
}

/// Compute the base pointer and cumulative constant offsets for V.
///
/// This strips all constant offsets off of V, leaving it the base pointer, and
/// accumulates the total constant offset applied in the returned constant.
/// It returns zero if there are no constant offsets applied.
///
/// This is very similar to stripAndAccumulateConstantOffsets(), except it
/// normalizes the offset bitwidth to the stripped pointer type, not the
/// original pointer type.
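/// For example, stripping "getelementptr inbounds i8, ptr %base, i64 4" leaves
/// V pointing at %base and returns an offset of 4.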
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
                                            bool AllowNonInbounds = false) {
  assert(V->getType()->isPtrOrPtrVectorTy());

  APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
  V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As that strip may trace through `addrspacecast`, we need to sext or trunc
  // the calculated offset to the index width of the stripped pointer type.
  return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
}

/// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns zero.
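/// For example, for "gep i8, ptr %p, i64 8" and "gep i8, ptr %p, i64 2" the
/// common base is %p and the returned difference is the constant 6.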
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
                                          Value *RHS) {
  APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
  APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);

  // If LHS and RHS are not related via constant offsets to the same base
  // value, there is nothing we can do here.
  if (LHS != RHS)
    return nullptr;

  // Otherwise, the difference of LHS - RHS can be computed as:
  //    LHS - RHS
  //  = (LHSOffset + Base) - (RHSOffset + Base)
  //  = LHSOffset - RHSOffset
  Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
  if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
    Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
  return Res;
}

/// Test if there is a dominating equivalence condition for the
/// two operands. If there is, try to reduce the binary operation
/// between the two operands.
/// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  // A recursive invocation cannot provide any additional benefit, so only
  // attempt this at the top level.
  if (MaxRecurse != RecursionLimit)
    return nullptr;

  std::optional<bool> Imp =
      isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
  if (Imp && *Imp) {
    Type *Ty = Op0->getType();
    switch (Opcode) {
    case Instruction::Sub:
    case Instruction::Xor:
    case Instruction::URem:
    case Instruction::SRem:
      return Constant::getNullValue(Ty);

    case Instruction::SDiv:
    case Instruction::UDiv:
      return ConstantInt::get(Ty, 1);

    case Instruction::And:
    case Instruction::Or:
      // Could be either one - choose Op1 since that's more likely a constant.
      return Op1;
    default:
      break;
    }
  }
  return nullptr;
}

/// Given operands for a Sub, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
    return C;

  // X - poison -> poison
  // poison - X -> poison
  if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Op0->getType());

  // X - undef -> undef
  // undef - X -> undef
  if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
    return UndefValue::get(Op0->getType());

  // X - 0 -> X
  if (match(Op1, m_Zero()))
    return Op0;

  // X - X -> 0
  if (Op0 == Op1)
    return Constant::getNullValue(Op0->getType());

  // Is this a negation?
  if (match(Op0, m_Zero())) {
    // 0 - X -> 0 if the sub is NUW.
    if (IsNUW)
      return Constant::getNullValue(Op0->getType());

    KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
    if (Known.Zero.isMaxSignedValue()) {
      // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
      // Op1 must be 0 because negating the minimum signed value is undefined.
      if (IsNSW)
        return Constant::getNullValue(Op0->getType());

      // 0 - X -> X if X is 0 or the minimum signed value.
      return Op1;
    }
  }

  // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
  // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
  Value *X = nullptr, *Y = nullptr, *Z = Op1;
  if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
    // See if "V === Y - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
      // It does! Now see if "X + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "Y + V" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
  // For example, X - (X + 1) -> -1
  X = Op0;
  if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
    // See if "V === X - Y" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
      // It does! Now see if "V - Z" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
    // See if "V === X - Z" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
      // It does! Now see if "V - Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }
  }

  // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
  // For example, X - (X - Y) -> Y.
  Z = Op0;
  if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
    // See if "V === Z - X" simplifies.
    if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
      // It does! Now see if "V + Y" simplifies.
      if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
        // It does, we successfully reassociated!
        ++NumReassoc;
        return W;
      }

  // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
  if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
      match(Op1, m_Trunc(m_Value(Y))))
    if (X->getType() == Y->getType())
      // See if "V === X - Y" simplifies.
      if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
        // It does! Now see if "trunc V" simplifies.
        if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
                                        Q, MaxRecurse - 1))
          // It does, return the simplified "trunc V".
          return W;

  // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
  if (match(Op0, m_PtrToInt(m_Value(X))) && match(Op1, m_PtrToInt(m_Value(Y))))
    if (Constant *Result = computePointerDifference(Q.DL, X, Y))
      return ConstantFoldIntegerCast(Result, Op0->getType(), /*IsSigned*/ true,
                                     Q.DL);

  // i1 sub -> xor.
  if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
    if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
      return V;

  // Threading Sub over selects and phi nodes is pointless, so don't bother.
  // Threading over the select in "A - select(cond, B, C)" means evaluating
  // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
  // only if B and C are equal. If B and C are equal then (since we assume
  // that operands have already been simplified) "select(cond, B, C)" should
  // have been simplified to the common value of B and C already. Analysing
  // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
  // for threading over phi nodes.

  if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
    return V;

  return nullptr;
}

Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given operands for a Mul, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                              const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
    return C;

  // X * poison -> poison
  if (isa<PoisonValue>(Op1))
    return Op1;

  // X * undef -> 0
  // X * 0 -> 0
  if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X * 1 -> X
  if (match(Op1, m_One()))
    return Op0;

  // (X / Y) * Y -> X if the division is exact.
  Value *X = nullptr;
  if (Q.IIQ.UseInstrInfo &&
      (match(Op0,
             m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) ||     // (X / Y) * Y
       match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
    return X;

  if (Op0->getType()->isIntOrIntVectorTy(1)) {
    // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
    // representable). All other cases reduce to 0, so just return 0.
    if (IsNSW)
      return ConstantInt::getNullValue(Op0->getType());

    // Treat "mul i1" as "and i1".
    if (MaxRecurse)
      if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
        return V;
  }

  // Try some generic simplifications for associative operations.
  if (Value *V =
          simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
    return V;

  // Mul distributes over Add. Try some generic simplifications based on this.
  if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
                                        Instruction::Add, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V =
            threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V =
            threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
                             const SimplifyQuery &Q) {
  return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
}

/// Given a predicate and two operands, return true if the comparison is true.
/// This is a helper for div/rem simplification where we return some other value
/// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
                       const SimplifyQuery &Q, unsigned MaxRecurse) {
  Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
  Constant *C = dyn_cast_or_null<Constant>(V);
  return (C && C->isAllOnesValue());
}

/// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
/// to simplify X % Y to X.
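/// For example, "udiv i8 %x, 16" is 0 whenever the top four bits of %x are
/// known to be zero, and "urem i8 %x, 16" is then just %x.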
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
                      unsigned MaxRecurse, bool IsSigned) {
  // Recursion is always used, so bail out at once if we already hit the limit.
  if (!MaxRecurse--)
    return false;

  if (IsSigned) {
    // (X srem Y) sdiv Y --> 0
    if (match(X, m_SRem(m_Value(), m_Specific(Y))))
      return true;

    // |X| / |Y| --> 0
    //
    // We require that 1 operand is a simple constant. That could be extended to
    // 2 variables if we computed the sign bit for each.
    //
    // Make sure that a constant is not the minimum signed value because taking
    // the abs() of that is undefined.
    Type *Ty = X->getType();
    const APInt *C;
    if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
      // Is the variable divisor magnitude always greater than the constant
      // dividend magnitude?
      // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
      Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
      Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
          isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
        return true;
    }
    if (match(Y, m_APInt(C))) {
      // Special-case: we can't take the abs() of a minimum signed value. If
      // that's the divisor, then all we have to do is prove that the dividend
      // is also not the minimum signed value.
      if (C->isMinSignedValue())
        return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);

      // Is the variable dividend magnitude always less than the constant
      // divisor magnitude?
      // |X| < |C| --> X > -abs(C) and X < abs(C)
      Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
      Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
      if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
          isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
        return true;
    }
    return false;
  }

  // IsSigned == false.

  // Is the unsigned dividend known to be less than a constant divisor?
  // TODO: Convert this (and above) to range analysis
  //       ("computeConstantRangeIncludingKnownBits")?
  const APInt *C;
  if (match(Y, m_APInt(C)) &&
      computeKnownBits(X, /* Depth */ 0, Q).getMaxValue().ult(*C))
    return true;

  // Try again for any divisor:
  // Is the dividend unsigned less than the divisor?
  return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
}

/// Check for common or similar folds of integer division or integer remainder.
/// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
                             Value *Op1, const SimplifyQuery &Q,
                             unsigned MaxRecurse) {
  bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
  bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);

  Type *Ty = Op0->getType();

  // X / undef -> poison
  // X % undef -> poison
  if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
    return PoisonValue::get(Ty);

  // X / 0 -> poison
  // X % 0 -> poison
  // We don't need to preserve faults!
  if (match(Op1, m_Zero()))
    return PoisonValue::get(Ty);

  // If any element of a constant divisor fixed width vector is zero or undef
  // the behavior is undefined and we can fold the whole op to poison.
  auto *Op1C = dyn_cast<Constant>(Op1);
  auto *VTy = dyn_cast<FixedVectorType>(Ty);
  if (Op1C && VTy) {
    unsigned NumElts = VTy->getNumElements();
    for (unsigned i = 0; i != NumElts; ++i) {
      Constant *Elt = Op1C->getAggregateElement(i);
      if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
        return PoisonValue::get(Ty);
    }
  }

  // poison / X -> poison
  // poison % X -> poison
  if (isa<PoisonValue>(Op0))
    return Op0;

  // undef / X -> 0
  // undef % X -> 0
  if (Q.isUndefValue(Op0))
    return Constant::getNullValue(Ty);

  // 0 / X -> 0
  // 0 % X -> 0
  if (match(Op0, m_Zero()))
    return Constant::getNullValue(Op0->getType());

  // X / X -> 1
  // X % X -> 0
  if (Op0 == Op1)
    return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);

  KnownBits Known = computeKnownBits(Op1, /* Depth */ 0, Q);
  // X / 0 -> poison
  // X % 0 -> poison
  // If the divisor is known to be zero, just return poison. This can happen in
  // some cases where it is provable indirectly that the denominator is zero
  // but it is not trivially simplifiable (e.g. known zero through a phi node).
  if (Known.isZero())
    return PoisonValue::get(Ty);

  // X / 1 -> X
  // X % 1 -> 0
  // If the divisor can only be zero or one, we can't have division-by-zero
  // or remainder-by-zero, so assume the divisor is 1.
  // e.g. 1, zext (i8 X), sdiv X (Y and 1)
  if (Known.countMinLeadingZeros() == Known.getBitWidth() - 1)
    return IsDiv ? Op0 : Constant::getNullValue(Ty);

  // If X * Y does not overflow, then:
  //   X * Y / Y -> X
  //   X * Y % Y -> 0
  Value *X;
  if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
    auto *Mul = cast<OverflowingBinaryOperator>(Op0);
    // The multiplication can't overflow if it is defined not to, or if
    // X == A / Y for some A.
    if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
        (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
        (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
        (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
      return IsDiv ? X : Constant::getNullValue(Op0->getType());
    }
  }

  if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
    return IsDiv ? Constant::getNullValue(Op0->getType()) : Op0;

  if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // If the operation is with the result of a select instruction, check whether
  // operating on either branch of the select always yields the same value.
  if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
    if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  // If the operation is with the result of a phi instruction, check whether
  // operating on all incoming values of the phi always yields the same value.
  if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
    if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
      return V;

  return nullptr;
}

/// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          bool IsExact, const SimplifyQuery &Q,
                          unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  const APInt *DivC;
  if (IsExact && match(Op1, m_APInt(DivC))) {
    // If this is an exact divide by a constant, then the dividend (Op0) must
    // have at least as many trailing zeros as the divisor to divide evenly. If
    // it has less trailing zeros, then the result must be poison.
    if (DivC->countr_zero()) {
      KnownBits KnownOp0 = computeKnownBits(Op0, /* Depth */ 0, Q);
      if (KnownOp0.countMaxTrailingZeros() < DivC->countr_zero())
        return PoisonValue::get(Op0->getType());
    }

    // udiv exact (mul nsw X, C), C --> X
    // sdiv exact (mul nuw X, C), C --> X
    // where C is not a power of 2.
    Value *X;
    if (!DivC->isPowerOf2() &&
        (Opcode == Instruction::UDiv
             ? match(Op0, m_NSWMul(m_Value(X), m_Specific(Op1)))
             : match(Op0, m_NUWMul(m_Value(X), m_Specific(Op1)))))
      return X;
  }

  return nullptr;
}

/// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
                          const SimplifyQuery &Q, unsigned MaxRecurse) {
  if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
    return C;

  if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
    return V;

  // (X << Y) % X -> 0
  if (Q.IIQ.UseInstrInfo &&
      ((Opcode == Instruction::SRem &&
        match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
       (Opcode == Instruction::URem &&
        match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
    return Constant::getNullValue(Op0->getType());

  return nullptr;
}

/// Given operands for an SDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  // If two operands are negated and no signed overflow, return -1.
  if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
    return Constant::getAllOnesValue(Op0->getType());

  return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for a UDiv, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                               const SimplifyQuery &Q, unsigned MaxRecurse) {
  return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
}

Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
                              const SimplifyQuery &Q) {
  return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
}

/// Given operands for an SRem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  // If the divisor is 0, the result is undefined, so assume the divisor is -1.
  // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
  Value *X;
  if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
    return ConstantInt::getNullValue(Op0->getType());

  // If the two operands are negated, return 0.
  if (isKnownNegation(Op0, Op1))
    return ConstantInt::getNullValue(Op0->getType());

  return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
}

/// Given operands for a URem, see if we can fold the result.
/// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
                               unsigned MaxRecurse) {
  return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
}

Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
  return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
}

/// Returns true if a shift by \c Amount always yields poison.
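/// For example, "shl i32 %x, 32" always yields poison because the shift amount
/// equals the bit width of the type.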
isPoisonShift(Value * Amount,const SimplifyQuery & Q)1299 static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
1300 Constant *C = dyn_cast<Constant>(Amount);
1301 if (!C)
1302 return false;
1303
1304 // X shift by undef -> poison because it may shift by the bitwidth.
1305 if (Q.isUndefValue(C))
1306 return true;
1307
1308 // Shifting by the bitwidth or more is poison. This covers scalars and
1309 // fixed/scalable vectors with splat constants.
1310 const APInt *AmountC;
1311 if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
1312 return true;
1313
1314 // Try harder for fixed-length vectors:
1315 // If all lanes of a vector shift are poison, the whole shift is poison.
1316 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1317 for (unsigned I = 0,
1318 E = cast<FixedVectorType>(C->getType())->getNumElements();
1319 I != E; ++I)
1320 if (!isPoisonShift(C->getAggregateElement(I), Q))
1321 return false;
1322 return true;
1323 }
1324
1325 return false;
1326 }
1327
1328 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1329 /// If not, this returns null.
simplifyShift(Instruction::BinaryOps Opcode,Value * Op0,Value * Op1,bool IsNSW,const SimplifyQuery & Q,unsigned MaxRecurse)1330 static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1331 Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1332 unsigned MaxRecurse) {
1333 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1334 return C;
1335
1336 // poison shift by X -> poison
1337 if (isa<PoisonValue>(Op0))
1338 return Op0;
1339
1340 // 0 shift by X -> 0
1341 if (match(Op0, m_Zero()))
1342 return Constant::getNullValue(Op0->getType());
1343
1344 // X shift by 0 -> X
1345 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1346 // would be poison.
1347 Value *X;
1348 if (match(Op1, m_Zero()) ||
1349 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1350 return Op0;
1351
1352 // Fold undefined shifts.
1353 if (isPoisonShift(Op1, Q))
1354 return PoisonValue::get(Op0->getType());
1355
1356 // If the operation is with the result of a select instruction, check whether
1357 // operating on either branch of the select always yields the same value.
1358 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1359 if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1360 return V;
1361
1362 // If the operation is with the result of a phi instruction, check whether
1363 // operating on all incoming values of the phi always yields the same value.
1364 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1365 if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1366 return V;
1367
1368 // If any bits in the shift amount make that value greater than or equal to
1369 // the number of bits in the type, the shift is undefined.
1370 KnownBits KnownAmt = computeKnownBits(Op1, /* Depth */ 0, Q);
1371 if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1372 return PoisonValue::get(Op0->getType());
1373
1374 // If all valid bits in the shift amount are known zero, the first operand is
1375 // unchanged.
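// For example, for i32 only the low 5 bits of the amount are meaningful; if
// those are known zero, the amount is either 0 or at least 32 (already poison
// above), so returning Op0 is a sound refinement.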
1376 unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1377 if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1378 return Op0;
1379
1380 // Check for nsw shl leading to a poison value.
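// For example, if Op0 is an i8 with bit 6 known set and the sign bit known
// clear, a shift amount known to be 1 would force the sign bit to flip, which
// nsw forbids, so the result must be poison.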
1381 if (IsNSW) {
1382 assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1383 KnownBits KnownVal = computeKnownBits(Op0, /* Depth */ 0, Q);
1384 KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1385
1386 if (KnownVal.Zero.isSignBitSet())
1387 KnownShl.Zero.setSignBit();
1388 if (KnownVal.One.isSignBitSet())
1389 KnownShl.One.setSignBit();
1390
1391 if (KnownShl.hasConflict())
1392 return PoisonValue::get(Op0->getType());
1393 }
1394
1395 return nullptr;
1396 }
1397
1398 /// Given operands for an LShr or AShr, see if we can fold the result. If not,
1399 /// this returns null.
1400 static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1401 Value *Op1, bool IsExact,
1402 const SimplifyQuery &Q, unsigned MaxRecurse) {
1403 if (Value *V =
1404 simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1405 return V;
1406
1407 // X >> X -> 0
1408 if (Op0 == Op1)
1409 return Constant::getNullValue(Op0->getType());
1410
1411 // undef >> X -> 0
1412 // undef >> X -> undef (if it's exact)
1413 if (Q.isUndefValue(Op0))
1414 return IsExact ? Op0 : Constant::getNullValue(Op0->getType());
1415
1416 // The low bit cannot be shifted out of an exact shift if it is set.
1417 // TODO: Generalize by counting trailing zeros (see fold for exact division).
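// For example, if Op0 is known to be odd, any non-zero exact shift would drop
// the set low bit, so the amount must be 0 and the result is Op0.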
1418 if (IsExact) {
1419 KnownBits Op0Known = computeKnownBits(Op0, /* Depth */ 0, Q);
1420 if (Op0Known.One[0])
1421 return Op0;
1422 }
1423
1424 return nullptr;
1425 }
1426
1427 /// Given operands for an Shl, see if we can fold the result.
1428 /// If not, this returns null.
1429 static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1430 const SimplifyQuery &Q, unsigned MaxRecurse) {
1431 if (Value *V =
1432 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1433 return V;
1434
1435 Type *Ty = Op0->getType();
1436 // undef << X -> 0
1437 // undef << X -> undef (if it's NSW/NUW)
1438 if (Q.isUndefValue(Op0))
1439 return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Ty);
1440
1441 // (X >> A) << A -> X
1442 Value *X;
1443 if (Q.IIQ.UseInstrInfo &&
1444 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1445 return X;
1446
1447 // shl nuw i8 C, %x -> C iff C has sign bit set.
1448 if (IsNUW && match(Op0, m_Negative()))
1449 return Op0;
1450 // NOTE: could use computeKnownBits() / LazyValueInfo,
1451 // but the cost-benefit analysis suggests it isn't worth it.
1452
1453 // "nuw" guarantees that only zeros are shifted out, and "nsw" guarantees
1454 // that the sign-bit does not change, so the only input that does not
1455 // produce poison is 0, and "0 << (bitwidth-1) --> 0".
1456 if (IsNSW && IsNUW &&
1457 match(Op1, m_SpecificInt(Ty->getScalarSizeInBits() - 1)))
1458 return Constant::getNullValue(Ty);
1459
1460 return nullptr;
1461 }
1462
1463 Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1464 const SimplifyQuery &Q) {
1465 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
1466 }
1467
1468 /// Given operands for an LShr, see if we can fold the result.
1469 /// If not, this returns null.
1470 static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1471 const SimplifyQuery &Q, unsigned MaxRecurse) {
1472 if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
1473 MaxRecurse))
1474 return V;
1475
1476 // (X << A) >> A -> X
1477 Value *X;
1478 if (Q.IIQ.UseInstrInfo && match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1479 return X;
1480
1481 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1482 // We can return X as we do in the above case since OR alters no bits in X.
1483 // SimplifyDemandedBits in InstCombine can do more general optimization for
1484 // bit manipulation. This pattern aims to provide opportunities for other
1485 // optimizers by supporting a simple but common case in InstSimplify.
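// For example, with i8, A == 3 and Y known to fit in 3 bits: the low 3 bits of
// the 'or' hold only Y and are discarded by the shift, while the nuw shl
// guarantees no bits of X were lost, so the result is X.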
1486 Value *Y;
1487 const APInt *ShRAmt, *ShLAmt;
1488 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(ShRAmt)) &&
1489 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1490 *ShRAmt == *ShLAmt) {
1491 const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
1492 const unsigned EffWidthY = YKnown.countMaxActiveBits();
1493 if (ShRAmt->uge(EffWidthY))
1494 return X;
1495 }
1496
1497 return nullptr;
1498 }
1499
1500 Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1501 const SimplifyQuery &Q) {
1502 return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1503 }
1504
1505 /// Given operands for an AShr, see if we can fold the result.
1506 /// If not, this returns null.
1507 static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1508 const SimplifyQuery &Q, unsigned MaxRecurse) {
1509 if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
1510 MaxRecurse))
1511 return V;
1512
1513 // -1 >>a X --> -1
1514 // (-1 << X) a>> X --> -1
1515 // Do not return Op0 because it may contain undef elements if it's a vector.
1516 if (match(Op0, m_AllOnes()) ||
1517 match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
1518 return Constant::getAllOnesValue(Op0->getType());
1519
1520 // (X << A) >> A -> X
1521 Value *X;
1522 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1523 return X;
1524
1525 // Arithmetic shifting an all-sign-bit value is a no-op.
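// For example, if Op0 is (sext i1 %b to i32), all 32 bits are copies of the
// sign bit, so any in-range ashr returns Op0 unchanged.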
1526 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1527 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1528 return Op0;
1529
1530 return nullptr;
1531 }
1532
1533 Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1534 const SimplifyQuery &Q) {
1535 return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1536 }
1537
1538 /// Commuted variants are assumed to be handled by calling this function again
1539 /// with the parameters swapped.
1540 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1541 ICmpInst *UnsignedICmp, bool IsAnd,
1542 const SimplifyQuery &Q) {
1543 Value *X, *Y;
1544
1545 ICmpInst::Predicate EqPred;
1546 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1547 !ICmpInst::isEquality(EqPred))
1548 return nullptr;
1549
1550 ICmpInst::Predicate UnsignedPred;
1551
1552 Value *A, *B;
1553 // Y = (A - B);
1554 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1555 if (match(UnsignedICmp,
1556 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1557 ICmpInst::isUnsigned(UnsignedPred)) {
1558 // A >=/<= B || (A - B) != 0 <--> true
1559 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1560 UnsignedPred == ICmpInst::ICMP_ULE) &&
1561 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1562 return ConstantInt::getTrue(UnsignedICmp->getType());
1563 // A </> B && (A - B) == 0 <--> false
1564 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1565 UnsignedPred == ICmpInst::ICMP_UGT) &&
1566 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1567 return ConstantInt::getFalse(UnsignedICmp->getType());
1568
1569 // A </> B && (A - B) != 0 <--> A </> B
1570 // A </> B || (A - B) != 0 <--> (A - B) != 0
1571 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1572 UnsignedPred == ICmpInst::ICMP_UGT))
1573 return IsAnd ? UnsignedICmp : ZeroICmp;
1574
1575 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1576 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1577 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1578 UnsignedPred == ICmpInst::ICMP_UGE))
1579 return IsAnd ? ZeroICmp : UnsignedICmp;
1580 }
1581
1582 // Given Y = (A - B)
1583 // Y >= A && Y != 0 --> Y >= A iff B != 0
1584 // Y < A || Y == 0 --> Y < A iff B != 0
1585 if (match(UnsignedICmp,
1586 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1587 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1588 EqPred == ICmpInst::ICMP_NE &&
1589 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1590 return UnsignedICmp;
1591 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1592 EqPred == ICmpInst::ICMP_EQ &&
1593 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1594 return UnsignedICmp;
1595 }
1596 }
1597
1598 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1599 ICmpInst::isUnsigned(UnsignedPred))
1600 ;
1601 else if (match(UnsignedICmp,
1602 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1603 ICmpInst::isUnsigned(UnsignedPred))
1604 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1605 else
1606 return nullptr;
1607
1608 // X > Y && Y == 0 --> Y == 0 iff X != 0
1609 // X > Y || Y == 0 --> X > Y iff X != 0
1610 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1611 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1612 return IsAnd ? ZeroICmp : UnsignedICmp;
1613
1614 // X <= Y && Y != 0 --> X <= Y iff X != 0
1615 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1616 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1617 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1618 return IsAnd ? UnsignedICmp : ZeroICmp;
1619
1620 // The transforms below here are expected to be handled more generally with
1621 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1622 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1623 // these are candidates for removal.
1624
1625 // X < Y && Y != 0 --> X < Y
1626 // X < Y || Y != 0 --> Y != 0
1627 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1628 return IsAnd ? UnsignedICmp : ZeroICmp;
1629
1630 // X >= Y && Y == 0 --> Y == 0
1631 // X >= Y || Y == 0 --> X >= Y
1632 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1633 return IsAnd ? ZeroICmp : UnsignedICmp;
1634
1635 // X < Y && Y == 0 --> false
1636 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1637 IsAnd)
1638 return getFalse(UnsignedICmp->getType());
1639
1640 // X >= Y || Y != 0 --> true
1641 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1642 !IsAnd)
1643 return getTrue(UnsignedICmp->getType());
1644
1645 return nullptr;
1646 }
1647
1648 /// Test if a pair of compares with a shared operand and 2 constants has an
1649 /// empty set intersection, full set union, or if one compare is a superset of
1650 /// the other.
1651 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1652 bool IsAnd) {
1653 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1654 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1655 return nullptr;
1656
1657 const APInt *C0, *C1;
1658 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1659 !match(Cmp1->getOperand(1), m_APInt(C1)))
1660 return nullptr;
1661
1662 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1663 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1664
1665 // For and-of-compares, check if the intersection is empty:
1666 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
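// e.g. (icmp ult X, 4) && (icmp ugt X, 10): the ranges [0,4) and [11,UMAX] do
// not intersect, so the 'and' is false.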
1667 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1668 return getFalse(Cmp0->getType());
1669
1670 // For or-of-compares, check if the union is full:
1671 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1672 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1673 return getTrue(Cmp0->getType());
1674
1675 // Is one range a superset of the other?
1676 // If this is and-of-compares, take the smaller set:
1677 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1678 // If this is or-of-compares, take the larger set:
1679 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1680 if (Range0.contains(Range1))
1681 return IsAnd ? Cmp1 : Cmp0;
1682 if (Range1.contains(Range0))
1683 return IsAnd ? Cmp0 : Cmp1;
1684
1685 return nullptr;
1686 }
1687
1688 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1689 const InstrInfoQuery &IIQ) {
1690 // (icmp (add V, C0), C1) & (icmp V, C0)
1691 ICmpInst::Predicate Pred0, Pred1;
1692 const APInt *C0, *C1;
1693 Value *V;
1694 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1695 return nullptr;
1696
1697 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1698 return nullptr;
1699
1700 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1701 if (AddInst->getOperand(1) != Op1->getOperand(1))
1702 return nullptr;
1703
1704 Type *ITy = Op0->getType();
1705 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1706 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1707
1708 const APInt Delta = *C1 - *C0;
1709 if (C0->isStrictlyPositive()) {
1710 if (Delta == 2) {
1711 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1712 return getFalse(ITy);
1713 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1714 return getFalse(ITy);
1715 }
1716 if (Delta == 1) {
1717 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1718 return getFalse(ITy);
1719 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1720 return getFalse(ITy);
1721 }
1722 }
1723 if (C0->getBoolValue() && IsNUW) {
1724 if (Delta == 2)
1725 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1726 return getFalse(ITy);
1727 if (Delta == 1)
1728 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1729 return getFalse(ITy);
1730 }
1731
1732 return nullptr;
1733 }
1734
1735 /// Try to simplify and/or of icmp with ctpop intrinsic.
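/// For example, (ctpop(X) == 3) || (X != 0) --> X != 0 because a non-zero
/// population count already implies X is non-zero; dually,
/// (ctpop(X) != 3) && (X == 0) --> X == 0.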
1736 static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1737 bool IsAnd) {
1738 ICmpInst::Predicate Pred0, Pred1;
1739 Value *X;
1740 const APInt *C;
1741 if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1742 m_APInt(C))) ||
1743 !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1744 return nullptr;
1745
1746 // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1747 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1748 return Cmp1;
1749 // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1750 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1751 return Cmp1;
1752
1753 return nullptr;
1754 }
1755
1756 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1757 const SimplifyQuery &Q) {
1758 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1759 return X;
1760 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1761 return X;
1762
1763 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1764 return X;
1765
1766 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1767 return X;
1768 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1769 return X;
1770
1771 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1772 return X;
1773 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1774 return X;
1775
1776 return nullptr;
1777 }
1778
1779 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1780 const InstrInfoQuery &IIQ) {
1781 // (icmp (add V, C0), C1) | (icmp V, C0)
1782 ICmpInst::Predicate Pred0, Pred1;
1783 const APInt *C0, *C1;
1784 Value *V;
1785 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1786 return nullptr;
1787
1788 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1789 return nullptr;
1790
1791 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1792 if (AddInst->getOperand(1) != Op1->getOperand(1))
1793 return nullptr;
1794
1795 Type *ITy = Op0->getType();
1796 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1797 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1798
1799 const APInt Delta = *C1 - *C0;
1800 if (C0->isStrictlyPositive()) {
1801 if (Delta == 2) {
1802 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1803 return getTrue(ITy);
1804 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1805 return getTrue(ITy);
1806 }
1807 if (Delta == 1) {
1808 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1809 return getTrue(ITy);
1810 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1811 return getTrue(ITy);
1812 }
1813 }
1814 if (C0->getBoolValue() && IsNUW) {
1815 if (Delta == 2)
1816 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1817 return getTrue(ITy);
1818 if (Delta == 1)
1819 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1820 return getTrue(ITy);
1821 }
1822
1823 return nullptr;
1824 }
1825
1826 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1827 const SimplifyQuery &Q) {
1828 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1829 return X;
1830 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1831 return X;
1832
1833 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1834 return X;
1835
1836 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1837 return X;
1838 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1839 return X;
1840
1841 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1842 return X;
1843 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1844 return X;
1845
1846 return nullptr;
1847 }
1848
1849 static Value *simplifyAndOrOfFCmps(const SimplifyQuery &Q, FCmpInst *LHS,
1850 FCmpInst *RHS, bool IsAnd) {
1851 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1852 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1853 if (LHS0->getType() != RHS0->getType())
1854 return nullptr;
1855
1856 const DataLayout &DL = Q.DL;
1857 const TargetLibraryInfo *TLI = Q.TLI;
1858
1859 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1860 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1861 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1862 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1863 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1864 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1865 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1866 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1867 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1868 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1869 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1870 if (((LHS1 == RHS0 || LHS1 == RHS1) &&
1871 isKnownNeverNaN(LHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
1872 ((LHS0 == RHS0 || LHS0 == RHS1) &&
1873 isKnownNeverNaN(LHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
1874 return RHS;
1875
1876 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1877 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1878 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1879 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1880 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1881 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1882 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1883 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1884 if (((RHS1 == LHS0 || RHS1 == LHS1) &&
1885 isKnownNeverNaN(RHS0, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)) ||
1886 ((RHS0 == LHS0 || RHS0 == LHS1) &&
1887 isKnownNeverNaN(RHS1, DL, TLI, 0, Q.AC, Q.CxtI, Q.DT)))
1888 return LHS;
1889 }
1890
1891 return nullptr;
1892 }
1893
1894 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
1895 Value *Op1, bool IsAnd) {
1896 // Look through casts of the 'and' operands to find compares.
1897 auto *Cast0 = dyn_cast<CastInst>(Op0);
1898 auto *Cast1 = dyn_cast<CastInst>(Op1);
1899 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
1900 Cast0->getSrcTy() == Cast1->getSrcTy()) {
1901 Op0 = Cast0->getOperand(0);
1902 Op1 = Cast1->getOperand(0);
1903 }
1904
1905 Value *V = nullptr;
1906 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
1907 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
1908 if (ICmp0 && ICmp1)
1909 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
1910 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
1911
1912 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
1913 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
1914 if (FCmp0 && FCmp1)
1915 V = simplifyAndOrOfFCmps(Q, FCmp0, FCmp1, IsAnd);
1916
1917 if (!V)
1918 return nullptr;
1919 if (!Cast0)
1920 return V;
1921
1922 // If we looked through casts, we can only handle a constant simplification
1923 // because we are not allowed to create a cast instruction here.
1924 if (auto *C = dyn_cast<Constant>(V))
1925 return ConstantFoldCastOperand(Cast0->getOpcode(), C, Cast0->getType(),
1926 Q.DL);
1927
1928 return nullptr;
1929 }
1930
1931 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
1932 const SimplifyQuery &Q,
1933 bool AllowRefinement,
1934 SmallVectorImpl<Instruction *> *DropFlags,
1935 unsigned MaxRecurse);
1936
1937 static Value *simplifyAndOrWithICmpEq(unsigned Opcode, Value *Op0, Value *Op1,
1938 const SimplifyQuery &Q,
1939 unsigned MaxRecurse) {
1940 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1941 "Must be and/or");
1942 ICmpInst::Predicate Pred;
1943 Value *A, *B;
1944 if (!match(Op0, m_ICmp(Pred, m_Value(A), m_Value(B))) ||
1945 !ICmpInst::isEquality(Pred))
1946 return nullptr;
1947
1948 auto Simplify = [&](Value *Res) -> Value * {
1949 Constant *Absorber = ConstantExpr::getBinOpAbsorber(Opcode, Res->getType());
1950
1951 // and (icmp eq a, b), x implies (a==b) inside x.
1952 // or (icmp ne a, b), x implies (a==b) inside x.
1953 // If x simplifies to true/false, we can simplify the and/or.
1954 if (Pred ==
1955 (Opcode == Instruction::And ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE)) {
1956 if (Res == Absorber)
1957 return Absorber;
1958 if (Res == ConstantExpr::getBinOpIdentity(Opcode, Res->getType()))
1959 return Op0;
1960 return nullptr;
1961 }
1962
1963 // If we have and (icmp ne a, b), x and for a==b we can simplify x to false,
1964 // then we can drop the icmp, as x will already be false in the case where
1965 // the icmp is false. Similar for or and true.
1966 if (Res == Absorber)
1967 return Op1;
1968 return nullptr;
1969 };
1970
1971 if (Value *Res =
1972 simplifyWithOpReplaced(Op1, A, B, Q, /* AllowRefinement */ true,
1973 /* DropFlags */ nullptr, MaxRecurse))
1974 return Simplify(Res);
1975 if (Value *Res =
1976 simplifyWithOpReplaced(Op1, B, A, Q, /* AllowRefinement */ true,
1977 /* DropFlags */ nullptr, MaxRecurse))
1978 return Simplify(Res);
1979
1980 return nullptr;
1981 }
1982
1983 /// Given a bitwise logic op, check if the operands are add/sub with a common
1984 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
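/// For example, with i8 constants ~5 == -6, so (X + 5) & (-6 - X) is
/// (X + 5) & ~(X + 5) == 0, while with '|' or '^' it folds to -1.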
1985 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
1986 Instruction::BinaryOps Opcode) {
1987 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
1988 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
1989 Value *X;
1990 Constant *C1, *C2;
1991 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
1992 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
1993 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
1994 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
1995 if (ConstantExpr::getNot(C1) == C2) {
1996 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
1997 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
1998 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
1999 Type *Ty = Op0->getType();
2000 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2001 : ConstantInt::getAllOnesValue(Ty);
2002 }
2003 }
2004 return nullptr;
2005 }
2006
2007 // Commutative patterns for and that will be tried with both operand orders.
2008 static Value *simplifyAndCommutative(Value *Op0, Value *Op1,
2009 const SimplifyQuery &Q,
2010 unsigned MaxRecurse) {
2011 // ~A & A = 0
2012 if (match(Op0, m_Not(m_Specific(Op1))))
2013 return Constant::getNullValue(Op0->getType());
2014
2015 // (A | ?) & A = A
2016 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2017 return Op1;
2018
2019 // (X | ~Y) & (X | Y) --> X
2020 Value *X, *Y;
2021 if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2022 match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2023 return X;
2024
2025 // If we have a multiplication overflow check that is being 'and'ed with a
2026 // check that one of the multipliers is not zero, we can omit the 'and', and
2027 // only keep the overflow check.
2028 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2029 return Op1;
2030
2031 // -A & A = A if A is a power of two or zero.
2032 if (match(Op0, m_Neg(m_Specific(Op1))) &&
2033 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2034 return Op1;
2035
2036 // This is a similar pattern used for checking if a value is a power-of-2:
2037 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
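// e.g. A == 8 gives 7 & 8 == 0, and A == 0 gives -1 & 0 == 0.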
2038 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2039 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2040 return Constant::getNullValue(Op1->getType());
2041
2042 // (x << N) & ((x << M) - 1) --> 0, where x is known to be a power of 2 and
2043 // M <= N.
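// e.g. x == 4, M == 1, N == 3: (4 << 3) == 32 has only bit 5 set, while
// (4 << 1) - 1 == 7 has only bits 0..2 set, so the 'and' is 0.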
2044 const APInt *Shift1, *Shift2;
2045 if (match(Op0, m_Shl(m_Value(X), m_APInt(Shift1))) &&
2046 match(Op1, m_Add(m_Shl(m_Specific(X), m_APInt(Shift2)), m_AllOnes())) &&
2047 isKnownToBeAPowerOfTwo(X, Q.DL, /*OrZero*/ true, /*Depth*/ 0, Q.AC,
2048 Q.CxtI) &&
2049 Shift1->uge(*Shift2))
2050 return Constant::getNullValue(Op0->getType());
2051
2052 if (Value *V =
2053 simplifyAndOrWithICmpEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2054 return V;
2055
2056 return nullptr;
2057 }
2058
2059 /// Given operands for an And, see if we can fold the result.
2060 /// If not, this returns null.
2061 static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2062 unsigned MaxRecurse) {
2063 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2064 return C;
2065
2066 // X & poison -> poison
2067 if (isa<PoisonValue>(Op1))
2068 return Op1;
2069
2070 // X & undef -> 0
2071 if (Q.isUndefValue(Op1))
2072 return Constant::getNullValue(Op0->getType());
2073
2074 // X & X = X
2075 if (Op0 == Op1)
2076 return Op0;
2077
2078 // X & 0 = 0
2079 if (match(Op1, m_Zero()))
2080 return Constant::getNullValue(Op0->getType());
2081
2082 // X & -1 = X
2083 if (match(Op1, m_AllOnes()))
2084 return Op0;
2085
2086 if (Value *Res = simplifyAndCommutative(Op0, Op1, Q, MaxRecurse))
2087 return Res;
2088 if (Value *Res = simplifyAndCommutative(Op1, Op0, Q, MaxRecurse))
2089 return Res;
2090
2091 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2092 return V;
2093
2094 // A mask that only clears known zeros of a shifted value is a no-op.
2095 const APInt *Mask;
2096 const APInt *ShAmt;
2097 Value *X, *Y;
2098 if (match(Op1, m_APInt(Mask))) {
2099 // If all bits in the inverted and shifted mask are clear:
2100 // and (shl X, ShAmt), Mask --> shl X, ShAmt
2101 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2102 (~(*Mask)).lshr(*ShAmt).isZero())
2103 return Op0;
2104
2105 // If all bits in the inverted and shifted mask are clear:
2106 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2107 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2108 (~(*Mask)).shl(*ShAmt).isZero())
2109 return Op0;
2110 }
2111
2112 // and 2^x-1, 2^C --> 0 where x <= C.
2113 const APInt *PowerC;
2114 Value *Shift;
2115 if (match(Op1, m_Power2(PowerC)) &&
2116 match(Op0, m_Add(m_Value(Shift), m_AllOnes())) &&
2117 isKnownToBeAPowerOfTwo(Shift, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
2118 Q.DT)) {
2119 KnownBits Known = computeKnownBits(Shift, /* Depth */ 0, Q);
2120 // Use getActiveBits() to make use of the additional power of two knowledge
2121 if (PowerC->getActiveBits() >= Known.getMaxValue().getActiveBits())
2122 return ConstantInt::getNullValue(Op1->getType());
2123 }
2124
2125 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2126 return V;
2127
2128 // Try some generic simplifications for associative operations.
2129 if (Value *V =
2130 simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2131 return V;
2132
2133 // And distributes over Or. Try some generic simplifications based on this.
2134 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2135 Instruction::Or, Q, MaxRecurse))
2136 return V;
2137
2138 // And distributes over Xor. Try some generic simplifications based on this.
2139 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2140 Instruction::Xor, Q, MaxRecurse))
2141 return V;
2142
2143 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2144 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2145 // A & (A && B) -> A && B
2146 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2147 return Op1;
2148 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2149 return Op0;
2150 }
2151 // If the operation is with the result of a select instruction, check
2152 // whether operating on either branch of the select always yields the same
2153 // value.
2154 if (Value *V =
2155 threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2156 return V;
2157 }
2158
2159 // If the operation is with the result of a phi instruction, check whether
2160 // operating on all incoming values of the phi always yields the same value.
2161 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2162 if (Value *V =
2163 threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2164 return V;
2165
2166 // Assuming the effective width of Y is not larger than A, i.e. all bits
2167 // from X and Y are disjoint in (X << A) | Y,
2168 // if the mask of this AND op covers all bits of X or Y, while it covers
2169 // no bits from the other, we can bypass this AND op. E.g.,
2170 // ((X << A) | Y) & Mask -> Y,
2171 // if Mask = ((1 << effective_width_of(Y)) - 1)
2172 // ((X << A) | Y) & Mask -> X << A,
2173 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2174 // SimplifyDemandedBits in InstCombine can optimize the general case.
2175 // This pattern aims to help other passes for a common case.
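// For example, with i8, X nuw-shifted left by 4 and Y known to fit in 4 bits:
// masking the 'or' with 0x0f returns Y, and masking with 0xf0 returns X << 4.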
2176 Value *XShifted;
2177 if (Q.IIQ.UseInstrInfo && match(Op1, m_APInt(Mask)) &&
2178 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2179 m_Value(XShifted)),
2180 m_Value(Y)))) {
2181 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2182 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2183 const KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
2184 const unsigned EffWidthY = YKnown.countMaxActiveBits();
2185 if (EffWidthY <= ShftCnt) {
2186 const KnownBits XKnown = computeKnownBits(X, /* Depth */ 0, Q);
2187 const unsigned EffWidthX = XKnown.countMaxActiveBits();
2188 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2189 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2190 // If the mask is extracting all bits from X or Y as is, we can skip
2191 // this AND op.
2192 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2193 return Y;
2194 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2195 return XShifted;
2196 }
2197 }
2198
2199 // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2200 // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
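// (X | Y) ^ X keeps only the bits of Y that are not in X, and (X | Y) ^ Y
// keeps only the bits of X that are not in Y; the two sets are disjoint, so
// the 'and' is 0.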
2201 BinaryOperator *Or;
2202 if (match(Op0, m_c_Xor(m_Value(X),
2203 m_CombineAnd(m_BinOp(Or),
2204 m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2205 match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2206 return Constant::getNullValue(Op0->getType());
2207
2208 const APInt *C1;
2209 Value *A;
2210 // (A ^ C) & (A ^ ~C) -> 0
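// Each bit is flipped by exactly one of the two xors, so the results are
// bitwise complements of each other and their 'and' is 0.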
2211 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2212 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2213 return Constant::getNullValue(Op0->getType());
2214
2215 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2216 if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2217 // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2218 if (*Implied == true)
2219 return Op0;
2220 // If Op0 is true implies Op1 is false, then they are not true together.
2221 if (*Implied == false)
2222 return ConstantInt::getFalse(Op0->getType());
2223 }
2224 if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2225 // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2226 if (*Implied)
2227 return Op1;
2228 // If Op1 is true implies Op0 is false, then they are not true together.
2229 if (!*Implied)
2230 return ConstantInt::getFalse(Op1->getType());
2231 }
2232 }
2233
2234 if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2235 return V;
2236
2237 return nullptr;
2238 }
2239
2240 Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2241 return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2242 }
2243
2244 // TODO: Many of these folds could use LogicalAnd/LogicalOr.
2245 static Value *simplifyOrLogic(Value *X, Value *Y) {
2246 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2247 Type *Ty = X->getType();
2248
2249 // X | ~X --> -1
2250 if (match(Y, m_Not(m_Specific(X))))
2251 return ConstantInt::getAllOnesValue(Ty);
2252
2253 // X | ~(X & ?) = -1
2254 if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2255 return ConstantInt::getAllOnesValue(Ty);
2256
2257 // X | (X & ?) --> X
2258 if (match(Y, m_c_And(m_Specific(X), m_Value())))
2259 return X;
2260
2261 Value *A, *B;
2262
2263 // (A ^ B) | (A | B) --> A | B
2264 // (A ^ B) | (B | A) --> B | A
2265 if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2266 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2267 return Y;
2268
2269 // ~(A ^ B) | (A | B) --> -1
2270 // ~(A ^ B) | (B | A) --> -1
2271 if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2272 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2273 return ConstantInt::getAllOnesValue(Ty);
2274
2275 // (A & ~B) | (A ^ B) --> A ^ B
2276 // (~B & A) | (A ^ B) --> A ^ B
2277 // (A & ~B) | (B ^ A) --> B ^ A
2278 // (~B & A) | (B ^ A) --> B ^ A
2279 if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2280 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2281 return Y;
2282
2283 // (~A ^ B) | (A & B) --> ~A ^ B
2284 // (B ^ ~A) | (A & B) --> B ^ ~A
2285 // (~A ^ B) | (B & A) --> ~A ^ B
2286 // (B ^ ~A) | (B & A) --> B ^ ~A
2287 if (match(X, m_c_Xor(m_NotForbidUndef(m_Value(A)), m_Value(B))) &&
2288 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2289 return X;
2290
2291 // (~A | B) | (A ^ B) --> -1
2292 // (~A | B) | (B ^ A) --> -1
2293 // (B | ~A) | (A ^ B) --> -1
2294 // (B | ~A) | (B ^ A) --> -1
2295 if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2296 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2297 return ConstantInt::getAllOnesValue(Ty);
2298
2299 // (~A & B) | ~(A | B) --> ~A
2300 // (~A & B) | ~(B | A) --> ~A
2301 // (B & ~A) | ~(A | B) --> ~A
2302 // (B & ~A) | ~(B | A) --> ~A
2303 Value *NotA;
2304 if (match(X,
2305 m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2306 m_Value(B))) &&
2307 match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2308 return NotA;
2309 // The same is true of Logical And
2310 // TODO: This could share the logic of the version above if there was a
2311 // version of LogicalAnd that allowed more than just i1 types.
2312 if (match(X, m_c_LogicalAnd(
2313 m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2314 m_Value(B))) &&
2315 match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
2316 return NotA;
2317
2318 // ~(A ^ B) | (A & B) --> ~(A ^ B)
2319 // ~(A ^ B) | (B & A) --> ~(A ^ B)
2320 Value *NotAB;
2321 if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
2322 m_Value(NotAB))) &&
2323 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2324 return NotAB;
2325
2326 // ~(A & B) | (A ^ B) --> ~(A & B)
2327 // ~(A & B) | (B ^ A) --> ~(A & B)
2328 if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
2329 m_Value(NotAB))) &&
2330 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2331 return NotAB;
2332
2333 return nullptr;
2334 }
2335
2336 /// Given operands for an Or, see if we can fold the result.
2337 /// If not, this returns null.
2338 static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2339 unsigned MaxRecurse) {
2340 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2341 return C;
2342
2343 // X | poison -> poison
2344 if (isa<PoisonValue>(Op1))
2345 return Op1;
2346
2347 // X | undef -> -1
2348 // X | -1 = -1
2349 // Do not return Op1 because it may contain undef elements if it's a vector.
2350 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2351 return Constant::getAllOnesValue(Op0->getType());
2352
2353 // X | X = X
2354 // X | 0 = X
2355 if (Op0 == Op1 || match(Op1, m_Zero()))
2356 return Op0;
2357
2358 if (Value *R = simplifyOrLogic(Op0, Op1))
2359 return R;
2360 if (Value *R = simplifyOrLogic(Op1, Op0))
2361 return R;
2362
2363 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2364 return V;
2365
2366 // Rotated -1 is still -1:
2367 // (-1 << X) | (-1 >> (C - X)) --> -1
2368 // (-1 >> X) | (-1 << (C - X)) --> -1
2369 // ...with C <= bitwidth (and commuted variants).
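// e.g. for i8 with C == 8, (-1 << X) sets bits X..7 and (-1 >> (8 - X)) sets
// bits 0..X-1, covering all 8 bits; when X is 0 or 8 one of the shifts is
// poison, and folding to -1 is still a valid refinement.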
2370 Value *X, *Y;
2371 if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2372 match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2373 (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2374 match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2375 const APInt *C;
2376 if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2377 match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2378 C->ule(X->getType()->getScalarSizeInBits())) {
2379 return ConstantInt::getAllOnesValue(X->getType());
2380 }
2381 }
2382
2383 // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2384 // are mixing in another shift that is redundant with the funnel shift.
2385
2386 // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2387 // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2388 if (match(Op0,
2389 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2390 match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2391 return Op0;
2392 if (match(Op1,
2393 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2394 match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2395 return Op1;
2396
2397 // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2398 // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2399 if (match(Op0,
2400 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2401 match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2402 return Op0;
2403 if (match(Op1,
2404 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2405 match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2406 return Op1;
2407
2408 if (Value *V =
2409 simplifyAndOrWithICmpEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2410 return V;
2411 if (Value *V =
2412 simplifyAndOrWithICmpEq(Instruction::Or, Op1, Op0, Q, MaxRecurse))
2413 return V;
2414
2415 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2416 return V;
2417
2418 // If we have a multiplication overflow check that is being 'and'ed with a
2419 // check that one of the multipliers is not zero, we can omit the 'and', and
2420 // only keep the overflow check.
2421 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2422 return Op1;
2423 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2424 return Op0;
2425
2426 // Try some generic simplifications for associative operations.
2427 if (Value *V =
2428 simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2429 return V;
2430
2431 // Or distributes over And. Try some generic simplifications based on this.
2432 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2433 Instruction::And, Q, MaxRecurse))
2434 return V;
2435
2436 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2437 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2438 // A | (A || B) -> A || B
2439 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2440 return Op1;
2441 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2442 return Op0;
2443 }
2444 // If the operation is with the result of a select instruction, check
2445 // whether operating on either branch of the select always yields the same
2446 // value.
2447 if (Value *V =
2448 threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2449 return V;
2450 }
2451
2452 // (A & C1)|(B & C2)
2453 Value *A, *B;
2454 const APInt *C1, *C2;
2455 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2456 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2457 if (*C1 == ~*C2) {
2458 // (A & C1)|(B & C2)
2459 // If we have: ((V + N) & C1) | (V & C2)
2460 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2461 // replace with V+N.
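// e.g. i8 with C1 == 0xf0, C2 == 0x0f: when N has no bits in 0x0f, adding N
// leaves the low 4 bits of V intact, so ((V + N) & 0xf0) | (V & 0x0f) is
// just V + N.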
2462 Value *N;
2463 if (C2->isMask() && // C2 == 0+1+
2464 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2465 // Add commutes, try both ways.
2466 if (MaskedValueIsZero(N, *C2, Q))
2467 return A;
2468 }
2469 // Or commutes, try both ways.
2470 if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2471 // Add commutes, try both ways.
2472 if (MaskedValueIsZero(N, *C1, Q))
2473 return B;
2474 }
2475 }
2476 }
2477
2478 // If the operation is with the result of a phi instruction, check whether
2479 // operating on all incoming values of the phi always yields the same value.
2480 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2481 if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2482 return V;
2483
2484 // (A ^ C) | (A ^ ~C) -> -1, i.e. all bits set to one.
2485 if (match(Op0, m_Xor(m_Value(A), m_APInt(C1))) &&
2486 match(Op1, m_Xor(m_Specific(A), m_SpecificInt(~*C1))))
2487 return Constant::getAllOnesValue(Op0->getType());
2488
2489 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2490 if (std::optional<bool> Implied =
2491 isImpliedCondition(Op0, Op1, Q.DL, false)) {
2492 // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2493 if (*Implied == false)
2494 return Op0;
2495 // If Op0 is false implies Op1 is true, then at least one is always true.
2496 if (*Implied == true)
2497 return ConstantInt::getTrue(Op0->getType());
2498 }
2499 if (std::optional<bool> Implied =
2500 isImpliedCondition(Op1, Op0, Q.DL, false)) {
2501 // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2502 if (*Implied == false)
2503 return Op1;
2504 // If Op1 is false implies Op0 is true, then at least one is always true.
2505 if (*Implied == true)
2506 return ConstantInt::getTrue(Op1->getType());
2507 }
2508 }
2509
2510 if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2511 return V;
2512
2513 return nullptr;
2514 }
2515
2516 Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2517 return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2518 }
2519
2520 /// Given operands for a Xor, see if we can fold the result.
2521 /// If not, this returns null.
2522 static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2523 unsigned MaxRecurse) {
2524 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2525 return C;
2526
2527 // X ^ poison -> poison
2528 if (isa<PoisonValue>(Op1))
2529 return Op1;
2530
2531 // A ^ undef -> undef
2532 if (Q.isUndefValue(Op1))
2533 return Op1;
2534
2535 // A ^ 0 = A
2536 if (match(Op1, m_Zero()))
2537 return Op0;
2538
2539 // A ^ A = 0
2540 if (Op0 == Op1)
2541 return Constant::getNullValue(Op0->getType());
2542
2543 // A ^ ~A = ~A ^ A = -1
2544 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2545 return Constant::getAllOnesValue(Op0->getType());
2546
2547 auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2548 Value *A, *B;
2549 // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
2550 if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2551 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2552 return A;
2553
2554 // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2555 // The 'not' op must contain a complete -1 operand (no undef elements for
2556 // vector) for the transform to be safe.
2557 Value *NotA;
2558 if (match(X,
2559 m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
2560 m_Value(B))) &&
2561 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2562 return NotA;
2563
2564 return nullptr;
2565 };
2566 if (Value *R = foldAndOrNot(Op0, Op1))
2567 return R;
2568 if (Value *R = foldAndOrNot(Op1, Op0))
2569 return R;
2570
2571 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2572 return V;
2573
2574 // Try some generic simplifications for associative operations.
2575 if (Value *V =
2576 simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2577 return V;
2578
2579 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2580 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2581 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2582 // only if B and C are equal. If B and C are equal then (since we assume
2583 // that operands have already been simplified) "select(cond, B, C)" should
2584 // have been simplified to the common value of B and C already. Analysing
2585 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2586 // for threading over phi nodes.
2587
2588 if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2589 return V;
2590
2591 return nullptr;
2592 }
2593
2594 Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2595 return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2596 }
2597
2598 static Type *getCompareTy(Value *Op) {
2599 return CmpInst::makeCmpResultType(Op->getType());
2600 }
2601
2602 /// Rummage around inside V looking for something equivalent to the comparison
2603 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2604 /// Helper function for analyzing max/min idioms.
2605 static Value *extractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2606 Value *LHS, Value *RHS) {
2607 SelectInst *SI = dyn_cast<SelectInst>(V);
2608 if (!SI)
2609 return nullptr;
2610 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2611 if (!Cmp)
2612 return nullptr;
2613 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2614 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2615 return Cmp;
2616 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2617 LHS == CmpRHS && RHS == CmpLHS)
2618 return Cmp;
2619 return nullptr;
2620 }
2621
2622 /// Return true if the underlying object (storage) must be disjoint from
2623 /// storage returned by any noalias return call.
2624 static bool isAllocDisjoint(const Value *V) {
2625 // For allocas, we consider only static ones (dynamic
2626 // allocas might be transformed into calls to malloc not simultaneously
2627 // live with the compared-to allocation). For globals, we exclude symbols
2628 // that might be resolved lazily to symbols in another dynamically-loaded
2629 // library (and, thus, could be malloc'ed by the implementation).
2630 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2631 return AI->isStaticAlloca();
2632 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2633 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2634 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2635 !GV->isThreadLocal();
2636 if (const Argument *A = dyn_cast<Argument>(V))
2637 return A->hasByValAttr();
2638 return false;
2639 }
2640
2641 /// Return true if V1 and V2 are each the base of some distinct storage region
2642 /// [V, object_size(V)] which do not overlap. Note that zero sized regions
2643 /// *are* possible, and that zero sized regions do not overlap with any other.
2644 static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2645 // Global variables always exist, so they always exist during the lifetime
2646 // of each other and all allocas. Global variables themselves usually have
2647 // non-overlapping storage, but since their addresses are constants, the
2648 // case involving two globals does not reach here and is instead handled in
2649 // constant folding.
2650 //
2651 // Two different allocas usually have different addresses...
2652 //
2653 // However, if there's an @llvm.stackrestore dynamically in between two
2654 // allocas, they may have the same address. It's tempting to reduce the
2655 // scope of the problem by only looking at *static* allocas here. That would
2656 // cover the majority of allocas while significantly reducing the likelihood
2657 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2658 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2659 // an entry block. Also, if we have a block that's not attached to a
2660 // function, we can't tell if it's "static" under the current definition.
2661 // Theoretically, this problem could be fixed by creating a new kind of
2662 // instruction kind specifically for static allocas. Such a new instruction
2663 // could be required to be at the top of the entry block, thus preventing it
2664 // from being subject to a @llvm.stackrestore. Instcombine could even
2665 // convert regular allocas into these special allocas. It'd be nifty.
2666 // However, until then, this problem remains open.
2667 //
2668 // So, we'll assume that two non-empty allocas have different addresses
2669 // for now.
2670 auto isByValArg = [](const Value *V) {
2671 const Argument *A = dyn_cast<Argument>(V);
2672 return A && A->hasByValAttr();
2673 };
2674
2675 // Byval args are backed by storage that does not overlap with each other,
2676 // allocas, or globals.
2677 if (isByValArg(V1))
2678 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2679 if (isByValArg(V2))
2680 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2681
2682 return isa<AllocaInst>(V1) &&
2683 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2684 }
2685
2686 // A significant optimization not implemented here is assuming that alloca
2687 // addresses are not equal to incoming argument values. They don't *alias*,
2688 // as we say, but that doesn't mean they aren't equal, so we take a
2689 // conservative approach.
2690 //
2691 // This is inspired in part by C++11 5.10p1:
2692 // "Two pointers of the same type compare equal if and only if they are both
2693 // null, both point to the same function, or both represent the same
2694 // address."
2695 //
2696 // This is pretty permissive.
2697 //
2698 // It's also partly due to C11 6.5.9p6:
2699 // "Two pointers compare equal if and only if both are null pointers, both are
2700 // pointers to the same object (including a pointer to an object and a
2701 // subobject at its beginning) or function, both are pointers to one past the
2702 // last element of the same array object, or one is a pointer to one past the
2703 // end of one array object and the other is a pointer to the start of a
2704 // different array object that happens to immediately follow the first array
2705 // object in the address space.)
2706 //
2707 // C11's version is more restrictive, however there's no reason why an argument
2708 // couldn't be a one-past-the-end value for a stack object in the caller and be
2709 // equal to the beginning of a stack object in the callee.
2710 //
2711 // If the C and C++ standards are ever made sufficiently restrictive in this
2712 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2713 // this optimization.
2714 static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
2715 Value *RHS, const SimplifyQuery &Q) {
2716 assert(LHS->getType() == RHS->getType() && "Must have same types");
2717 const DataLayout &DL = Q.DL;
2718 const TargetLibraryInfo *TLI = Q.TLI;
2719 const DominatorTree *DT = Q.DT;
2720 const Instruction *CxtI = Q.CxtI;
2721
2722 // We can only fold certain predicates on pointer comparisons.
2723 switch (Pred) {
2724 default:
2725 return nullptr;
2726
2727 // Equality comparisons are easy to fold.
2728 case CmpInst::ICMP_EQ:
2729 case CmpInst::ICMP_NE:
2730 break;
2731
2732 // We can only handle unsigned relational comparisons because 'inbounds' on
2733 // a GEP only protects against unsigned wrapping.
2734 case CmpInst::ICMP_UGT:
2735 case CmpInst::ICMP_UGE:
2736 case CmpInst::ICMP_ULT:
2737 case CmpInst::ICMP_ULE:
2738 // However, we have to switch them to their signed variants to handle
2739 // negative indices from the base pointer.
2740 Pred = ICmpInst::getSignedPredicate(Pred);
2741 break;
2742 }
2743
2744 // Strip off any constant offsets so that we can reason about them.
2745 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2746 // here and compare base addresses like AliasAnalysis does, however there are
2747 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2748 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2749 // doesn't need to guarantee pointer inequality when it says NoAlias.
2750
2751 // Even if a non-inbounds GEP occurs along the path we can still optimize
2752 // equality comparisons concerning the result.
2753 bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2754 unsigned IndexSize = DL.getIndexTypeSizeInBits(LHS->getType());
2755 APInt LHSOffset(IndexSize, 0), RHSOffset(IndexSize, 0);
2756 LHS = LHS->stripAndAccumulateConstantOffsets(DL, LHSOffset, AllowNonInbounds);
2757 RHS = RHS->stripAndAccumulateConstantOffsets(DL, RHSOffset, AllowNonInbounds);
2758
2759 // If LHS and RHS are related via constant offsets to the same base
2760 // value, we can replace it with an icmp which just compares the offsets.
2761 if (LHS == RHS)
2762 return ConstantInt::get(getCompareTy(LHS),
2763 ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2764
2765 // Various optimizations for (in)equality comparisons.
2766 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2767 // Different non-empty allocations that exist at the same time have
2768 // different addresses (if the program can tell). If the offsets are
2769 // within the bounds of their allocations (and not one-past-the-end!
2770 // so we can't use inbounds!), and their allocations aren't the same,
2771 // the pointers are not equal.
2772 if (haveNonOverlappingStorage(LHS, RHS)) {
2773 uint64_t LHSSize, RHSSize;
2774 ObjectSizeOpts Opts;
2775 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2776 auto *F = [](Value *V) -> Function * {
2777 if (auto *I = dyn_cast<Instruction>(V))
2778 return I->getFunction();
2779 if (auto *A = dyn_cast<Argument>(V))
2780 return A->getParent();
2781 return nullptr;
2782 }(LHS);
2783 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2784 if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2785 getObjectSize(RHS, RHSSize, DL, TLI, Opts)) {
2786 APInt Dist = LHSOffset - RHSOffset;
2787 if (Dist.isNonNegative() ? Dist.ult(LHSSize) : (-Dist).ult(RHSSize))
2788 return ConstantInt::get(getCompareTy(LHS),
2789 !CmpInst::isTrueWhenEqual(Pred));
2790 }
2791 }
2792
2793 // If one side of the equality comparison must come from a noalias call
2794 // (meaning a system memory allocation function), and the other side must
2795 // come from a pointer that cannot overlap with dynamically-allocated
2796 // memory within the lifetime of the current function (allocas, byval
2797 // arguments, globals), then determine the comparison result here.
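    // For example (illustrative): a pointer returned by a noalias call such as
    // malloc, compared for equality against a global variable, folds to false
    // here.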
2798 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2799 getUnderlyingObjects(LHS, LHSUObjs);
2800 getUnderlyingObjects(RHS, RHSUObjs);
2801
2802 // Is the set of underlying objects all noalias calls?
2803 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2804 return all_of(Objects, isNoAliasCall);
2805 };
2806
2807 // Is the set of underlying objects all things which must be disjoint from
2808 // noalias calls? We assume that indexing from such disjoint storage
2809 // into the heap is undefined, and thus offsets can be safely ignored.
2810 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2811 return all_of(Objects, ::isAllocDisjoint);
2812 };
2813
2814 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2815 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2816 return ConstantInt::get(getCompareTy(LHS),
2817 !CmpInst::isTrueWhenEqual(Pred));
2818
2819 // Fold comparisons for a non-escaping pointer even if the allocation call
2820 // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2821 // dynamic allocation call could be either of the operands. Note that
2822 // the other operand cannot be based on the alloc - if it were, then
2823 // the cmp itself would be a capture.
2824 Value *MI = nullptr;
2825 if (isAllocLikeFn(LHS, TLI) &&
2826 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2827 MI = LHS;
2828 else if (isAllocLikeFn(RHS, TLI) &&
2829 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2830 MI = RHS;
2831 if (MI) {
2832 // FIXME: This is incorrect, see PR54002. While we can assume that the
2833 // allocation is at an address that makes the comparison false, this
2834 // requires that *all* comparisons to that address be false, which
2835 // InstSimplify cannot guarantee.
2836 struct CustomCaptureTracker : public CaptureTracker {
2837 bool Captured = false;
2838 void tooManyUses() override { Captured = true; }
2839 bool captured(const Use *U) override {
2840 if (auto *ICmp = dyn_cast<ICmpInst>(U->getUser())) {
2841 // Comparison against value stored in global variable. Given the
2842 // pointer does not escape, its value cannot be guessed and stored
2843 // separately in a global variable.
2844 unsigned OtherIdx = 1 - U->getOperandNo();
2845 auto *LI = dyn_cast<LoadInst>(ICmp->getOperand(OtherIdx));
2846 if (LI && isa<GlobalVariable>(LI->getPointerOperand()))
2847 return false;
2848 }
2849
2850 Captured = true;
2851 return true;
2852 }
2853 };
2854 CustomCaptureTracker Tracker;
2855 PointerMayBeCaptured(MI, &Tracker);
2856 if (!Tracker.Captured)
2857 return ConstantInt::get(getCompareTy(LHS),
2858 CmpInst::isFalseWhenEqual(Pred));
2859 }
2860 }
2861
2862 // Otherwise, fail.
2863 return nullptr;
2864 }
2865
2866 /// Fold an icmp when its operands have i1 scalar type.
2867 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2868 Value *RHS, const SimplifyQuery &Q) {
2869 Type *ITy = getCompareTy(LHS); // The return type.
2870 Type *OpTy = LHS->getType(); // The operand type.
2871 if (!OpTy->isIntOrIntVectorTy(1))
2872 return nullptr;
2873
2874 // A boolean compared to true/false can be reduced in 14 out of the 20
2875 // (10 predicates * 2 constants) possible combinations. The other
2876 // 6 cases require a 'not' of the LHS.
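  // For example (illustrative): "icmp eq (xor i1 %x, true), false" reduces to
  // %x, since not(X) == 0 is equivalent to X != 0, i.e. X itself.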
2877
2878 auto ExtractNotLHS = [](Value *V) -> Value * {
2879 Value *X;
2880 if (match(V, m_Not(m_Value(X))))
2881 return X;
2882 return nullptr;
2883 };
2884
2885 if (match(RHS, m_Zero())) {
2886 switch (Pred) {
2887 case CmpInst::ICMP_NE: // X != 0 -> X
2888 case CmpInst::ICMP_UGT: // X >u 0 -> X
2889 case CmpInst::ICMP_SLT: // X <s 0 -> X
2890 return LHS;
2891
2892 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2893 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2894 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2895 if (Value *X = ExtractNotLHS(LHS))
2896 return X;
2897 break;
2898
2899 case CmpInst::ICMP_ULT: // X <u 0 -> false
2900 case CmpInst::ICMP_SGT: // X >s 0 -> false
2901 return getFalse(ITy);
2902
2903 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2904 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2905 return getTrue(ITy);
2906
2907 default:
2908 break;
2909 }
2910 } else if (match(RHS, m_One())) {
2911 switch (Pred) {
2912 case CmpInst::ICMP_EQ: // X == 1 -> X
2913 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2914 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2915 return LHS;
2916
2917 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
2918 case CmpInst::ICMP_ULT: // not(X) <u 1 -> X >=u 1 -> X
2919 case CmpInst::ICMP_SGT: // not(X) >s -1 -> X <=s -1 -> X
2920 if (Value *X = ExtractNotLHS(LHS))
2921 return X;
2922 break;
2923
2924 case CmpInst::ICMP_UGT: // X >u 1 -> false
2925 case CmpInst::ICMP_SLT: // X <s -1 -> false
2926 return getFalse(ITy);
2927
2928 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2929 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2930 return getTrue(ITy);
2931
2932 default:
2933 break;
2934 }
2935 }
2936
2937 switch (Pred) {
2938 default:
2939 break;
2940 case ICmpInst::ICMP_UGE:
2941 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2942 return getTrue(ITy);
2943 break;
2944 case ICmpInst::ICMP_SGE:
2945 /// For signed comparison, the values for an i1 are 0 and -1
2946 /// respectively. This maps into a truth table of:
2947 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2948 /// 0 | 0 | 1 (0 >= 0) | 1
2949 /// 0 | 1 | 1 (0 >= -1) | 1
2950 /// 1 | 0 | 0 (-1 >= 0) | 0
2951 /// 1 | 1 | 1 (-1 >= -1) | 1
2952 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2953 return getTrue(ITy);
2954 break;
2955 case ICmpInst::ICMP_ULE:
2956 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2957 return getTrue(ITy);
2958 break;
2959 case ICmpInst::ICMP_SLE:
2960 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2961 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2962 return getTrue(ITy);
2963 break;
2964 }
2965
2966 return nullptr;
2967 }
2968
2969 /// Try hard to fold icmp with zero RHS because this is a common case.
2970 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2971 Value *RHS, const SimplifyQuery &Q) {
2972 if (!match(RHS, m_Zero()))
2973 return nullptr;
2974
2975 Type *ITy = getCompareTy(LHS); // The return type.
2976 switch (Pred) {
2977 default:
2978 llvm_unreachable("Unknown ICmp predicate!");
2979 case ICmpInst::ICMP_ULT:
2980 return getFalse(ITy);
2981 case ICmpInst::ICMP_UGE:
2982 return getTrue(ITy);
2983 case ICmpInst::ICMP_EQ:
2984 case ICmpInst::ICMP_ULE:
2985 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2986 return getFalse(ITy);
2987 break;
2988 case ICmpInst::ICMP_NE:
2989 case ICmpInst::ICMP_UGT:
2990 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2991 return getTrue(ITy);
2992 break;
2993 case ICmpInst::ICMP_SLT: {
2994 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
2995 if (LHSKnown.isNegative())
2996 return getTrue(ITy);
2997 if (LHSKnown.isNonNegative())
2998 return getFalse(ITy);
2999 break;
3000 }
3001 case ICmpInst::ICMP_SLE: {
3002 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
3003 if (LHSKnown.isNegative())
3004 return getTrue(ITy);
3005 if (LHSKnown.isNonNegative() &&
3006 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3007 return getFalse(ITy);
3008 break;
3009 }
3010 case ICmpInst::ICMP_SGE: {
3011 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
3012 if (LHSKnown.isNegative())
3013 return getFalse(ITy);
3014 if (LHSKnown.isNonNegative())
3015 return getTrue(ITy);
3016 break;
3017 }
3018 case ICmpInst::ICMP_SGT: {
3019 KnownBits LHSKnown = computeKnownBits(LHS, /* Depth */ 0, Q);
3020 if (LHSKnown.isNegative())
3021 return getFalse(ITy);
3022 if (LHSKnown.isNonNegative() &&
3023 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3024 return getTrue(ITy);
3025 break;
3026 }
3027 }
3028
3029 return nullptr;
3030 }
3031
3032 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
3033 Value *RHS, const InstrInfoQuery &IIQ) {
3034 Type *ITy = getCompareTy(RHS); // The return type.
3035
3036 Value *X;
3037 // Sign-bit checks can be optimized to true/false after unsigned
3038 // floating-point casts:
3039 // icmp slt (bitcast (uitofp X)), 0 --> false
3040 // icmp sgt (bitcast (uitofp X)), -1 --> true
3041 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
3042 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
3043 return ConstantInt::getFalse(ITy);
3044 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
3045 return ConstantInt::getTrue(ITy);
3046 }
3047
3048 const APInt *C;
3049 if (!match(RHS, m_APIntAllowUndef(C)))
3050 return nullptr;
3051
3052 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3053 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
3054 if (RHS_CR.isEmptySet())
3055 return ConstantInt::getFalse(ITy);
3056 if (RHS_CR.isFullSet())
3057 return ConstantInt::getTrue(ITy);
3058
3059 ConstantRange LHS_CR =
3060 computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
3061 if (!LHS_CR.isFullSet()) {
3062 if (RHS_CR.contains(LHS_CR))
3063 return ConstantInt::getTrue(ITy);
3064 if (RHS_CR.inverse().contains(LHS_CR))
3065 return ConstantInt::getFalse(ITy);
3066 }
3067
3068 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3069 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
3070 const APInt *MulC;
3071 if (IIQ.UseInstrInfo && ICmpInst::isEquality(Pred) &&
3072 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3073 *MulC != 0 && C->urem(*MulC) != 0) ||
3074 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3075 *MulC != 0 && C->srem(*MulC) != 0)))
3076 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3077
3078 return nullptr;
3079 }
3080
3081 static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
3082 BinaryOperator *LBO, Value *RHS,
3083 const SimplifyQuery &Q,
3084 unsigned MaxRecurse) {
3085 Type *ITy = getCompareTy(RHS); // The return type.
3086
3087 Value *Y = nullptr;
3088 // icmp pred (or X, Y), X
3089 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3090 if (Pred == ICmpInst::ICMP_ULT)
3091 return getFalse(ITy);
3092 if (Pred == ICmpInst::ICMP_UGE)
3093 return getTrue(ITy);
3094
3095 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3096 KnownBits RHSKnown = computeKnownBits(RHS, /* Depth */ 0, Q);
3097 KnownBits YKnown = computeKnownBits(Y, /* Depth */ 0, Q);
3098 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3099 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3100 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3101 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3102 }
3103 }
3104
3105 // icmp pred (and X, Y), X
3106 if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
3107 if (Pred == ICmpInst::ICMP_UGT)
3108 return getFalse(ITy);
3109 if (Pred == ICmpInst::ICMP_ULE)
3110 return getTrue(ITy);
3111 }
3112
3113 // icmp pred (urem X, Y), Y
3114 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3115 switch (Pred) {
3116 default:
3117 break;
3118 case ICmpInst::ICMP_SGT:
3119 case ICmpInst::ICMP_SGE: {
3120 KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
3121 if (!Known.isNonNegative())
3122 break;
3123 [[fallthrough]];
3124 }
3125 case ICmpInst::ICMP_EQ:
3126 case ICmpInst::ICMP_UGT:
3127 case ICmpInst::ICMP_UGE:
3128 return getFalse(ITy);
3129 case ICmpInst::ICMP_SLT:
3130 case ICmpInst::ICMP_SLE: {
3131 KnownBits Known = computeKnownBits(RHS, /* Depth */ 0, Q);
3132 if (!Known.isNonNegative())
3133 break;
3134 [[fallthrough]];
3135 }
3136 case ICmpInst::ICMP_NE:
3137 case ICmpInst::ICMP_ULT:
3138 case ICmpInst::ICMP_ULE:
3139 return getTrue(ITy);
3140 }
3141 }
3142
3143 // icmp pred (urem X, Y), X
3144 if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
3145 if (Pred == ICmpInst::ICMP_ULE)
3146 return getTrue(ITy);
3147 if (Pred == ICmpInst::ICMP_UGT)
3148 return getFalse(ITy);
3149 }
3150
3151 // x >>u y <=u x --> true.
3152 // x >>u y >u x --> false.
3153 // x udiv y <=u x --> true.
3154 // x udiv y >u x --> false.
3155 if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
3156 match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
3157 // icmp pred (X op Y), X
3158 if (Pred == ICmpInst::ICMP_UGT)
3159 return getFalse(ITy);
3160 if (Pred == ICmpInst::ICMP_ULE)
3161 return getTrue(ITy);
3162 }
3163
3164 // If x is nonzero:
3165 // x >>u C <u x --> true for C != 0.
3166 // x >>u C != x --> true for C != 0.
3167 // x >>u C >=u x --> false for C != 0.
3168 // x >>u C == x --> false for C != 0.
3169 // x udiv C <u x --> true for C != 1.
3170 // x udiv C != x --> true for C != 1.
3171 // x udiv C >=u x --> false for C != 1.
3172 // x udiv C == x --> false for C != 1.
3173 // TODO: allow non-constant shift amount/divisor
3174 const APInt *C;
3175 if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3176 (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3177 if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
3178 switch (Pred) {
3179 default:
3180 break;
3181 case ICmpInst::ICMP_EQ:
3182 case ICmpInst::ICMP_UGE:
3183 return getFalse(ITy);
3184 case ICmpInst::ICMP_NE:
3185 case ICmpInst::ICMP_ULT:
3186 return getTrue(ITy);
3187 case ICmpInst::ICMP_UGT:
3188 case ICmpInst::ICMP_ULE:
3189 // UGT/ULE are handled by the more general case just above
3190 llvm_unreachable("Unexpected UGT/ULE, should have been handled");
3191 }
3192 }
3193 }
3194
3195 // (x*C1)/C2 <= x for C1 <= C2.
3196 // This holds even if the multiplication overflows: Assume that x != 0 and
3197 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3198 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
3199 //
3200 // Additionally, either the multiplication or the division might be
3201 // represented as a shift:
3202 // (x*C1)>>C2 <= x for C1 <= 2**C2.
3203 // (x<<C1)/C2 <= x for 2**C1 <= C2.
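  // For example (illustrative): "icmp ule (udiv (mul i32 %x, 3), 4), %x" folds
  // to true here, since C1 = 3 <= C2 = 4.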
3204 const APInt *C1, *C2;
3205 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3206 C1->ule(*C2)) ||
3207 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3208 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3209 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3210 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3211 if (Pred == ICmpInst::ICMP_UGT)
3212 return getFalse(ITy);
3213 if (Pred == ICmpInst::ICMP_ULE)
3214 return getTrue(ITy);
3215 }
3216
3217 // (sub C, X) == X, C is odd --> false
3218 // (sub C, X) != X, C is odd --> true
3219 if (match(LBO, m_Sub(m_APIntAllowUndef(C), m_Specific(RHS))) &&
3220 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3221 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3222
3223 return nullptr;
3224 }
3225
3226 // If only one of the icmp's operands has NSW flags, try to prove that:
3227 //
3228 // icmp slt (x + C1), (x +nsw C2)
3229 //
3230 // is equivalent to:
3231 //
3232 // icmp slt C1, C2
3233 //
3234 // which is true if x + C2 has the NSW flags set and:
3235 // *) C1 < C2 && C1 >= 0, or
3236 // *) C2 < C1 && C1 <= 0.
3237 //
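// For example (illustrative): "icmp slt (add i32 %x, 1), (add nsw i32 %x, 3)"
// satisfies the first condition (1 <s 3 and 1 >= 0), so it simplifies to
// "icmp slt i32 1, 3", i.e. true.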
3238 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
3239 Value *RHS, const InstrInfoQuery &IIQ) {
3240 // TODO: only support icmp slt for now.
3241 if (Pred != CmpInst::ICMP_SLT || !IIQ.UseInstrInfo)
3242 return false;
3243
3244 // Canonicalize nsw add as RHS.
3245 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3246 std::swap(LHS, RHS);
3247 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3248 return false;
3249
3250 Value *X;
3251 const APInt *C1, *C2;
3252 if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
3253 !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
3254 return false;
3255
3256 return (C1->slt(*C2) && C1->isNonNegative()) ||
3257 (C2->slt(*C1) && C1->isNonPositive());
3258 }
3259
3260 /// TODO: A large part of this logic is duplicated in InstCombine's
3261 /// foldICmpBinOp(). We should be able to share that and avoid the code
3262 /// duplication.
3263 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
3264 Value *RHS, const SimplifyQuery &Q,
3265 unsigned MaxRecurse) {
3266 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3267 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3268 if (MaxRecurse && (LBO || RBO)) {
3269 // Analyze the case when either LHS or RHS is an add instruction.
3270 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3271 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3272 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3273 if (LBO && LBO->getOpcode() == Instruction::Add) {
3274 A = LBO->getOperand(0);
3275 B = LBO->getOperand(1);
3276 NoLHSWrapProblem =
3277 ICmpInst::isEquality(Pred) ||
3278 (CmpInst::isUnsigned(Pred) &&
3279 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3280 (CmpInst::isSigned(Pred) &&
3281 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3282 }
3283 if (RBO && RBO->getOpcode() == Instruction::Add) {
3284 C = RBO->getOperand(0);
3285 D = RBO->getOperand(1);
3286 NoRHSWrapProblem =
3287 ICmpInst::isEquality(Pred) ||
3288 (CmpInst::isUnsigned(Pred) &&
3289 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3290 (CmpInst::isSigned(Pred) &&
3291 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3292 }
3293
3294 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3295 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3296 if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3297 Constant::getNullValue(RHS->getType()), Q,
3298 MaxRecurse - 1))
3299 return V;
3300
3301 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3302 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3303 if (Value *V =
3304 simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3305 C == LHS ? D : C, Q, MaxRecurse - 1))
3306 return V;
3307
3308 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3309 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3310 trySimplifyICmpWithAdds(Pred, LHS, RHS, Q.IIQ);
3311 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3312 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3313 Value *Y, *Z;
3314 if (A == C) {
3315 // C + B == C + D -> B == D
3316 Y = B;
3317 Z = D;
3318 } else if (A == D) {
3319 // D + B == C + D -> B == C
3320 Y = B;
3321 Z = C;
3322 } else if (B == C) {
3323 // A + C == C + D -> A == D
3324 Y = A;
3325 Z = D;
3326 } else {
3327 assert(B == D);
3328 // A + D == C + D -> A == C
3329 Y = A;
3330 Z = C;
3331 }
3332 if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3333 return V;
3334 }
3335 }
3336
3337 if (LBO)
3338 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3339 return V;
3340
3341 if (RBO)
3342 if (Value *V = simplifyICmpWithBinOpOnLHS(
3343 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3344 return V;
3345
3346 // 0 - (zext X) pred C
3347 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3348 const APInt *C;
3349 if (match(RHS, m_APInt(C))) {
3350 if (C->isStrictlyPositive()) {
3351 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3352 return ConstantInt::getTrue(getCompareTy(RHS));
3353 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3354 return ConstantInt::getFalse(getCompareTy(RHS));
3355 }
3356 if (C->isNonNegative()) {
3357 if (Pred == ICmpInst::ICMP_SLE)
3358 return ConstantInt::getTrue(getCompareTy(RHS));
3359 if (Pred == ICmpInst::ICMP_SGT)
3360 return ConstantInt::getFalse(getCompareTy(RHS));
3361 }
3362 }
3363 }
3364
3365 // If C2 is a power-of-2 and C is not:
3366 // (C2 << X) == C --> false
3367 // (C2 << X) != C --> true
3368 const APInt *C;
3369 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3370 match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3371 // C2 << X can equal zero in some circumstances.
3372 // This simplification might be unsafe if C is zero.
3373 //
3374 // We know it is safe if:
3375 // - The shift is nsw. We can't shift out the one bit.
3376 // - The shift is nuw. We can't shift out the one bit.
3377 // - C2 is one.
3378 // - C isn't zero.
3379 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3380 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3381 match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3382 if (Pred == ICmpInst::ICMP_EQ)
3383 return ConstantInt::getFalse(getCompareTy(RHS));
3384 if (Pred == ICmpInst::ICMP_NE)
3385 return ConstantInt::getTrue(getCompareTy(RHS));
3386 }
3387 }
3388
3389 // If C is a power-of-2:
3390 // (C << X) >u 0x8000 --> false
3391 // (C << X) <=u 0x8000 --> true
3392 if (match(LHS, m_Shl(m_Power2(), m_Value())) && match(RHS, m_SignMask())) {
3393 if (Pred == ICmpInst::ICMP_UGT)
3394 return ConstantInt::getFalse(getCompareTy(RHS));
3395 if (Pred == ICmpInst::ICMP_ULE)
3396 return ConstantInt::getTrue(getCompareTy(RHS));
3397 }
3398
3399 if (!MaxRecurse || !LBO || !RBO || LBO->getOpcode() != RBO->getOpcode())
3400 return nullptr;
3401
3402 if (LBO->getOperand(0) == RBO->getOperand(0)) {
3403 switch (LBO->getOpcode()) {
3404 default:
3405 break;
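    // (shl nuw X, A) pred (shl nuw X, B) -> A pred B when X is known non-zero;
    // for signed predicates, nsw is also required on both shifts.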
3406 case Instruction::Shl: {
3407 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3408 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3409 if (!NUW || (ICmpInst::isSigned(Pred) && !NSW) ||
3410 !isKnownNonZero(LBO->getOperand(0), Q.DL))
3411 break;
3412 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(1),
3413 RBO->getOperand(1), Q, MaxRecurse - 1))
3414 return V;
3415 break;
3416 }
3417 // If C1 & C2 == C1, A = X and/or C1, B = X and/or C2:
3418 // icmp ule A, B -> true
3419 // icmp ugt A, B -> false
3420 // icmp sle A, B -> true (C1 and C2 are the same sign)
3421 // icmp sgt A, B -> false (C1 and C2 are the same sign)
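    // For example (illustrative): "icmp ule (and i8 %x, 15), (and i8 %x, 255)"
    // folds to true, since 15 & 255 == 15.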
3422 case Instruction::And:
3423 case Instruction::Or: {
3424 const APInt *C1, *C2;
3425 if (ICmpInst::isRelational(Pred) &&
3426 match(LBO->getOperand(1), m_APInt(C1)) &&
3427 match(RBO->getOperand(1), m_APInt(C2))) {
3428 if (!C1->isSubsetOf(*C2)) {
3429 std::swap(C1, C2);
3430 Pred = ICmpInst::getSwappedPredicate(Pred);
3431 }
3432 if (C1->isSubsetOf(*C2)) {
3433 if (Pred == ICmpInst::ICMP_ULE)
3434 return ConstantInt::getTrue(getCompareTy(LHS));
3435 if (Pred == ICmpInst::ICMP_UGT)
3436 return ConstantInt::getFalse(getCompareTy(LHS));
3437 if (C1->isNonNegative() == C2->isNonNegative()) {
3438 if (Pred == ICmpInst::ICMP_SLE)
3439 return ConstantInt::getTrue(getCompareTy(LHS));
3440 if (Pred == ICmpInst::ICMP_SGT)
3441 return ConstantInt::getFalse(getCompareTy(LHS));
3442 }
3443 }
3444 }
3445 break;
3446 }
3447 }
3448 }
3449
3450 if (LBO->getOperand(1) == RBO->getOperand(1)) {
3451 switch (LBO->getOpcode()) {
3452 default:
3453 break;
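    // (udiv exact X, Z) pred (udiv exact Y, Z) -> X pred Y, and likewise for
    // "lshr exact", for equality and unsigned predicates.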
3454 case Instruction::UDiv:
3455 case Instruction::LShr:
3456 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3457 !Q.IIQ.isExact(RBO))
3458 break;
3459 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3460 RBO->getOperand(0), Q, MaxRecurse - 1))
3461 return V;
3462 break;
3463 case Instruction::SDiv:
3464 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3465 !Q.IIQ.isExact(RBO))
3466 break;
3467 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3468 RBO->getOperand(0), Q, MaxRecurse - 1))
3469 return V;
3470 break;
3471 case Instruction::AShr:
3472 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3473 break;
3474 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3475 RBO->getOperand(0), Q, MaxRecurse - 1))
3476 return V;
3477 break;
3478 case Instruction::Shl: {
3479 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3480 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3481 if (!NUW && !NSW)
3482 break;
3483 if (!NSW && ICmpInst::isSigned(Pred))
3484 break;
3485 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3486 RBO->getOperand(0), Q, MaxRecurse - 1))
3487 return V;
3488 break;
3489 }
3490 }
3491 }
3492 return nullptr;
3493 }
3494
3495 /// Simplify integer comparisons where at least one operand of the compare
3496 /// matches an integer min/max idiom.
3497 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3498 Value *RHS, const SimplifyQuery &Q,
3499 unsigned MaxRecurse) {
3500 Type *ITy = getCompareTy(LHS); // The return type.
3501 Value *A, *B;
3502 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3503 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3504
3505 // Signed variants on "max(a,b)>=a -> true".
3506 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3507 if (A != RHS)
3508 std::swap(A, B); // smax(A, B) pred A.
3509 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3510 // We analyze this as smax(A, B) pred A.
3511 P = Pred;
3512 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3513 (A == LHS || B == LHS)) {
3514 if (A != LHS)
3515 std::swap(A, B); // A pred smax(A, B).
3516 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3517 // We analyze this as smax(A, B) swapped-pred A.
3518 P = CmpInst::getSwappedPredicate(Pred);
3519 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3520 (A == RHS || B == RHS)) {
3521 if (A != RHS)
3522 std::swap(A, B); // smin(A, B) pred A.
3523 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3524 // We analyze this as smax(-A, -B) swapped-pred -A.
3525 // Note that we do not need to actually form -A or -B thanks to EqP.
3526 P = CmpInst::getSwappedPredicate(Pred);
3527 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3528 (A == LHS || B == LHS)) {
3529 if (A != LHS)
3530 std::swap(A, B); // A pred smin(A, B).
3531 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3532 // We analyze this as smax(-A, -B) pred -A.
3533 // Note that we do not need to actually form -A or -B thanks to EqP.
3534 P = Pred;
3535 }
3536 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3537 // Cases correspond to "max(A, B) p A".
3538 switch (P) {
3539 default:
3540 break;
3541 case CmpInst::ICMP_EQ:
3542 case CmpInst::ICMP_SLE:
3543 // Equivalent to "A EqP B". This may be the same as the condition tested
3544 // in the max/min; if so, we can just return that.
3545 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3546 return V;
3547 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3548 return V;
3549 // Otherwise, see if "A EqP B" simplifies.
3550 if (MaxRecurse)
3551 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3552 return V;
3553 break;
3554 case CmpInst::ICMP_NE:
3555 case CmpInst::ICMP_SGT: {
3556 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3557 // Equivalent to "A InvEqP B". This may be the same as the condition
3558 // tested in the max/min; if so, we can just return that.
3559 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3560 return V;
3561 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3562 return V;
3563 // Otherwise, see if "A InvEqP B" simplifies.
3564 if (MaxRecurse)
3565 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3566 return V;
3567 break;
3568 }
3569 case CmpInst::ICMP_SGE:
3570 // Always true.
3571 return getTrue(ITy);
3572 case CmpInst::ICMP_SLT:
3573 // Always false.
3574 return getFalse(ITy);
3575 }
3576 }
3577
3578 // Unsigned variants on "max(a,b)>=a -> true".
3579 P = CmpInst::BAD_ICMP_PREDICATE;
3580 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3581 if (A != RHS)
3582 std::swap(A, B); // umax(A, B) pred A.
3583 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3584 // We analyze this as umax(A, B) pred A.
3585 P = Pred;
3586 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3587 (A == LHS || B == LHS)) {
3588 if (A != LHS)
3589 std::swap(A, B); // A pred umax(A, B).
3590 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3591 // We analyze this as umax(A, B) swapped-pred A.
3592 P = CmpInst::getSwappedPredicate(Pred);
3593 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3594 (A == RHS || B == RHS)) {
3595 if (A != RHS)
3596 std::swap(A, B); // umin(A, B) pred A.
3597 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3598 // We analyze this as umax(-A, -B) swapped-pred -A.
3599 // Note that we do not need to actually form -A or -B thanks to EqP.
3600 P = CmpInst::getSwappedPredicate(Pred);
3601 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3602 (A == LHS || B == LHS)) {
3603 if (A != LHS)
3604 std::swap(A, B); // A pred umin(A, B).
3605 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3606 // We analyze this as umax(-A, -B) pred -A.
3607 // Note that we do not need to actually form -A or -B thanks to EqP.
3608 P = Pred;
3609 }
3610 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3611 // Cases correspond to "max(A, B) p A".
3612 switch (P) {
3613 default:
3614 break;
3615 case CmpInst::ICMP_EQ:
3616 case CmpInst::ICMP_ULE:
3617 // Equivalent to "A EqP B". This may be the same as the condition tested
3618 // in the max/min; if so, we can just return that.
3619 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3620 return V;
3621 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3622 return V;
3623 // Otherwise, see if "A EqP B" simplifies.
3624 if (MaxRecurse)
3625 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3626 return V;
3627 break;
3628 case CmpInst::ICMP_NE:
3629 case CmpInst::ICMP_UGT: {
3630 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3631 // Equivalent to "A InvEqP B". This may be the same as the condition
3632 // tested in the max/min; if so, we can just return that.
3633 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3634 return V;
3635 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3636 return V;
3637 // Otherwise, see if "A InvEqP B" simplifies.
3638 if (MaxRecurse)
3639 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3640 return V;
3641 break;
3642 }
3643 case CmpInst::ICMP_UGE:
3644 return getTrue(ITy);
3645 case CmpInst::ICMP_ULT:
3646 return getFalse(ITy);
3647 }
3648 }
3649
3650 // Comparing one min and one max with a common operand?
3651 // Canonicalize min operand to RHS.
3652 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3653 match(LHS, m_SMin(m_Value(), m_Value()))) {
3654 std::swap(LHS, RHS);
3655 Pred = ICmpInst::getSwappedPredicate(Pred);
3656 }
3657
3658 Value *C, *D;
3659 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3660 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3661 (A == C || A == D || B == C || B == D)) {
3662 // smax(A, B) >=s smin(A, D) --> true
3663 if (Pred == CmpInst::ICMP_SGE)
3664 return getTrue(ITy);
3665 // smax(A, B) <s smin(A, D) --> false
3666 if (Pred == CmpInst::ICMP_SLT)
3667 return getFalse(ITy);
3668 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3669 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3670 (A == C || A == D || B == C || B == D)) {
3671 // umax(A, B) >=u umin(A, D) --> true
3672 if (Pred == CmpInst::ICMP_UGE)
3673 return getTrue(ITy);
3674 // umax(A, B) <u umin(A, D) --> false
3675 if (Pred == CmpInst::ICMP_ULT)
3676 return getFalse(ITy);
3677 }
3678
3679 return nullptr;
3680 }
3681
3682 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3683 Value *LHS, Value *RHS,
3684 const SimplifyQuery &Q) {
3685 // Gracefully handle instructions that have not been inserted yet.
3686 if (!Q.AC || !Q.CxtI)
3687 return nullptr;
3688
3689 for (Value *AssumeBaseOp : {LHS, RHS}) {
3690 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3691 if (!AssumeVH)
3692 continue;
3693
3694 CallInst *Assume = cast<CallInst>(AssumeVH);
3695 if (std::optional<bool> Imp = isImpliedCondition(
3696 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3697 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3698 return ConstantInt::get(getCompareTy(LHS), *Imp);
3699 }
3700 }
3701
3702 return nullptr;
3703 }
3704
3705 static Value *simplifyICmpWithIntrinsicOnLHS(CmpInst::Predicate Pred,
3706 Value *LHS, Value *RHS) {
3707 auto *II = dyn_cast<IntrinsicInst>(LHS);
3708 if (!II)
3709 return nullptr;
3710
3711 switch (II->getIntrinsicID()) {
3712 case Intrinsic::uadd_sat:
3713 // uadd.sat(X, Y) uge X, uadd.sat(X, Y) uge Y
3714 if (II->getArgOperand(0) == RHS || II->getArgOperand(1) == RHS) {
3715 if (Pred == ICmpInst::ICMP_UGE)
3716 return ConstantInt::getTrue(getCompareTy(II));
3717 if (Pred == ICmpInst::ICMP_ULT)
3718 return ConstantInt::getFalse(getCompareTy(II));
3719 }
3720 return nullptr;
3721 case Intrinsic::usub_sat:
3722 // usub.sat(X, Y) ule X
3723 if (II->getArgOperand(0) == RHS) {
3724 if (Pred == ICmpInst::ICMP_ULE)
3725 return ConstantInt::getTrue(getCompareTy(II));
3726 if (Pred == ICmpInst::ICMP_UGT)
3727 return ConstantInt::getFalse(getCompareTy(II));
3728 }
3729 return nullptr;
3730 default:
3731 return nullptr;
3732 }
3733 }
3734
3735 /// Given operands for an ICmpInst, see if we can fold the result.
3736 /// If not, this returns null.
3737 static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3738 const SimplifyQuery &Q, unsigned MaxRecurse) {
3739 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3740 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3741
3742 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3743 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3744 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3745
3746 // If we have a constant, make sure it is on the RHS.
3747 std::swap(LHS, RHS);
3748 Pred = CmpInst::getSwappedPredicate(Pred);
3749 }
3750 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3751
3752 Type *ITy = getCompareTy(LHS); // The return type.
3753
3754 // icmp poison, X -> poison
3755 if (isa<PoisonValue>(RHS))
3756 return PoisonValue::get(ITy);
3757
3758 // For EQ and NE, we can always pick a value for the undef to make the
3759 // predicate pass or fail, so we can return undef.
3760 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3761 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3762 return UndefValue::get(ITy);
3763
3764 // icmp X, X -> true/false
3765 // icmp X, undef -> true/false because undef could be X.
3766 if (LHS == RHS || Q.isUndefValue(RHS))
3767 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3768
3769 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3770 return V;
3771
3772 // TODO: Sink/common this with other potentially expensive calls that use
3773 // ValueTracking? See comment below for isKnownNonEqual().
3774 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3775 return V;
3776
3777 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3778 return V;
3779
3780 // If both operands have range metadata, use the metadata
3781 // to simplify the comparison.
3782 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3783 auto RHS_Instr = cast<Instruction>(RHS);
3784 auto LHS_Instr = cast<Instruction>(LHS);
3785
3786 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3787 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3788 auto RHS_CR = getConstantRangeFromMetadata(
3789 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3790 auto LHS_CR = getConstantRangeFromMetadata(
3791 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3792
3793 if (LHS_CR.icmp(Pred, RHS_CR))
3794 return ConstantInt::getTrue(RHS->getContext());
3795
3796 if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3797 return ConstantInt::getFalse(RHS->getContext());
3798 }
3799 }
3800
3801 // Compare of cast, for example (zext X) != 0 -> X != 0
3802 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3803 Instruction *LI = cast<CastInst>(LHS);
3804 Value *SrcOp = LI->getOperand(0);
3805 Type *SrcTy = SrcOp->getType();
3806 Type *DstTy = LI->getType();
3807
3808 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3809 // if the integer type is the same size as the pointer type.
3810 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3811 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3812 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3813 // Transfer the cast to the constant.
3814 if (Value *V = simplifyICmpInst(Pred, SrcOp,
3815 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3816 Q, MaxRecurse - 1))
3817 return V;
3818 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3819 if (RI->getOperand(0)->getType() == SrcTy)
3820 // Compare without the cast.
3821 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3822 MaxRecurse - 1))
3823 return V;
3824 }
3825 }
3826
3827 if (isa<ZExtInst>(LHS)) {
3828 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3829 // same type.
3830 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3831 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3832 // Compare X and Y. Note that signed predicates become unsigned.
3833 if (Value *V =
3834 simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3835 RI->getOperand(0), Q, MaxRecurse - 1))
3836 return V;
3837 }
3838 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3839 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3840 if (SrcOp == RI->getOperand(0)) {
3841 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3842 return ConstantInt::getTrue(ITy);
3843 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3844 return ConstantInt::getFalse(ITy);
3845 }
3846 }
3847 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3848 // too. If not, then try to deduce the result of the comparison.
3849 else if (match(RHS, m_ImmConstant())) {
3850 Constant *C = dyn_cast<Constant>(RHS);
3851 assert(C != nullptr);
3852
3853 // Compute the constant that would result if we truncated to SrcTy and
3854 // then re-extended to DstTy.
3855 Constant *Trunc =
3856 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3857 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3858 Constant *RExt =
3859 ConstantFoldCastOperand(CastInst::ZExt, Trunc, DstTy, Q.DL);
3860 assert(RExt && "Constant-fold of ImmConstant should not fail");
3861 Constant *AnyEq =
3862 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3863 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3864
3865 // If the re-extended constant didn't change any of the elements then
3866 // this is effectively also a case of comparing two zero-extended
3867 // values.
3868 if (AnyEq->isAllOnesValue() && MaxRecurse)
3869 if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3870 SrcOp, Trunc, Q, MaxRecurse - 1))
3871 return V;
3872
3873 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3874 // there. Use this to work out the result of the comparison.
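      // For example (illustrative): for "icmp ult (zext i8 %x to i32), 300",
      // trunc(300) re-extends to 44 != 300, and since a zext'd i8 is at most
      // 255 < 300 the compare folds to true (all-ones).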
3875 if (AnyEq->isNullValue()) {
3876 switch (Pred) {
3877 default:
3878 llvm_unreachable("Unknown ICmp predicate!");
3879 // LHS <u RHS.
3880 case ICmpInst::ICMP_EQ:
3881 case ICmpInst::ICMP_UGT:
3882 case ICmpInst::ICMP_UGE:
3883 return Constant::getNullValue(ITy);
3884
3885 case ICmpInst::ICMP_NE:
3886 case ICmpInst::ICMP_ULT:
3887 case ICmpInst::ICMP_ULE:
3888 return Constant::getAllOnesValue(ITy);
3889
3890 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3891 // is non-negative then LHS <s RHS.
3892 case ICmpInst::ICMP_SGT:
3893 case ICmpInst::ICMP_SGE:
3894 return ConstantFoldCompareInstOperands(
3895 ICmpInst::ICMP_SLT, C, Constant::getNullValue(C->getType()),
3896 Q.DL);
3897 case ICmpInst::ICMP_SLT:
3898 case ICmpInst::ICMP_SLE:
3899 return ConstantFoldCompareInstOperands(
3900 ICmpInst::ICMP_SGE, C, Constant::getNullValue(C->getType()),
3901 Q.DL);
3902 }
3903 }
3904 }
3905 }
3906
3907 if (isa<SExtInst>(LHS)) {
3908 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3909 // same type.
3910 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3911 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3912 // Compare X and Y. Note that the predicate does not change.
3913 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3914 MaxRecurse - 1))
3915 return V;
3916 }
3917 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3918 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3919 if (SrcOp == RI->getOperand(0)) {
3920 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3921 return ConstantInt::getTrue(ITy);
3922 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3923 return ConstantInt::getFalse(ITy);
3924 }
3925 }
3926 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3927 // too. If not, then try to deduce the result of the comparison.
3928 else if (match(RHS, m_ImmConstant())) {
3929 Constant *C = cast<Constant>(RHS);
3930
3931 // Compute the constant that would result if we truncated to SrcTy and
3932 // then re-extended to DstTy.
3933 Constant *Trunc =
3934 ConstantFoldCastOperand(Instruction::Trunc, C, SrcTy, Q.DL);
3935 assert(Trunc && "Constant-fold of ImmConstant should not fail");
3936 Constant *RExt =
3937 ConstantFoldCastOperand(CastInst::SExt, Trunc, DstTy, Q.DL);
3938 assert(RExt && "Constant-fold of ImmConstant should not fail");
3939 Constant *AnyEq =
3940 ConstantFoldCompareInstOperands(ICmpInst::ICMP_EQ, RExt, C, Q.DL);
3941 assert(AnyEq && "Constant-fold of ImmConstant should not fail");
3942
3943 // If the re-extended constant didn't change then this is effectively
3944 // also a case of comparing two sign-extended values.
3945 if (AnyEq->isAllOnesValue() && MaxRecurse)
3946 if (Value *V =
3947 simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3948 return V;
3949
3950 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3951 // bits there. Use this to work out the result of the comparison.
3952 if (AnyEq->isNullValue()) {
3953 switch (Pred) {
3954 default:
3955 llvm_unreachable("Unknown ICmp predicate!");
3956 case ICmpInst::ICMP_EQ:
3957 return Constant::getNullValue(ITy);
3958 case ICmpInst::ICMP_NE:
3959 return Constant::getAllOnesValue(ITy);
3960
3961 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3962 // LHS >s RHS.
3963 case ICmpInst::ICMP_SGT:
3964 case ICmpInst::ICMP_SGE:
3965 return ConstantExpr::getICmp(ICmpInst::ICMP_SLT, C,
3966 Constant::getNullValue(C->getType()));
3967 case ICmpInst::ICMP_SLT:
3968 case ICmpInst::ICMP_SLE:
3969 return ConstantExpr::getICmp(ICmpInst::ICMP_SGE, C,
3970 Constant::getNullValue(C->getType()));
3971
3972 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3973 // LHS >u RHS.
3974 case ICmpInst::ICMP_UGT:
3975 case ICmpInst::ICMP_UGE:
3976 // Comparison is true iff the LHS <s 0.
3977 if (MaxRecurse)
3978 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3979 Constant::getNullValue(SrcTy), Q,
3980 MaxRecurse - 1))
3981 return V;
3982 break;
3983 case ICmpInst::ICMP_ULT:
3984 case ICmpInst::ICMP_ULE:
3985 // Comparison is true iff the LHS >=s 0.
3986 if (MaxRecurse)
3987 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3988 Constant::getNullValue(SrcTy), Q,
3989 MaxRecurse - 1))
3990 return V;
3991 break;
3992 }
3993 }
3994 }
3995 }
3996 }
3997
3998 // icmp eq|ne X, Y -> false|true if X != Y
3999 // This is potentially expensive, and we have already computed known bits for
4000 // compares with 0 above, so only try this for a non-zero compare.
4001 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
4002 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
4003 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
4004 }
4005
4006 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
4007 return V;
4008
4009 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
4010 return V;
4011
4012 if (Value *V = simplifyICmpWithIntrinsicOnLHS(Pred, LHS, RHS))
4013 return V;
4014 if (Value *V = simplifyICmpWithIntrinsicOnLHS(
4015 ICmpInst::getSwappedPredicate(Pred), RHS, LHS))
4016 return V;
4017
4018 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
4019 return V;
4020
4021 if (std::optional<bool> Res =
4022 isImpliedByDomCondition(Pred, LHS, RHS, Q.CxtI, Q.DL))
4023 return ConstantInt::getBool(ITy, *Res);
4024
4025 // Simplify comparisons of related pointers using a powerful, recursive
4026 // GEP-walk when we have target data available.
4027 if (LHS->getType()->isPointerTy())
4028 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
4029 return C;
4030 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
4031 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
4032 if (CLHS->getPointerOperandType() == CRHS->getPointerOperandType() &&
4033 Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
4034 Q.DL.getTypeSizeInBits(CLHS->getType()))
4035 if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
4036 CRHS->getPointerOperand(), Q))
4037 return C;
4038
4039 // If the comparison is with the result of a select instruction, check whether
4040 // comparing with either branch of the select always yields the same value.
4041 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4042 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4043 return V;
4044
4045 // If the comparison is with the result of a phi instruction, check whether
4046 // doing the compare with each incoming phi value yields a common result.
4047 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4048 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4049 return V;
4050
4051 return nullptr;
4052 }
4053
4054 Value *llvm::simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4055 const SimplifyQuery &Q) {
4056 return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
4057 }
4058
4059 /// Given operands for an FCmpInst, see if we can fold the result.
4060 /// If not, this returns null.
4061 static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4062 FastMathFlags FMF, const SimplifyQuery &Q,
4063 unsigned MaxRecurse) {
4064 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
4065 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
4066
4067 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
4068 if (Constant *CRHS = dyn_cast<Constant>(RHS))
4069 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
4070 Q.CxtI);
4071
4072 // If we have a constant, make sure it is on the RHS.
4073 std::swap(LHS, RHS);
4074 Pred = CmpInst::getSwappedPredicate(Pred);
4075 }
4076
4077 // Fold trivial predicates.
4078 Type *RetTy = getCompareTy(LHS);
4079 if (Pred == FCmpInst::FCMP_FALSE)
4080 return getFalse(RetTy);
4081 if (Pred == FCmpInst::FCMP_TRUE)
4082 return getTrue(RetTy);
4083
4084 // fcmp pred x, poison and fcmp pred poison, x
4085 // fold to poison
4086 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
4087 return PoisonValue::get(RetTy);
4088
4089 // fcmp pred x, undef and fcmp pred undef, x
4090 // fold to true if unordered, false if ordered
4091 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
4092 // Choosing NaN for the undef will always make unordered comparison succeed
4093 // and ordered comparison fail.
4094 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4095 }
4096
4097 // fcmp x,x -> true/false. Not all compares are foldable.
4098 if (LHS == RHS) {
4099 if (CmpInst::isTrueWhenEqual(Pred))
4100 return getTrue(RetTy);
4101 if (CmpInst::isFalseWhenEqual(Pred))
4102 return getFalse(RetTy);
4103 }
4104
4105 // Fold (un)ordered comparison if we can determine there are no NaNs.
4106 //
4107 // This catches the two-variable input case; constants are handled below as a
4108 // class-like compare.
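  // For example (illustrative): "fcmp nnan ord double %x, %y" folds to true,
  // and "fcmp nnan uno double %x, %y" folds to false.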
4109 if (Pred == FCmpInst::FCMP_ORD || Pred == FCmpInst::FCMP_UNO) {
4110 if (FMF.noNaNs() ||
4111 (isKnownNeverNaN(RHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
4112 isKnownNeverNaN(LHS, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT)))
4113 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
4114 }
4115
4116 const APFloat *C = nullptr;
4117 match(RHS, m_APFloatAllowUndef(C));
4118 std::optional<KnownFPClass> FullKnownClassLHS;
4119
4120 // Lazily compute the possible classes for LHS. Avoid computing it twice if
4121 // RHS is a 0.
4122 auto computeLHSClass = [=, &FullKnownClassLHS](FPClassTest InterestedFlags =
4123 fcAllFlags) {
4124 if (FullKnownClassLHS)
4125 return *FullKnownClassLHS;
4126 return computeKnownFPClass(LHS, FMF, Q.DL, InterestedFlags, 0, Q.TLI, Q.AC,
4127 Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo);
4128 };
4129
4130 if (C && Q.CxtI) {
4131 // Fold out compares that express a class test.
4132 //
4133 // FIXME: Should be able to perform folds without context
4134 // instruction. Always pass in the context function?
4135
4136 const Function *ParentF = Q.CxtI->getFunction();
4137 auto [ClassVal, ClassTest] = fcmpToClassTest(Pred, *ParentF, LHS, C);
4138 if (ClassVal) {
4139 FullKnownClassLHS = computeLHSClass();
4140 if ((FullKnownClassLHS->KnownFPClasses & ClassTest) == fcNone)
4141 return getFalse(RetTy);
4142 if ((FullKnownClassLHS->KnownFPClasses & ~ClassTest) == fcNone)
4143 return getTrue(RetTy);
4144 }
4145 }
4146
4147 // Handle fcmp with constant RHS.
4148 if (C) {
4149 // TODO: If we always required a context function, we wouldn't need to
4150 // special-case NaNs.
4151 if (C->isNaN())
4152 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
4153
4154 // TODO: Need a version of fcmpToClassTest which returns the implied class
4155 // when the compare isn't a complete class test, e.g. > 1.0 implies fcPositive
4156 // but isn't implementable as a class call.
4157 if (C->isNegative() && !C->isNegZero()) {
4158 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4159
4160 // TODO: We can catch more cases by using a range check rather than
4161 // relying on CannotBeOrderedLessThanZero.
4162 switch (Pred) {
4163 case FCmpInst::FCMP_UGE:
4164 case FCmpInst::FCMP_UGT:
4165 case FCmpInst::FCMP_UNE: {
4166 KnownFPClass KnownClass = computeLHSClass(Interested);
4167
4168 // (X >= 0) implies (X > C) when (C < 0)
4169 if (KnownClass.cannotBeOrderedLessThanZero())
4170 return getTrue(RetTy);
4171 break;
4172 }
4173 case FCmpInst::FCMP_OEQ:
4174 case FCmpInst::FCMP_OLE:
4175 case FCmpInst::FCMP_OLT: {
4176 KnownFPClass KnownClass = computeLHSClass(Interested);
4177
4178 // (X >= 0) implies !(X < C) when (C < 0)
4179 if (KnownClass.cannotBeOrderedLessThanZero())
4180 return getFalse(RetTy);
4181 break;
4182 }
4183 default:
4184 break;
4185 }
4186 }
4187 // Check comparison of [minnum/maxnum with constant] with other constant.
4188 const APFloat *C2;
4189 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4190 *C2 < *C) ||
4191 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4192 *C2 > *C)) {
4193 bool IsMaxNum =
4194 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4195 // The ordered relationship and minnum/maxnum guarantee that we do not
4196 // have NaN constants, so ordered/unordered preds are handled the same.
4197 switch (Pred) {
4198 case FCmpInst::FCMP_OEQ:
4199 case FCmpInst::FCMP_UEQ:
4200 // minnum(X, LesserC) == C --> false
4201 // maxnum(X, GreaterC) == C --> false
4202 return getFalse(RetTy);
4203 case FCmpInst::FCMP_ONE:
4204 case FCmpInst::FCMP_UNE:
4205 // minnum(X, LesserC) != C --> true
4206 // maxnum(X, GreaterC) != C --> true
4207 return getTrue(RetTy);
4208 case FCmpInst::FCMP_OGE:
4209 case FCmpInst::FCMP_UGE:
4210 case FCmpInst::FCMP_OGT:
4211 case FCmpInst::FCMP_UGT:
4212 // minnum(X, LesserC) >= C --> false
4213 // minnum(X, LesserC) > C --> false
4214 // maxnum(X, GreaterC) >= C --> true
4215 // maxnum(X, GreaterC) > C --> true
4216 return ConstantInt::get(RetTy, IsMaxNum);
4217 case FCmpInst::FCMP_OLE:
4218 case FCmpInst::FCMP_ULE:
4219 case FCmpInst::FCMP_OLT:
4220 case FCmpInst::FCMP_ULT:
4221 // minnum(X, LesserC) <= C --> true
4222 // minnum(X, LesserC) < C --> true
4223 // maxnum(X, GreaterC) <= C --> false
4224 // maxnum(X, GreaterC) < C --> false
4225 return ConstantInt::get(RetTy, !IsMaxNum);
4226 default:
4227 // TRUE/FALSE/ORD/UNO should be handled before this.
4228 llvm_unreachable("Unexpected fcmp predicate");
4229 }
4230 }
4231 }
4232
4233 // TODO: Could fold this with above if there were a matcher which returned all
4234 // classes in a non-splat vector.
4235 if (match(RHS, m_AnyZeroFP())) {
4236 switch (Pred) {
4237 case FCmpInst::FCMP_OGE:
4238 case FCmpInst::FCMP_ULT: {
4239 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4240 if (!FMF.noNaNs())
4241 Interested |= fcNan;
4242
4243 KnownFPClass Known = computeLHSClass(Interested);
4244
4245 // Positive or zero X >= 0.0 --> true
4246 // Positive or zero X < 0.0 --> false
4247 if ((FMF.noNaNs() || Known.isKnownNeverNaN()) &&
4248 Known.cannotBeOrderedLessThanZero())
4249 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4250 break;
4251 }
4252 case FCmpInst::FCMP_UGE:
4253 case FCmpInst::FCMP_OLT: {
4254 FPClassTest Interested = KnownFPClass::OrderedLessThanZeroMask;
4255 KnownFPClass Known = computeLHSClass(Interested);
4256
4257 // Positive or zero or nan X >= 0.0 --> true
4258 // Positive or zero or nan X < 0.0 --> false
4259 if (Known.cannotBeOrderedLessThanZero())
4260 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4261 break;
4262 }
4263 default:
4264 break;
4265 }
4266 }
4267
4268 // If the comparison is with the result of a select instruction, check whether
4269 // comparing with either branch of the select always yields the same value.
4270 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4271 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4272 return V;
4273
4274 // If the comparison is with the result of a phi instruction, check whether
4275 // doing the compare with each incoming phi value yields a common result.
4276 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4277 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4278 return V;
4279
4280 return nullptr;
4281 }
4282
4283 Value *llvm::simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4284 FastMathFlags FMF, const SimplifyQuery &Q) {
4285 return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4286 }
4287
4288 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4289 const SimplifyQuery &Q,
4290 bool AllowRefinement,
4291 SmallVectorImpl<Instruction *> *DropFlags,
4292 unsigned MaxRecurse) {
4293 // Trivial replacement.
4294 if (V == Op)
4295 return RepOp;
4296
4297 if (!MaxRecurse--)
4298 return nullptr;
4299
4300 // We cannot replace a constant, and shouldn't even try.
4301 if (isa<Constant>(Op))
4302 return nullptr;
4303
4304 auto *I = dyn_cast<Instruction>(V);
4305 if (!I)
4306 return nullptr;
4307
4308 // The arguments of a phi node might refer to a value from a previous
4309 // cycle iteration.
4310 if (isa<PHINode>(I))
4311 return nullptr;
4312
4313 if (Op->getType()->isVectorTy()) {
4314 // For vector types, the simplification must hold per-lane, so forbid
4315 // potentially cross-lane operations like shufflevector.
4316 if (!I->getType()->isVectorTy() || isa<ShuffleVectorInst>(I) ||
4317 isa<CallBase>(I) || isa<BitCastInst>(I))
4318 return nullptr;
4319 }
4320
4321 // Don't fold away llvm.is.constant checks based on assumptions.
4322 if (match(I, m_Intrinsic<Intrinsic::is_constant>()))
4323 return nullptr;
4324
4325 // Don't simplify freeze.
4326 if (isa<FreezeInst>(I))
4327 return nullptr;
4328
4329 // Replace Op with RepOp in instruction operands.
4330 SmallVector<Value *, 8> NewOps;
4331 bool AnyReplaced = false;
4332 for (Value *InstOp : I->operands()) {
4333 if (Value *NewInstOp = simplifyWithOpReplaced(
4334 InstOp, Op, RepOp, Q, AllowRefinement, DropFlags, MaxRecurse)) {
4335 NewOps.push_back(NewInstOp);
4336 AnyReplaced = InstOp != NewInstOp;
4337 } else {
4338 NewOps.push_back(InstOp);
4339 }
4340 }
4341
4342 if (!AnyReplaced)
4343 return nullptr;
4344
4345 if (!AllowRefinement) {
4346 // General InstSimplify functions may refine the result, e.g. by returning
4347 // a constant for a potentially poison value. To avoid this, implement only
4348 // a few non-refining but profitable transforms here.
4349
4350 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4351 unsigned Opcode = BO->getOpcode();
4352 // id op x -> x, x op id -> x
4353 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4354 return NewOps[1];
4355 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4356 /* RHS */ true))
4357 return NewOps[0];
4358
4359 // x & x -> x, x | x -> x
4360 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4361 NewOps[0] == NewOps[1]) {
4362 // or disjoint x, x results in poison.
4363 if (auto *PDI = dyn_cast<PossiblyDisjointInst>(BO)) {
4364 if (PDI->isDisjoint()) {
4365 if (!DropFlags)
4366 return nullptr;
4367 DropFlags->push_back(BO);
4368 }
4369 }
4370 return NewOps[0];
4371 }
4372
4373 // x - x -> 0, x ^ x -> 0. This is non-refining, because x is non-poison
4374 // by assumption and this case never wraps, so nowrap flags can be
4375 // ignored.
4376 if ((Opcode == Instruction::Sub || Opcode == Instruction::Xor) &&
4377 NewOps[0] == RepOp && NewOps[1] == RepOp)
4378 return Constant::getNullValue(I->getType());
4379
4380 // If we are substituting an absorber constant into a binop and extra
4381 // poison can't leak if we remove the select -- because both operands of
4382 // the binop are based on the same value -- then it may be safe to replace
4383 // the value with the absorber constant. Examples:
4384 // (Op == 0) ? 0 : (Op & -Op) --> Op & -Op
4385 // (Op == 0) ? 0 : (Op * (binop Op, C)) --> Op * (binop Op, C)
4386 //   (Op == -1) ? -1 : (Op | (binop C, Op)) --> Op | (binop C, Op)
4387 Constant *Absorber =
4388 ConstantExpr::getBinOpAbsorber(Opcode, I->getType());
4389 if ((NewOps[0] == Absorber || NewOps[1] == Absorber) &&
4390 impliesPoison(BO, Op))
4391 return Absorber;
4392 }
4393
4394 if (isa<GetElementPtrInst>(I)) {
4395 // getelementptr x, 0 -> x.
4396 // This never returns poison, even if inbounds is set.
4397 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()))
4398 return NewOps[0];
4399 }
4400 } else {
4401 // The simplification queries below may return the original value. Consider:
4402 // %div = udiv i32 %arg, %arg2
4403 // %mul = mul nsw i32 %div, %arg2
4404 // %cmp = icmp eq i32 %mul, %arg
4405 // %sel = select i1 %cmp, i32 %div, i32 undef
4406 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4407 // simplifies back to %arg. This can only happen because %mul does not
4408 // dominate %div. To ensure a consistent return value contract, we make sure
4409 // that this case returns nullptr as well.
4410 auto PreventSelfSimplify = [V](Value *Simplified) {
4411 return Simplified != V ? Simplified : nullptr;
4412 };
4413
4414 return PreventSelfSimplify(
4415 ::simplifyInstructionWithOperands(I, NewOps, Q, MaxRecurse));
4416 }
4417
4418 // If all operands are constant after substituting Op for RepOp then we can
4419 // constant fold the instruction.
4420 SmallVector<Constant *, 8> ConstOps;
4421 for (Value *NewOp : NewOps) {
4422 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4423 ConstOps.push_back(ConstOp);
4424 else
4425 return nullptr;
4426 }
4427
4428 // Consider:
4429 // %cmp = icmp eq i32 %x, 2147483647
4430 // %add = add nsw i32 %x, 1
4431 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4432 //
4433 // We can't replace %sel with %add unless we strip away the flags (which
4434 // will be done in InstCombine).
4435 // TODO: This may be unsound, because it only catches some forms of
4436 // refinement.
4437 if (!AllowRefinement) {
4438 if (canCreatePoison(cast<Operator>(I), !DropFlags)) {
4439 // abs cannot create poison if the value is known to never be int_min.
4440 if (auto *II = dyn_cast<IntrinsicInst>(I);
4441 II && II->getIntrinsicID() == Intrinsic::abs) {
4442 if (!ConstOps[0]->isNotMinSignedValue())
4443 return nullptr;
4444 } else
4445 return nullptr;
4446 }
4447 Constant *Res = ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4448 if (DropFlags && Res && I->hasPoisonGeneratingFlagsOrMetadata())
4449 DropFlags->push_back(I);
4450 return Res;
4451 }
4452
4453 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4454 }
4455
4456 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4457 const SimplifyQuery &Q,
4458 bool AllowRefinement,
4459 SmallVectorImpl<Instruction *> *DropFlags) {
4460 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement, DropFlags,
4461 RecursionLimit);
4462 }
4463
4464 /// Try to simplify a select instruction when its condition operand is an
4465 /// integer comparison where one operand of the compare is a constant.
4466 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4467 const APInt *Y, bool TrueWhenUnset) {
4468 const APInt *C;
4469
4470 // (X & Y) == 0 ? X & ~Y : X --> X
4471 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4472 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4473 *Y == ~*C)
4474 return TrueWhenUnset ? FalseVal : TrueVal;
4475
4476 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4477 // (X & Y) != 0 ? X : X & ~Y --> X
4478 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4479 *Y == ~*C)
4480 return TrueWhenUnset ? FalseVal : TrueVal;
4481
4482 if (Y->isPowerOf2()) {
4483 // (X & Y) == 0 ? X | Y : X --> X | Y
4484 // (X & Y) != 0 ? X | Y : X --> X
4485 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4486 *Y == *C) {
4487 // We can't return the or if it has the disjoint flag.
4488 if (TrueWhenUnset && cast<PossiblyDisjointInst>(TrueVal)->isDisjoint())
4489 return nullptr;
4490 return TrueWhenUnset ? TrueVal : FalseVal;
4491 }
4492
4493 // (X & Y) == 0 ? X : X | Y --> X
4494 // (X & Y) != 0 ? X : X | Y --> X | Y
4495 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4496 *Y == *C) {
4497 // We can't return the or if it has the disjoint flag.
4498 if (!TrueWhenUnset && cast<PossiblyDisjointInst>(FalseVal)->isDisjoint())
4499 return nullptr;
4500 return TrueWhenUnset ? TrueVal : FalseVal;
4501 }
4502 }
4503
4504 return nullptr;
4505 }
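
// Illustrative example (made-up IR) of the power-of-two case above:
//   %t = and i32 %x, 8
//   %c = icmp eq i32 %t, 0
//   %o = or i32 %x, 8
//   %s = select i1 %c, i32 %o, i32 %x
// With TrueWhenUnset set, this matches "(X & Y) == 0 ? X | Y : X" and
// simplifies to %o, provided the 'or' does not carry the disjoint flag.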
4506
4507 static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4508 ICmpInst::Predicate Pred, Value *TVal,
4509 Value *FVal) {
4510 // Canonicalize common cmp+sel operand as CmpLHS.
4511 if (CmpRHS == TVal || CmpRHS == FVal) {
4512 std::swap(CmpLHS, CmpRHS);
4513 Pred = ICmpInst::getSwappedPredicate(Pred);
4514 }
4515
4516 // Canonicalize common cmp+sel operand as TVal.
4517 if (CmpLHS == FVal) {
4518 std::swap(TVal, FVal);
4519 Pred = ICmpInst::getInversePredicate(Pred);
4520 }
4521
4522 // A vector select may be shuffling together elements that are equivalent
4523 // based on the max/min/select relationship.
4524 Value *X = CmpLHS, *Y = CmpRHS;
4525 bool PeekedThroughSelectShuffle = false;
4526 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4527 if (Shuf && Shuf->isSelect()) {
4528 if (Shuf->getOperand(0) == Y)
4529 FVal = Shuf->getOperand(1);
4530 else if (Shuf->getOperand(1) == Y)
4531 FVal = Shuf->getOperand(0);
4532 else
4533 return nullptr;
4534 PeekedThroughSelectShuffle = true;
4535 }
4536
4537 // (X pred Y) ? X : max/min(X, Y)
4538 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4539 if (!MMI || TVal != X ||
4540 !match(FVal, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4541 return nullptr;
4542
4543 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4544 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4545 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4546 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4547 //
4548 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4549 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4550 // If Z is true, this reduces as above, and if Z is false:
4551 // (X > Y) ? X : Y --> max(X, Y)
4552 ICmpInst::Predicate MMPred = MMI->getPredicate();
4553 if (MMPred == CmpInst::getStrictPredicate(Pred))
4554 return MMI;
4555
4556 // Other transforms are not valid with a shuffle.
4557 if (PeekedThroughSelectShuffle)
4558 return nullptr;
4559
4560 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4561 if (Pred == CmpInst::ICMP_EQ)
4562 return MMI;
4563
4564 // (X != Y) ? X : max/min(X, Y) --> X
4565 if (Pred == CmpInst::ICMP_NE)
4566 return X;
4567
4568 // (X < Y) ? X : max(X, Y) --> X
4569 // (X <= Y) ? X : max(X, Y) --> X
4570 // (X > Y) ? X : min(X, Y) --> X
4571 // (X >= Y) ? X : min(X, Y) --> X
4572 ICmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
4573 if (MMPred == CmpInst::getStrictPredicate(InvPred))
4574 return X;
4575
4576 return nullptr;
4577 }
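
// Illustrative example (made-up IR): for
//   %c = icmp sgt i32 %x, %y
//   %m = call i32 @llvm.smax.i32(i32 %x, i32 %y)
//   %s = select i1 %c, i32 %x, i32 %m
// the compare predicate equals the strict predicate of smax, so the select
// simplifies to %m.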
4578
4579 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4580 /// eq/ne.
4581 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4582 ICmpInst::Predicate Pred,
4583 Value *TrueVal, Value *FalseVal) {
4584 Value *X;
4585 APInt Mask;
4586 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4587 return nullptr;
4588
4589 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4590 Pred == ICmpInst::ICMP_EQ);
4591 }
4592
4593 /// Try to simplify a select instruction when its condition operand is an
4594 /// integer equality comparison.
4595 static Value *simplifySelectWithICmpEq(Value *CmpLHS, Value *CmpRHS,
4596 Value *TrueVal, Value *FalseVal,
4597 const SimplifyQuery &Q,
4598 unsigned MaxRecurse) {
4599 if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4600 /* AllowRefinement */ false,
4601 /* DropFlags */ nullptr, MaxRecurse) == TrueVal)
4602 return FalseVal;
4603 if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4604 /* AllowRefinement */ true,
4605 /* DropFlags */ nullptr, MaxRecurse) == FalseVal)
4606 return FalseVal;
4607
4608 return nullptr;
4609 }
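
// Illustrative example (made-up IR):
//   %c = icmp eq i32 %x, 0
//   %o = or i32 %x, %y
//   %s = select i1 %c, i32 %y, i32 %o
// Substituting %x -> 0 into the false arm turns %o into "or i32 0, %y",
// which is the true arm %y, so the whole select simplifies to %o.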
4610
4611 /// Try to simplify a select instruction when its condition operand is an
4612 /// integer comparison.
4613 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4614 Value *FalseVal,
4615 const SimplifyQuery &Q,
4616 unsigned MaxRecurse) {
4617 ICmpInst::Predicate Pred;
4618 Value *CmpLHS, *CmpRHS;
4619 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4620 return nullptr;
4621
4622 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4623 return V;
4624
4625 // Canonicalize ne to eq predicate.
4626 if (Pred == ICmpInst::ICMP_NE) {
4627 Pred = ICmpInst::ICMP_EQ;
4628 std::swap(TrueVal, FalseVal);
4629 }
4630
4631 // Check for integer min/max with a limit constant:
4632 // X > MIN_INT ? X : MIN_INT --> X
4633 // X < MAX_INT ? X : MAX_INT --> X
4634 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4635 Value *X, *Y;
4636 SelectPatternFlavor SPF =
4637 matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4638 X, Y)
4639 .Flavor;
4640 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4641 APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4642 X->getType()->getScalarSizeInBits());
4643 if (match(Y, m_SpecificInt(LimitC)))
4644 return X;
4645 }
4646 }
4647
4648 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4649 Value *X;
4650 const APInt *Y;
4651 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4652 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4653 /*TrueWhenUnset=*/true))
4654 return V;
4655
4656 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4657 Value *ShAmt;
4658 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4659 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4660 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4661 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4662 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4663 return X;
4664
4665 // Test for a zero-shift-guard-op around rotates. These are used to
4666 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4667 // intrinsics do not have that problem.
4668 // We do not allow this transform for the general funnel shift case because
4669 // that would not preserve the poison safety of the original code.
4670 auto isRotate =
4671 m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4672 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4673 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4674 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4675 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4676 Pred == ICmpInst::ICMP_EQ)
4677 return FalseVal;
4678
4679 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4680 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4681 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4682 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4683 return FalseVal;
4684 if (match(TrueVal,
4685 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4686 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4687 return FalseVal;
4688 }
4689
4690 // Check for other compares that behave like bit test.
4691 if (Value *V =
4692 simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4693 return V;
4694
4695 // If we have a scalar equality comparison, then we know the value in one of
4696 // the arms of the select. See if substituting this value into the arm and
4697 // simplifying the result yields the same value as the other arm.
4698 if (Pred == ICmpInst::ICMP_EQ) {
4699 if (Value *V = simplifySelectWithICmpEq(CmpLHS, CmpRHS, TrueVal, FalseVal,
4700 Q, MaxRecurse))
4701 return V;
4702 if (Value *V = simplifySelectWithICmpEq(CmpRHS, CmpLHS, TrueVal, FalseVal,
4703 Q, MaxRecurse))
4704 return V;
4705
4706 Value *X;
4707 Value *Y;
4708 // select((X | Y) == 0 ? X : 0) --> 0 (commuted 2 ways)
4709 if (match(CmpLHS, m_Or(m_Value(X), m_Value(Y))) &&
4710 match(CmpRHS, m_Zero())) {
4711 // (X | Y) == 0 implies X == 0 and Y == 0.
4712 if (Value *V = simplifySelectWithICmpEq(X, CmpRHS, TrueVal, FalseVal, Q,
4713 MaxRecurse))
4714 return V;
4715 if (Value *V = simplifySelectWithICmpEq(Y, CmpRHS, TrueVal, FalseVal, Q,
4716 MaxRecurse))
4717 return V;
4718 }
4719
4720 // select((X & Y) == -1 ? X : -1) --> -1 (commuted 2 ways)
4721 if (match(CmpLHS, m_And(m_Value(X), m_Value(Y))) &&
4722 match(CmpRHS, m_AllOnes())) {
4723 // (X & Y) == -1 implies X == -1 and Y == -1.
4724 if (Value *V = simplifySelectWithICmpEq(X, CmpRHS, TrueVal, FalseVal, Q,
4725 MaxRecurse))
4726 return V;
4727 if (Value *V = simplifySelectWithICmpEq(Y, CmpRHS, TrueVal, FalseVal, Q,
4728 MaxRecurse))
4729 return V;
4730 }
4731 }
4732
4733 return nullptr;
4734 }
4735
4736 /// Try to simplify a select instruction when its condition operand is a
4737 /// floating-point comparison.
4738 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4739 const SimplifyQuery &Q) {
4740 FCmpInst::Predicate Pred;
4741 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4742 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4743 return nullptr;
4744
4745 // This transform is safe if we do not have (do not care about) -0.0 or if
4746 // at least one operand is known to not be -0.0. Otherwise, the select can
4747 // change the sign of a zero operand.
4748 bool HasNoSignedZeros =
4749 Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros();
4750 const APFloat *C;
4751 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4752 (match(F, m_APFloat(C)) && C->isNonZero())) {
4753 // (T == F) ? T : F --> F
4754 // (F == T) ? T : F --> F
4755 if (Pred == FCmpInst::FCMP_OEQ)
4756 return F;
4757
4758 // (T != F) ? T : F --> T
4759 // (F != T) ? T : F --> T
4760 if (Pred == FCmpInst::FCMP_UNE)
4761 return T;
4762 }
4763
4764 return nullptr;
4765 }
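
// Illustrative example (made-up IR): with T = %a and F = 1.0,
//   %c = fcmp oeq double %a, 1.0
//   %s = select i1 %c, double %a, double 1.0
// simplifies to 1.0; the non-zero constant rules out the signed-zero hazard
// described above.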
4766
4767 /// Given operands for a SelectInst, see if we can fold the result.
4768 /// If not, this returns null.
4769 static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4770 const SimplifyQuery &Q, unsigned MaxRecurse) {
4771 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4772 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4773 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4774 if (Constant *C = ConstantFoldSelectInstruction(CondC, TrueC, FalseC))
4775 return C;
4776
4777 // select poison, X, Y -> poison
4778 if (isa<PoisonValue>(CondC))
4779 return PoisonValue::get(TrueVal->getType());
4780
4781 // select undef, X, Y -> X or Y
4782 if (Q.isUndefValue(CondC))
4783 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4784
4785 // select true, X, Y --> X
4786 // select false, X, Y --> Y
4787 // For vectors, allow undef/poison elements in the condition to match the
4788 // defined elements, so we can eliminate the select.
4789 if (match(CondC, m_One()))
4790 return TrueVal;
4791 if (match(CondC, m_Zero()))
4792 return FalseVal;
4793 }
4794
4795 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4796 "Select must have bool or bool vector condition");
4797 assert(TrueVal->getType() == FalseVal->getType() &&
4798 "Select must have same types for true/false ops");
4799
4800 if (Cond->getType() == TrueVal->getType()) {
4801 // select i1 Cond, i1 true, i1 false --> i1 Cond
4802 if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4803 return Cond;
4804
4805 // (X && Y) ? X : Y --> Y (commuted 2 ways)
4806 if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
4807 return FalseVal;
4808
4809 // (X || Y) ? X : Y --> X (commuted 2 ways)
4810 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
4811 return TrueVal;
4812
4813 // (X || Y) ? false : X --> false (commuted 2 ways)
4814 if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
4815 match(TrueVal, m_ZeroInt()))
4816 return ConstantInt::getFalse(Cond->getType());
4817
4818 // Match patterns that end in logical-and.
4819 if (match(FalseVal, m_ZeroInt())) {
4820 // !(X || Y) && X --> false (commuted 2 ways)
4821 if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
4822 return ConstantInt::getFalse(Cond->getType());
4823 // X && !(X || Y) --> false (commuted 2 ways)
4824 if (match(TrueVal, m_Not(m_c_LogicalOr(m_Specific(Cond), m_Value()))))
4825 return ConstantInt::getFalse(Cond->getType());
4826
4827 // (X || Y) && Y --> Y (commuted 2 ways)
4828 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
4829 return TrueVal;
4830 // Y && (X || Y) --> Y (commuted 2 ways)
4831 if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
4832 return Cond;
4833
4834 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4835 Value *X, *Y;
4836 if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4837 match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4838 return X;
4839 if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4840 match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4841 return X;
4842 }
4843
4844 // Match patterns that end in logical-or.
4845 if (match(TrueVal, m_One())) {
4846 // !(X && Y) || X --> true (commuted 2 ways)
4847 if (match(Cond, m_Not(m_c_LogicalAnd(m_Specific(FalseVal), m_Value()))))
4848 return ConstantInt::getTrue(Cond->getType());
4849 // X || !(X && Y) --> true (commuted 2 ways)
4850 if (match(FalseVal, m_Not(m_c_LogicalAnd(m_Specific(Cond), m_Value()))))
4851 return ConstantInt::getTrue(Cond->getType());
4852
4853 // (X && Y) || Y --> Y (commuted 2 ways)
4854 if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
4855 return FalseVal;
4856 // Y || (X && Y) --> Y (commuted 2 ways)
4857 if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
4858 return Cond;
4859 }
4860 }
4861
4862 // select ?, X, X -> X
4863 if (TrueVal == FalseVal)
4864 return TrueVal;
4865
4866 if (Cond == TrueVal) {
4867 // select i1 X, i1 X, i1 false --> X (logical-and)
4868 if (match(FalseVal, m_ZeroInt()))
4869 return Cond;
4870 // select i1 X, i1 X, i1 true --> true
4871 if (match(FalseVal, m_One()))
4872 return ConstantInt::getTrue(Cond->getType());
4873 }
4874 if (Cond == FalseVal) {
4875 // select i1 X, i1 true, i1 X --> X (logical-or)
4876 if (match(TrueVal, m_One()))
4877 return Cond;
4878 // select i1 X, i1 false, i1 X --> false
4879 if (match(TrueVal, m_ZeroInt()))
4880 return ConstantInt::getFalse(Cond->getType());
4881 }
4882
4883 // If the true or false value is poison, we can fold to the other value.
4884 // If the true or false value is undef, we can fold to the other value as
4885 // long as the other value isn't poison.
4886 // select ?, poison, X -> X
4887 // select ?, undef, X -> X
4888 if (isa<PoisonValue>(TrueVal) ||
4889 (Q.isUndefValue(TrueVal) && impliesPoison(FalseVal, Cond)))
4890 return FalseVal;
4891 // select ?, X, poison -> X
4892 // select ?, X, undef -> X
4893 if (isa<PoisonValue>(FalseVal) ||
4894 (Q.isUndefValue(FalseVal) && impliesPoison(TrueVal, Cond)))
4895 return TrueVal;
4896
4897 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4898 Constant *TrueC, *FalseC;
4899 if (isa<FixedVectorType>(TrueVal->getType()) &&
4900 match(TrueVal, m_Constant(TrueC)) &&
4901 match(FalseVal, m_Constant(FalseC))) {
4902 unsigned NumElts =
4903 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4904 SmallVector<Constant *, 16> NewC;
4905 for (unsigned i = 0; i != NumElts; ++i) {
4906 // Bail out on incomplete vector constants.
4907 Constant *TEltC = TrueC->getAggregateElement(i);
4908 Constant *FEltC = FalseC->getAggregateElement(i);
4909 if (!TEltC || !FEltC)
4910 break;
4911
4912 // If the elements match (undef or not), that value is the result. If only
4913 // one element is undef, choose the defined element as the safe result.
4914 if (TEltC == FEltC)
4915 NewC.push_back(TEltC);
4916 else if (isa<PoisonValue>(TEltC) ||
4917 (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
4918 NewC.push_back(FEltC);
4919 else if (isa<PoisonValue>(FEltC) ||
4920 (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
4921 NewC.push_back(TEltC);
4922 else
4923 break;
4924 }
4925 if (NewC.size() == NumElts)
4926 return ConstantVector::get(NewC);
4927 }
4928
4929 if (Value *V =
4930 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4931 return V;
4932
4933 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4934 return V;
4935
4936 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4937 return V;
4938
4939 std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4940 if (Imp)
4941 return *Imp ? TrueVal : FalseVal;
4942
4943 return nullptr;
4944 }
4945
4946 Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4947 const SimplifyQuery &Q) {
4948 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4949 }
4950
4951 /// Given operands for a GetElementPtrInst, see if we can fold the result.
4952 /// If not, this returns null.
4953 static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
4954 ArrayRef<Value *> Indices, bool InBounds,
4955 const SimplifyQuery &Q, unsigned) {
4956 // The type of the GEP pointer operand.
4957 unsigned AS =
4958 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4959
4960 // getelementptr P -> P.
4961 if (Indices.empty())
4962 return Ptr;
4963
4964 // Compute the (pointer) type returned by the GEP instruction.
4965 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
4966 Type *GEPTy = Ptr->getType();
4967 if (!GEPTy->isVectorTy()) {
4968 for (Value *Op : Indices) {
4969 // If one of the operands is a vector, the result type is a vector of
4970 // pointers. All vector operands must have the same number of elements.
4971 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4972 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4973 break;
4974 }
4975 }
4976 }
4977
4978 // All-zero GEP is a no-op, unless it performs a vector splat.
4979 if (Ptr->getType() == GEPTy &&
4980 all_of(Indices, [](const auto *V) { return match(V, m_Zero()); }))
4981 return Ptr;
4982
4983 // getelementptr poison, idx -> poison
4984 // getelementptr baseptr, poison -> poison
4985 if (isa<PoisonValue>(Ptr) ||
4986 any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
4987 return PoisonValue::get(GEPTy);
4988
4989 // getelementptr undef, idx -> undef
4990 if (Q.isUndefValue(Ptr))
4991 return UndefValue::get(GEPTy);
4992
4993 bool IsScalableVec =
4994 SrcTy->isScalableTy() || any_of(Indices, [](const Value *V) {
4995 return isa<ScalableVectorType>(V->getType());
4996 });
4997
4998 if (Indices.size() == 1) {
4999 Type *Ty = SrcTy;
5000 if (!IsScalableVec && Ty->isSized()) {
5001 Value *P;
5002 uint64_t C;
5003 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
5004 // getelementptr P, N -> P if P points to a type of zero size.
5005 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
5006 return Ptr;
5007
5008 // The following transforms are only safe if the ptrtoint cast
5009 // doesn't truncate the pointers.
5010 if (Indices[0]->getType()->getScalarSizeInBits() ==
5011 Q.DL.getPointerSizeInBits(AS)) {
5012 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
5013 return P->getType() == GEPTy &&
5014 getUnderlyingObject(P) == getUnderlyingObject(Ptr);
5015 };
5016 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
5017 if (TyAllocSize == 1 &&
5018 match(Indices[0],
5019 m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
5020 CanSimplify())
5021 return P;
5022
5023 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
5024 // size 1 << C.
5025 if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
5026 m_PtrToInt(m_Specific(Ptr))),
5027 m_ConstantInt(C))) &&
5028 TyAllocSize == 1ULL << C && CanSimplify())
5029 return P;
5030
5031 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
5032 // size C.
5033 if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
5034 m_PtrToInt(m_Specific(Ptr))),
5035 m_SpecificInt(TyAllocSize))) &&
5036 CanSimplify())
5037 return P;
5038 }
5039 }
5040 }
5041
5042 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
5043 all_of(Indices.drop_back(1),
5044 [](Value *Idx) { return match(Idx, m_Zero()); })) {
5045 unsigned IdxWidth =
5046 Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
5047 if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
5048 APInt BasePtrOffset(IdxWidth, 0);
5049 Value *StrippedBasePtr =
5050 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
5051
5052 // Avoid creating inttoptr of zero here: While LLVM's treatment of
5053 // inttoptr is generally conservative, this particular case is folded to
5054 // a null pointer, which will have incorrect provenance.
5055
5056 // gep (gep V, C), (sub 0, V) -> C
5057 if (match(Indices.back(),
5058 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
5059 !BasePtrOffset.isZero()) {
5060 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
5061 return ConstantExpr::getIntToPtr(CI, GEPTy);
5062 }
5063 // gep (gep V, C), (xor V, -1) -> C-1
5064 if (match(Indices.back(),
5065 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
5066 !BasePtrOffset.isOne()) {
5067 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
5068 return ConstantExpr::getIntToPtr(CI, GEPTy);
5069 }
5070 }
5071 }
5072
5073 // Check to see if this is constant foldable.
5074 if (!isa<Constant>(Ptr) ||
5075 !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
5076 return nullptr;
5077
5078 if (!ConstantExpr::isSupportedGetElementPtr(SrcTy))
5079 return ConstantFoldGetElementPtr(SrcTy, cast<Constant>(Ptr), InBounds,
5080 std::nullopt, Indices);
5081
5082 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
5083 InBounds);
5084 return ConstantFoldConstant(CE, Q.DL);
5085 }
5086
5087 Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
5088 bool InBounds, const SimplifyQuery &Q) {
5089 return ::simplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
5090 }
5091
5092 /// Given operands for an InsertValueInst, see if we can fold the result.
5093 /// If not, this returns null.
5094 static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
5095 ArrayRef<unsigned> Idxs,
5096 const SimplifyQuery &Q, unsigned) {
5097 if (Constant *CAgg = dyn_cast<Constant>(Agg))
5098 if (Constant *CVal = dyn_cast<Constant>(Val))
5099 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
5100
5101 // insertvalue x, poison, n -> x
5102 // insertvalue x, undef, n -> x if x cannot be poison
5103 if (isa<PoisonValue>(Val) ||
5104 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
5105 return Agg;
5106
5107 // insertvalue x, (extractvalue y, n), n
5108 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
5109 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
5110 EV->getIndices() == Idxs) {
5111 // insertvalue poison, (extractvalue y, n), n -> y
5112 // insertvalue undef, (extractvalue y, n), n -> y if y cannot be poison
5113 if (isa<PoisonValue>(Agg) ||
5114 (Q.isUndefValue(Agg) &&
5115 isGuaranteedNotToBePoison(EV->getAggregateOperand())))
5116 return EV->getAggregateOperand();
5117
5118 // insertvalue y, (extractvalue y, n), n -> y
5119 if (Agg == EV->getAggregateOperand())
5120 return Agg;
5121 }
5122
5123 return nullptr;
5124 }
5125
5126 Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
5127 ArrayRef<unsigned> Idxs,
5128 const SimplifyQuery &Q) {
5129 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
5130 }
5131
5132 Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
5133 const SimplifyQuery &Q) {
5134 // Try to constant fold.
5135 auto *VecC = dyn_cast<Constant>(Vec);
5136 auto *ValC = dyn_cast<Constant>(Val);
5137 auto *IdxC = dyn_cast<Constant>(Idx);
5138 if (VecC && ValC && IdxC)
5139 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
5140
5141 // For fixed-length vector, fold into poison if index is out of bounds.
5142 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
5143 if (isa<FixedVectorType>(Vec->getType()) &&
5144 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
5145 return PoisonValue::get(Vec->getType());
5146 }
5147
5148 // If index is undef, it might be out of bounds (see above case)
5149 if (Q.isUndefValue(Idx))
5150 return PoisonValue::get(Vec->getType());
5151
5152 // If the scalar is poison, or it is undef and there is no risk of
5153 // propagating poison from the vector value, simplify to the vector value.
5154 if (isa<PoisonValue>(Val) ||
5155 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
5156 return Vec;
5157
5158 // If we are extracting a value from a vector, then inserting it into the same
5159 // place, that's the input vector:
5160 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
5161 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
5162 return Vec;
5163
5164 return nullptr;
5165 }
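
// Illustrative example (made-up IR): an out-of-bounds constant index,
//   %v = insertelement <4 x i32> %vec, i32 %x, i64 7
// makes the result poison for a fixed-length vector, matching the fold above.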
5166
5167 /// Given operands for an ExtractValueInst, see if we can fold the result.
5168 /// If not, this returns null.
5169 static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5170 const SimplifyQuery &, unsigned) {
5171 if (auto *CAgg = dyn_cast<Constant>(Agg))
5172 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
5173
5174 // extractvalue x, (insertvalue y, elt, n), n -> elt
5175 unsigned NumIdxs = Idxs.size();
5176 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
5177 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
5178 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
5179 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
5180 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
5181 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
5182 Idxs.slice(0, NumCommonIdxs)) {
5183 if (NumIdxs == NumInsertValueIdxs)
5184 return IVI->getInsertedValueOperand();
5185 break;
5186 }
5187 }
5188
5189 return nullptr;
5190 }
5191
5192 Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
5193 const SimplifyQuery &Q) {
5194 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
5195 }
5196
5197 /// Given operands for an ExtractElementInst, see if we can fold the result.
5198 /// If not, this returns null.
5199 static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
5200 const SimplifyQuery &Q, unsigned) {
5201 auto *VecVTy = cast<VectorType>(Vec->getType());
5202 if (auto *CVec = dyn_cast<Constant>(Vec)) {
5203 if (auto *CIdx = dyn_cast<Constant>(Idx))
5204 return ConstantExpr::getExtractElement(CVec, CIdx);
5205
5206 if (Q.isUndefValue(Vec))
5207 return UndefValue::get(VecVTy->getElementType());
5208 }
5209
5210 // An undef extract index can be arbitrarily chosen to be an out-of-range
5211 // index value, which would result in the instruction being poison.
5212 if (Q.isUndefValue(Idx))
5213 return PoisonValue::get(VecVTy->getElementType());
5214
5215 // If extracting a specified index from the vector, see if we can recursively
5216 // find a previously computed scalar that was inserted into the vector.
5217 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
5218 // For a fixed-length vector, fold into poison if the index is out of bounds.
5219 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
5220 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
5221 return PoisonValue::get(VecVTy->getElementType());
5222 // Handle case where an element is extracted from a splat.
5223 if (IdxC->getValue().ult(MinNumElts))
5224 if (auto *Splat = getSplatValue(Vec))
5225 return Splat;
5226 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
5227 return Elt;
5228 } else {
5229 // extractelt x, (insertelt y, elt, n), n -> elt
5230 // If the possibly-variable indices are trivially known to be equal
5231 // (because they are the same operand) then use the value that was
5232 // inserted directly.
5233 auto *IE = dyn_cast<InsertElementInst>(Vec);
5234 if (IE && IE->getOperand(2) == Idx)
5235 return IE->getOperand(1);
5236
5237 // The index is not relevant if our vector is a splat.
5238 if (Value *Splat = getSplatValue(Vec))
5239 return Splat;
5240 }
5241 return nullptr;
5242 }
5243
5244 Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5245 const SimplifyQuery &Q) {
5246 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5247 }
5248
5249 /// See if we can fold the given phi. If not, returns null.
5250 static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5251 const SimplifyQuery &Q) {
5252 // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5253 // here, because the PHI we may succeed simplifying to was not
5254 // def-reachable from the original PHI!
5255
5256 // If all of the PHI's incoming values are the same then replace the PHI node
5257 // with the common value.
5258 Value *CommonValue = nullptr;
5259 bool HasUndefInput = false;
5260 for (Value *Incoming : IncomingValues) {
5261 // If the incoming value is the phi node itself, it can safely be skipped.
5262 if (Incoming == PN)
5263 continue;
5264 if (Q.isUndefValue(Incoming)) {
5265 // Remember that we saw an undef value, but otherwise ignore them.
5266 HasUndefInput = true;
5267 continue;
5268 }
5269 if (CommonValue && Incoming != CommonValue)
5270 return nullptr; // Not the same, bail out.
5271 CommonValue = Incoming;
5272 }
5273
5274 // If CommonValue is null then all of the incoming values were either undef or
5275 // equal to the phi node itself.
5276 if (!CommonValue)
5277 return UndefValue::get(PN->getType());
5278
5279 if (HasUndefInput) {
5280 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5281 // instruction, we cannot return X as the result of the PHI node unless it
5282 // dominates the PHI block.
5283 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
5284 }
5285
5286 return CommonValue;
5287 }
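
// Illustrative example (made-up IR):
//   %p = phi i32 [ %x, %bb0 ], [ undef, %bb1 ], [ %x, %bb2 ]
// simplifies to %x, but only when %x dominates the phi's block, per the
// valueDominatesPHI check above.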
5288
5289 static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5290 const SimplifyQuery &Q, unsigned MaxRecurse) {
5291 if (auto *C = dyn_cast<Constant>(Op))
5292 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5293
5294 if (auto *CI = dyn_cast<CastInst>(Op)) {
5295 auto *Src = CI->getOperand(0);
5296 Type *SrcTy = Src->getType();
5297 Type *MidTy = CI->getType();
5298 Type *DstTy = Ty;
5299 if (Src->getType() == Ty) {
5300 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
5301 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5302 Type *SrcIntPtrTy =
5303 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
5304 Type *MidIntPtrTy =
5305 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
5306 Type *DstIntPtrTy =
5307 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
5308 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5309 SrcIntPtrTy, MidIntPtrTy,
5310 DstIntPtrTy) == Instruction::BitCast)
5311 return Src;
5312 }
5313 }
5314
5315 // bitcast x -> x
5316 if (CastOpc == Instruction::BitCast)
5317 if (Op->getType() == Ty)
5318 return Op;
5319
5320 return nullptr;
5321 }
5322
5323 Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5324 const SimplifyQuery &Q) {
5325 return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5326 }
5327
5328 /// For the given destination element of a shuffle, peek through shuffles to
5329 /// match a root vector source operand that contains that element in the same
5330 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
5331 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5332 int MaskVal, Value *RootVec,
5333 unsigned MaxRecurse) {
5334 if (!MaxRecurse--)
5335 return nullptr;
5336
5337 // Bail out if any mask value is undefined. That kind of shuffle may be
5338 // simplified further based on demanded bits or other folds.
5339 if (MaskVal == -1)
5340 return nullptr;
5341
5342 // The mask value chooses which source operand we need to look at next.
5343 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5344 int RootElt = MaskVal;
5345 Value *SourceOp = Op0;
5346 if (MaskVal >= InVecNumElts) {
5347 RootElt = MaskVal - InVecNumElts;
5348 SourceOp = Op1;
5349 }
5350
5351 // If the source operand is a shuffle itself, look through it to find the
5352 // matching root vector.
5353 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5354 return foldIdentityShuffles(
5355 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5356 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5357 }
5358
5359 // TODO: Look through bitcasts? What if the bitcast changes the vector element
5360 // size?
5361
5362 // The source operand is not a shuffle. Initialize the root vector value for
5363 // this shuffle if that has not been done yet.
5364 if (!RootVec)
5365 RootVec = SourceOp;
5366
5367 // Give up as soon as a source operand does not match the existing root value.
5368 if (RootVec != SourceOp)
5369 return nullptr;
5370
5371 // The element must be coming from the same lane in the source vector
5372 // (although it may have crossed lanes in intermediate shuffles).
5373 if (RootElt != DestElt)
5374 return nullptr;
5375
5376 return RootVec;
5377 }
5378
5379 static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5380 ArrayRef<int> Mask, Type *RetTy,
5381 const SimplifyQuery &Q,
5382 unsigned MaxRecurse) {
5383 if (all_of(Mask, [](int Elem) { return Elem == PoisonMaskElem; }))
5384 return PoisonValue::get(RetTy);
5385
5386 auto *InVecTy = cast<VectorType>(Op0->getType());
5387 unsigned MaskNumElts = Mask.size();
5388 ElementCount InVecEltCount = InVecTy->getElementCount();
5389
5390 bool Scalable = InVecEltCount.isScalable();
5391
5392 SmallVector<int, 32> Indices;
5393 Indices.assign(Mask.begin(), Mask.end());
5394
5395 // Canonicalization: If mask does not select elements from an input vector,
5396 // replace that input vector with poison.
5397 if (!Scalable) {
5398 bool MaskSelects0 = false, MaskSelects1 = false;
5399 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5400 for (unsigned i = 0; i != MaskNumElts; ++i) {
5401 if (Indices[i] == -1)
5402 continue;
5403 if ((unsigned)Indices[i] < InVecNumElts)
5404 MaskSelects0 = true;
5405 else
5406 MaskSelects1 = true;
5407 }
5408 if (!MaskSelects0)
5409 Op0 = PoisonValue::get(InVecTy);
5410 if (!MaskSelects1)
5411 Op1 = PoisonValue::get(InVecTy);
5412 }
5413
5414 auto *Op0Const = dyn_cast<Constant>(Op0);
5415 auto *Op1Const = dyn_cast<Constant>(Op1);
5416
5417 // If all operands are constant, constant fold the shuffle. This
5418 // transformation depends on the value of the mask which is not known at
5419 // compile time for scalable vectors
5420 if (Op0Const && Op1Const)
5421 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5422
5423 // Canonicalization: if only one input vector is constant, it shall be the
5424 // second one. This transformation depends on the value of the mask which
5425 // is not known at compile time for scalable vectors
5426 if (!Scalable && Op0Const && !Op1Const) {
5427 std::swap(Op0, Op1);
5428 ShuffleVectorInst::commuteShuffleMask(Indices,
5429 InVecEltCount.getKnownMinValue());
5430 }
5431
5432 // A splat of an inserted scalar constant becomes a vector constant:
5433 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5434 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5435 // original mask constant.
5436 // NOTE: This transformation depends on the value of the mask which is not
5437 // known at compile time for scalable vectors
5438 Constant *C;
5439 ConstantInt *IndexC;
5440 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5441 m_ConstantInt(IndexC)))) {
5442 // Match a splat shuffle mask of the insert index allowing undef elements.
5443 int InsertIndex = IndexC->getZExtValue();
5444 if (all_of(Indices, [InsertIndex](int MaskElt) {
5445 return MaskElt == InsertIndex || MaskElt == -1;
5446 })) {
5447 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5448
5449 // Shuffle mask poisons become poison constant result elements.
5450 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5451 for (unsigned i = 0; i != MaskNumElts; ++i)
5452 if (Indices[i] == -1)
5453 VecC[i] = PoisonValue::get(C->getType());
5454 return ConstantVector::get(VecC);
5455 }
5456 }
5457
5458 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5459 // value type is the same as the input vectors' type.
5460 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5461 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5462 all_equal(OpShuf->getShuffleMask()))
5463 return Op0;
5464
5465 // All remaining transformation depend on the value of the mask, which is
5466 // not known at compile time for scalable vectors.
5467 if (Scalable)
5468 return nullptr;
5469
5470 // Don't fold a shuffle with undef mask elements. This may get folded in a
5471 // better way using demanded bits or other analysis.
5472 // TODO: Should we allow this?
5473 if (is_contained(Indices, -1))
5474 return nullptr;
5475
5476 // Check if every element of this shuffle can be mapped back to the
5477 // corresponding element of a single root vector. If so, we don't need this
5478 // shuffle. This handles simple identity shuffles as well as chains of
5479 // shuffles that may widen/narrow and/or move elements across lanes and back.
5480 Value *RootVec = nullptr;
5481 for (unsigned i = 0; i != MaskNumElts; ++i) {
5482 // Note that recursion is limited for each vector element, so if any element
5483 // exceeds the limit, this will fail to simplify.
5484 RootVec =
5485 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5486
5487 // We can't replace a widening/narrowing shuffle with one of its operands.
5488 if (!RootVec || RootVec->getType() != RetTy)
5489 return nullptr;
5490 }
5491 return RootVec;
5492 }
5493
5494 /// Given operands for a ShuffleVectorInst, fold the result or return null.
5495 Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5496 ArrayRef<int> Mask, Type *RetTy,
5497 const SimplifyQuery &Q) {
5498 return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5499 }
5500
5501 static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5502 const SimplifyQuery &Q) {
5503 if (auto *C = dyn_cast<Constant>(Op))
5504 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5505 return nullptr;
5506 }
5507
5508 /// Given the operand for an FNeg, see if we can fold the result. If not, this
5509 /// returns null.
5510 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5511 const SimplifyQuery &Q, unsigned MaxRecurse) {
5512 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5513 return C;
5514
5515 Value *X;
5516 // fneg (fneg X) ==> X
5517 if (match(Op, m_FNeg(m_Value(X))))
5518 return X;
5519
5520 return nullptr;
5521 }
5522
5523 Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5524 const SimplifyQuery &Q) {
5525 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5526 }
5527
5528 /// Try to propagate existing NaN values when possible. If not, replace the
5529 /// constant or elements in the constant with a canonical NaN.
5530 static Constant *propagateNaN(Constant *In) {
5531 Type *Ty = In->getType();
5532 if (auto *VecTy = dyn_cast<FixedVectorType>(Ty)) {
5533 unsigned NumElts = VecTy->getNumElements();
5534 SmallVector<Constant *, 32> NewC(NumElts);
5535 for (unsigned i = 0; i != NumElts; ++i) {
5536 Constant *EltC = In->getAggregateElement(i);
5537 // Poison elements propagate. NaN propagates except signaling is quieted.
5538 // Replace unknown or undef elements with canonical NaN.
5539 if (EltC && isa<PoisonValue>(EltC))
5540 NewC[i] = EltC;
5541 else if (EltC && EltC->isNaN())
5542 NewC[i] = ConstantFP::get(
5543 EltC->getType(), cast<ConstantFP>(EltC)->getValue().makeQuiet());
5544 else
5545 NewC[i] = ConstantFP::getNaN(VecTy->getElementType());
5546 }
5547 return ConstantVector::get(NewC);
5548 }
5549
5550 // If it is not a fixed vector, but not a simple NaN either, return a
5551 // canonical NaN.
5552 if (!In->isNaN())
5553 return ConstantFP::getNaN(Ty);
5554
5555 // If we know this is a NaN and it's a scalable vector, we must have a splat
5556 // on our hands. Grab that before splatting a QNaN constant.
5557 if (isa<ScalableVectorType>(Ty)) {
5558 auto *Splat = In->getSplatValue();
5559 assert(Splat && Splat->isNaN() &&
5560 "Found a scalable-vector NaN but not a splat");
5561 In = Splat;
5562 }
5563
5564 // Propagate an existing QNaN constant. If it is an SNaN, make it quiet, but
5565 // preserve the sign/payload.
5566 return ConstantFP::get(Ty, cast<ConstantFP>(In)->getValue().makeQuiet());
5567 }
5568
5569 /// Perform folds that are common to any floating-point operation. This implies
5570 /// transforms based on poison/undef/NaN because the operation itself makes no
5571 /// difference to the result.
5572 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5573 const SimplifyQuery &Q,
5574 fp::ExceptionBehavior ExBehavior,
5575 RoundingMode Rounding) {
5576 // Poison is independent of anything else. It always propagates from an
5577 // operand to a math result.
5578 if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5579 return PoisonValue::get(Ops[0]->getType());
5580
5581 for (Value *V : Ops) {
5582 bool IsNan = match(V, m_NaN());
5583 bool IsInf = match(V, m_Inf());
5584 bool IsUndef = Q.isUndefValue(V);
5585
5586 // If this operation has 'nnan' or 'ninf' and at least 1 disallowed operand
5587 // (an undef operand can be chosen to be NaN/Inf), then the result of
5588 // this operation is poison.
5589 if (FMF.noNaNs() && (IsNan || IsUndef))
5590 return PoisonValue::get(V->getType());
5591 if (FMF.noInfs() && (IsInf || IsUndef))
5592 return PoisonValue::get(V->getType());
5593
5594 if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5595 // Undef does not propagate because undef means that all bits can take on
5596 // any value. If this is undef * NaN for example, then the result values
5597 // (at least the exponent bits) are limited. Assume the undef is a
5598 // canonical NaN and propagate that.
5599 if (IsUndef)
5600 return ConstantFP::getNaN(V->getType());
5601 if (IsNan)
5602 return propagateNaN(cast<Constant>(V));
5603 } else if (ExBehavior != fp::ebStrict) {
5604 if (IsNan)
5605 return propagateNaN(cast<Constant>(V));
5606 }
5607 }
5608 return nullptr;
5609 }
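
// Illustrative example: "fadd nnan double %x, undef" folds to poison above,
// because the undef operand may be chosen to be a NaN that nnan declares
// impossible.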
5610
5611 /// Given operands for an FAdd, see if we can fold the result. If not, this
5612 /// returns null.
5613 static Value *
5614 simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5615 const SimplifyQuery &Q, unsigned MaxRecurse,
5616 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5617 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5618 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5619 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5620 return C;
5621
5622 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5623 return C;
5624
5625 // fadd X, -0 ==> X
5626 // With strict/constrained FP, we have these possible edge cases that do
5627 // not simplify to Op0:
5628 // fadd SNaN, -0.0 --> QNaN
5629 // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5630 if (canIgnoreSNaN(ExBehavior, FMF) &&
5631 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5632 FMF.noSignedZeros()))
5633 if (match(Op1, m_NegZeroFP()))
5634 return Op0;
5635
5636 // fadd X, 0 ==> X, when we know X is not -0
5637 if (canIgnoreSNaN(ExBehavior, FMF))
5638 if (match(Op1, m_PosZeroFP()) &&
5639 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
5640 return Op0;
5641
5642 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5643 return nullptr;
5644
5645 if (FMF.noNaNs()) {
5646 // With nnan: X + {+/-}Inf --> {+/-}Inf
5647 if (match(Op1, m_Inf()))
5648 return Op1;
5649
5650 // With nnan: -X + X --> 0.0 (and commuted variant)
5651 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5652 // Negative zeros are allowed because we always end up with positive zero:
5653 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5654 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5655 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5656 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5657 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5658 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5659 return ConstantFP::getZero(Op0->getType());
5660
5661 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5662 match(Op1, m_FNeg(m_Specific(Op0))))
5663 return ConstantFP::getZero(Op0->getType());
5664 }
5665
5666 // (X - Y) + Y --> X
5667 // Y + (X - Y) --> X
5668 Value *X;
5669 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5670 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5671 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5672 return X;
5673
5674 return nullptr;
5675 }
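
// Illustrative example (made-up IR) of the reassociation fold above:
//   %d = fsub double %x, %y
//   %a = fadd reassoc nsz double %d, %y
// simplifies to %x, since the fadd carries both reassoc and nsz.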
5676
5677 /// Given operands for an FSub, see if we can fold the result. If not, this
5678 /// returns null.
5679 static Value *
5680 simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5681 const SimplifyQuery &Q, unsigned MaxRecurse,
5682 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5683 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5684 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5685 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5686 return C;
5687
5688 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5689 return C;
5690
5691 // fsub X, +0 ==> X
5692 if (canIgnoreSNaN(ExBehavior, FMF) &&
5693 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5694 FMF.noSignedZeros()))
5695 if (match(Op1, m_PosZeroFP()))
5696 return Op0;
5697
5698 // fsub X, -0 ==> X, when we know X is not -0
5699 if (canIgnoreSNaN(ExBehavior, FMF))
5700 if (match(Op1, m_NegZeroFP()) &&
5701 (FMF.noSignedZeros() || cannotBeNegativeZero(Op0, Q.DL, Q.TLI)))
5702 return Op0;
5703
5704 // fsub -0.0, (fsub -0.0, X) ==> X
5705 // fsub -0.0, (fneg X) ==> X
5706 Value *X;
5707 if (canIgnoreSNaN(ExBehavior, FMF))
5708 if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5709 return X;
5710
5711 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5712 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5713 if (canIgnoreSNaN(ExBehavior, FMF))
5714 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5715 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5716 match(Op1, m_FNeg(m_Value(X)))))
5717 return X;
5718
5719 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5720 return nullptr;
5721
5722 if (FMF.noNaNs()) {
5723 // fsub nnan x, x ==> 0.0
5724 if (Op0 == Op1)
5725 return Constant::getNullValue(Op0->getType());
5726
5727 // With nnan: {+/-}Inf - X --> {+/-}Inf
5728 if (match(Op0, m_Inf()))
5729 return Op0;
5730
5731 // With nnan: X - {+/-}Inf --> {-/+}Inf
5732 if (match(Op1, m_Inf()))
5733 return foldConstant(Instruction::FNeg, Op1, Q);
5734 }
5735
5736 // Y - (Y - X) --> X
5737 // (X + Y) - Y --> X
5738 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5739 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5740 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5741 return X;
5742
5743 return nullptr;
5744 }
5745
5746 static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5747 const SimplifyQuery &Q, unsigned MaxRecurse,
5748 fp::ExceptionBehavior ExBehavior,
5749 RoundingMode Rounding) {
5750 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5751 return C;
5752
5753 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5754 return nullptr;
5755
5756 // Canonicalize special constants as operand 1.
5757 if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5758 std::swap(Op0, Op1);
5759
5760 // X * 1.0 --> X
5761 if (match(Op1, m_FPOne()))
5762 return Op0;
5763
5764 if (match(Op1, m_AnyZeroFP())) {
5765 // X * 0.0 --> 0.0 (with nnan and nsz)
5766 if (FMF.noNaNs() && FMF.noSignedZeros())
5767 return ConstantFP::getZero(Op0->getType());
5768
5769 // +normal number * (-)0.0 --> (-)0.0
5770 if (isKnownNeverInfOrNaN(Op0, Q.DL, Q.TLI, 0, Q.AC, Q.CxtI, Q.DT) &&
5771 // TODO: Check SignBit from computeKnownFPClass when it's more complete.
5772 SignBitMustBeZero(Op0, Q.DL, Q.TLI))
5773 return Op1;
5774 }
5775
5776 // sqrt(X) * sqrt(X) --> X, if we can:
5777 // 1. Remove the intermediate rounding (reassociate).
5778 // 2. Ignore non-zero negative numbers because sqrt would produce NAN.
5779 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5780 Value *X;
5781 if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5782 FMF.noNaNs() && FMF.noSignedZeros())
5783 return X;
5784
5785 return nullptr;
5786 }
5787
5788 /// Given operands for an FMul, see if we can fold the result. If not, this returns null.
5789 static Value *
5790 simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5791 const SimplifyQuery &Q, unsigned MaxRecurse,
5792 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5793 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5794 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5795 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5796 return C;
5797
5798 // Now apply simplifications that do not require rounding.
5799 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5800 }
5801
5802 Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5803 const SimplifyQuery &Q,
5804 fp::ExceptionBehavior ExBehavior,
5805 RoundingMode Rounding) {
5806 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5807 Rounding);
5808 }
5809
5810 Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5811 const SimplifyQuery &Q,
5812 fp::ExceptionBehavior ExBehavior,
5813 RoundingMode Rounding) {
5814 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5815 Rounding);
5816 }
5817
5818 Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5819 const SimplifyQuery &Q,
5820 fp::ExceptionBehavior ExBehavior,
5821 RoundingMode Rounding) {
5822 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5823 Rounding);
5824 }
5825
5826 Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5827 const SimplifyQuery &Q,
5828 fp::ExceptionBehavior ExBehavior,
5829 RoundingMode Rounding) {
5830 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5831 Rounding);
5832 }
5833
5834 static Value *
5835 simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5836 const SimplifyQuery &Q, unsigned,
5837 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5838 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5839 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5840 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5841 return C;
5842
5843 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5844 return C;
5845
5846 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5847 return nullptr;
5848
5849 // X / 1.0 -> X
5850 if (match(Op1, m_FPOne()))
5851 return Op0;
5852
5853 // 0 / X -> 0
5854 // Requires that NaNs are off (X could be zero) and signed zeroes are
5855 // ignored (X could be positive or negative, so the output sign is unknown).
5856 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5857 return ConstantFP::getZero(Op0->getType());
5858
5859 if (FMF.noNaNs()) {
5860 // X / X -> 1.0 is legal when NaNs are ignored.
5861 // We can ignore infinities because INF/INF is NaN.
5862 if (Op0 == Op1)
5863 return ConstantFP::get(Op0->getType(), 1.0);
5864
5865 // (X * Y) / Y --> X if we can reassociate to the above form.
5866 Value *X;
5867 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5868 return X;
5869
5870 // -X / X -> -1.0 and
5871 // X / -X -> -1.0 are legal when NaNs are ignored.
5872 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5873 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5874 match(Op1, m_FNegNSZ(m_Specific(Op0))))
5875 return ConstantFP::get(Op0->getType(), -1.0);
5876
5877 // nnan ninf X / [-]0.0 -> poison
5878 if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
5879 return PoisonValue::get(Op1->getType());
5880 }
5881
5882 return nullptr;
5883 }
5884
5885 Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5886 const SimplifyQuery &Q,
5887 fp::ExceptionBehavior ExBehavior,
5888 RoundingMode Rounding) {
5889 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5890 Rounding);
5891 }
5892
5893 static Value *
5894 simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5895 const SimplifyQuery &Q, unsigned,
5896 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5897 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5898 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5899 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5900 return C;
5901
5902 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5903 return C;
5904
5905 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5906 return nullptr;
5907
5908 // Unlike fdiv, the result of frem always matches the sign of the dividend.
5909 // The constant match may include undef elements in a vector, so return a full
5910 // zero constant as the result.
5911 if (FMF.noNaNs()) {
5912 // +0 % X -> 0
5913 if (match(Op0, m_PosZeroFP()))
5914 return ConstantFP::getZero(Op0->getType());
5915 // -0 % X -> -0
5916 if (match(Op0, m_NegZeroFP()))
5917 return ConstantFP::getNegativeZero(Op0->getType());
5918 }
5919
5920 return nullptr;
5921 }
5922
5923 Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5924 const SimplifyQuery &Q,
5925 fp::ExceptionBehavior ExBehavior,
5926 RoundingMode Rounding) {
5927 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5928 Rounding);
5929 }
5930
5931 //=== Helper functions for higher up the class hierarchy.
5932
5933 /// Given the operand for a UnaryOperator, see if we can fold the result.
5934 /// If not, this returns null.
5935 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5936 unsigned MaxRecurse) {
5937 switch (Opcode) {
5938 case Instruction::FNeg:
5939 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5940 default:
5941 llvm_unreachable("Unexpected opcode");
5942 }
5943 }
5944
5945 /// Given the operand for a UnaryOperator, see if we can fold the result.
5946 /// If not, this returns null.
5947 /// Try to use FastMathFlags when folding the result.
5948 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5949 const FastMathFlags &FMF, const SimplifyQuery &Q,
5950 unsigned MaxRecurse) {
5951 switch (Opcode) {
5952 case Instruction::FNeg:
5953 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5954 default:
5955 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5956 }
5957 }
5958
5959 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5960 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5961 }
5962
5963 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5964 const SimplifyQuery &Q) {
5965 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5966 }
5967
5968 /// Given operands for a BinaryOperator, see if we can fold the result.
5969 /// If not, this returns null.
5970 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5971 const SimplifyQuery &Q, unsigned MaxRecurse) {
5972 switch (Opcode) {
5973 case Instruction::Add:
5974 return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5975 MaxRecurse);
5976 case Instruction::Sub:
5977 return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5978 MaxRecurse);
5979 case Instruction::Mul:
5980 return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5981 MaxRecurse);
5982 case Instruction::SDiv:
5983 return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5984 case Instruction::UDiv:
5985 return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5986 case Instruction::SRem:
5987 return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
5988 case Instruction::URem:
5989 return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
5990 case Instruction::Shl:
5991 return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5992 MaxRecurse);
5993 case Instruction::LShr:
5994 return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5995 case Instruction::AShr:
5996 return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5997 case Instruction::And:
5998 return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
5999 case Instruction::Or:
6000 return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
6001 case Instruction::Xor:
6002 return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
6003 case Instruction::FAdd:
6004 return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6005 case Instruction::FSub:
6006 return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6007 case Instruction::FMul:
6008 return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6009 case Instruction::FDiv:
6010 return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6011 case Instruction::FRem:
6012 return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6013 default:
6014 llvm_unreachable("Unexpected opcode");
6015 }
6016 }
6017
6018 /// Given operands for a BinaryOperator, see if we can fold the result.
6019 /// If not, this returns null.
6020 /// Try to use FastMathFlags when folding the result.
6021 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6022 const FastMathFlags &FMF, const SimplifyQuery &Q,
6023 unsigned MaxRecurse) {
6024 switch (Opcode) {
6025 case Instruction::FAdd:
6026 return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
6027 case Instruction::FSub:
6028 return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
6029 case Instruction::FMul:
6030 return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
6031 case Instruction::FDiv:
6032 return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
6033 default:
6034 return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
6035 }
6036 }
6037
6038 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6039 const SimplifyQuery &Q) {
6040 return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
6041 }
6042
6043 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
6044 FastMathFlags FMF, const SimplifyQuery &Q) {
6045 return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
6046 }
6047
6048 /// Given operands for a CmpInst, see if we can fold the result.
6049 static Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
6050 const SimplifyQuery &Q, unsigned MaxRecurse) {
6051 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
6052 return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
6053 return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
6054 }
6055
6056 Value *llvm::simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
6057 const SimplifyQuery &Q) {
6058 return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
6059 }
6060
6061 static bool isIdempotent(Intrinsic::ID ID) {
6062 switch (ID) {
6063 default:
6064 return false;
6065
6066 // Unary idempotent: f(f(x)) = f(x)
6067 case Intrinsic::fabs:
6068 case Intrinsic::floor:
6069 case Intrinsic::ceil:
6070 case Intrinsic::trunc:
6071 case Intrinsic::rint:
6072 case Intrinsic::nearbyint:
6073 case Intrinsic::round:
6074 case Intrinsic::roundeven:
6075 case Intrinsic::canonicalize:
6076 case Intrinsic::arithmetic_fence:
6077 return true;
6078 }
6079 }
6080
6081 /// Return true if the intrinsic rounds a floating-point value to an integral
6082 /// floating-point value (not an integer type).
6083 static bool removesFPFraction(Intrinsic::ID ID) {
6084 switch (ID) {
6085 default:
6086 return false;
6087
6088 case Intrinsic::floor:
6089 case Intrinsic::ceil:
6090 case Intrinsic::trunc:
6091 case Intrinsic::rint:
6092 case Intrinsic::nearbyint:
6093 case Intrinsic::round:
6094 case Intrinsic::roundeven:
6095 return true;
6096 }
6097 }
6098
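/// Try to fold a call to llvm.load.relative with constant operands. If the
/// i32 value loaded from Ptr+Offset is the constant expression
/// "ptrtoint(Target) - Ptr", the relative load resolves directly to Target.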
6099 static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
6100 const DataLayout &DL) {
6101 GlobalValue *PtrSym;
6102 APInt PtrOffset;
6103 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
6104 return nullptr;
6105
6106 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
6107
6108 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
6109 if (!OffsetConstInt || OffsetConstInt->getBitWidth() > 64)
6110 return nullptr;
6111
6112 APInt OffsetInt = OffsetConstInt->getValue().sextOrTrunc(
6113 DL.getIndexTypeSizeInBits(Ptr->getType()));
6114 if (OffsetInt.srem(4) != 0)
6115 return nullptr;
6116
6117 Constant *Loaded = ConstantFoldLoadFromConstPtr(Ptr, Int32Ty, OffsetInt, DL);
6118 if (!Loaded)
6119 return nullptr;
6120
6121 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
6122 if (!LoadedCE)
6123 return nullptr;
6124
6125 if (LoadedCE->getOpcode() == Instruction::Trunc) {
6126 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6127 if (!LoadedCE)
6128 return nullptr;
6129 }
6130
6131 if (LoadedCE->getOpcode() != Instruction::Sub)
6132 return nullptr;
6133
6134 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
6135 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
6136 return nullptr;
6137 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
6138
6139 Constant *LoadedRHS = LoadedCE->getOperand(1);
6140 GlobalValue *LoadedRHSSym;
6141 APInt LoadedRHSOffset;
6142 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
6143 DL) ||
6144 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
6145 return nullptr;
6146
6147 return LoadedLHSPtr;
6148 }
6149
6150 // TODO: Need to pass in FastMathFlags
6151 static Value *simplifyLdexp(Value *Op0, Value *Op1, const SimplifyQuery &Q,
6152 bool IsStrict) {
6153 // ldexp(poison, x) -> poison
6154 // ldexp(x, poison) -> poison
6155 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6156 return Op0;
6157
6158 // ldexp(undef, x) -> nan
6159 if (Q.isUndefValue(Op0))
6160 return ConstantFP::getNaN(Op0->getType());
6161
6162 if (!IsStrict) {
6163 // TODO: Could insert a canonicalize for strict
6164
6165 // ldexp(x, undef) -> x
6166 if (Q.isUndefValue(Op1))
6167 return Op0;
6168 }
6169
6170 const APFloat *C = nullptr;
6171 match(Op0, PatternMatch::m_APFloat(C));
6172
6173 // These cases should be safe, even with strictfp.
6174 // ldexp(0.0, x) -> 0.0
6175 // ldexp(-0.0, x) -> -0.0
6176 // ldexp(inf, x) -> inf
6177 // ldexp(-inf, x) -> -inf
6178 if (C && (C->isZero() || C->isInfinity()))
6179 return Op0;
6180
6181 // These folds would drop a canonicalization; we could do them if we knew how
6182 // to ignore denormal flushes and target handling of NaN payload bits.
6183 if (IsStrict)
6184 return nullptr;
6185
6186 // TODO: Could quiet this with strictfp if the exception mode isn't strict.
6187 if (C && C->isNaN())
6188 return ConstantFP::get(Op0->getType(), C->makeQuiet());
6189
6190 // ldexp(x, 0) -> x
6191
6192 // TODO: Could fold this if we know the exception mode isn't strict and we
6193 // know the denormal mode and other target modes.
6194 if (match(Op1, PatternMatch::m_ZeroInt()))
6195 return Op0;
6196
6197 return nullptr;
6198 }
6199
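/// Try to fold an intrinsic call that takes a single operand. Returns null if
/// no simplification was found.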
6200 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
6201 const SimplifyQuery &Q,
6202 const CallBase *Call) {
6203 // Idempotent functions return the same result when called repeatedly.
6204 Intrinsic::ID IID = F->getIntrinsicID();
6205 if (isIdempotent(IID))
6206 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
6207 if (II->getIntrinsicID() == IID)
6208 return II;
6209
6210 if (removesFPFraction(IID)) {
6211 // Converting from int or calling a rounding function always results in a
6212 // finite integral number or infinity. For those inputs, rounding functions
6213 // always return the same value, so the (2nd) rounding is eliminated. Ex:
6214 // floor (sitofp x) -> sitofp x
6215 // round (ceil x) -> ceil x
6216 auto *II = dyn_cast<IntrinsicInst>(Op0);
6217 if ((II && removesFPFraction(II->getIntrinsicID())) ||
6218 match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
6219 return Op0;
6220 }
6221
6222 Value *X;
6223 switch (IID) {
6224 case Intrinsic::fabs:
6225 if (SignBitMustBeZero(Op0, Q.DL, Q.TLI))
6226 return Op0;
6227 break;
6228 case Intrinsic::bswap:
6229 // bswap(bswap(x)) -> x
6230 if (match(Op0, m_BSwap(m_Value(X))))
6231 return X;
6232 break;
6233 case Intrinsic::bitreverse:
6234 // bitreverse(bitreverse(x)) -> x
6235 if (match(Op0, m_BitReverse(m_Value(X))))
6236 return X;
6237 break;
6238 case Intrinsic::ctpop: {
6239 // ctpop(X) -> 1 iff X is non-zero power of 2.
6240 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
6241 Q.DT))
6242 return ConstantInt::get(Op0->getType(), 1);
6243 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
6244 // ctpop(and X, 1) --> and X, 1
6245 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
6246 if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
6247 Q))
6248 return Op0;
6249 break;
6250 }
6251 case Intrinsic::exp:
6252 // exp(log(x)) -> x
6253 if (Call->hasAllowReassoc() &&
6254 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
6255 return X;
6256 break;
6257 case Intrinsic::exp2:
6258 // exp2(log2(x)) -> x
6259 if (Call->hasAllowReassoc() &&
6260 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
6261 return X;
6262 break;
6263 case Intrinsic::exp10:
6264 // exp10(log10(x)) -> x
6265 if (Call->hasAllowReassoc() &&
6266 match(Op0, m_Intrinsic<Intrinsic::log10>(m_Value(X))))
6267 return X;
6268 break;
6269 case Intrinsic::log:
6270 // log(exp(x)) -> x
6271 if (Call->hasAllowReassoc() &&
6272 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
6273 return X;
6274 break;
6275 case Intrinsic::log2:
6276 // log2(exp2(x)) -> x
6277 if (Call->hasAllowReassoc() &&
6278 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
6279 match(Op0,
6280 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
6281 return X;
6282 break;
6283 case Intrinsic::log10:
6284 // log10(pow(10.0, x)) -> x
6285 // log10(exp10(x)) -> x
6286 if (Call->hasAllowReassoc() &&
6287 (match(Op0, m_Intrinsic<Intrinsic::exp10>(m_Value(X))) ||
6288 match(Op0,
6289 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X)))))
6290 return X;
6291 break;
6292 case Intrinsic::experimental_vector_reverse:
6293 // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
6294 if (match(Op0, m_VecReverse(m_Value(X))))
6295 return X;
6296 // experimental.vector.reverse(splat(X)) -> splat(X)
6297 if (isSplatValue(Op0))
6298 return Op0;
6299 break;
6300 case Intrinsic::frexp: {
6301 // Frexp is idempotent with the added complication of the struct return.
6302 if (match(Op0, m_ExtractValue<0>(m_Value(X)))) {
6303 if (match(X, m_Intrinsic<Intrinsic::frexp>(m_Value())))
6304 return X;
6305 }
6306
6307 break;
6308 }
6309 default:
6310 break;
6311 }
6312
6313 return nullptr;
6314 }
6315
6316 /// Given a min/max intrinsic, see if it can be removed based on having an
6317 /// operand that is another min/max intrinsic with shared operand(s). The caller
6318 /// is expected to swap the operand arguments to handle commutation.
6319 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6320 Value *X, *Y;
6321 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6322 return nullptr;
6323
6324 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6325 if (!MM0)
6326 return nullptr;
6327 Intrinsic::ID IID0 = MM0->getIntrinsicID();
6328
6329 if (Op1 == X || Op1 == Y ||
6330 match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6331 // max (max X, Y), X --> max X, Y
6332 if (IID0 == IID)
6333 return MM0;
6334 // max (min X, Y), X --> X
6335 if (IID0 == getInverseMinMaxIntrinsic(IID))
6336 return Op1;
6337 }
6338 return nullptr;
6339 }
6340
6341 /// Given a floating-point min/max intrinsic (minnum/maxnum/minimum/maximum), see
6342 /// if it can be removed based on having an operand that is another such intrinsic
6343 /// with shared operand(s). The caller is expected to swap the operand arguments to handle commutation.
6344 static Value *foldMinimumMaximumSharedOp(Intrinsic::ID IID, Value *Op0,
6345 Value *Op1) {
6346 assert((IID == Intrinsic::maxnum || IID == Intrinsic::minnum ||
6347 IID == Intrinsic::maximum || IID == Intrinsic::minimum) &&
6348 "Unsupported intrinsic");
6349
6350 auto *M0 = dyn_cast<IntrinsicInst>(Op0);
6351 // If Op0 is not the same intrinsic as IID, do not process it.
6352 // This differs from the integer min/max handling: we do not process a case
6353 // like max(min(X,Y),min(X,Y)) => min(X,Y) here, but it can be handled by GVN.
6354 if (!M0 || M0->getIntrinsicID() != IID)
6355 return nullptr;
6356 Value *X0 = M0->getOperand(0);
6357 Value *Y0 = M0->getOperand(1);
6358 // Simple case, m(m(X,Y), X) => m(X, Y)
6359 // m(m(X,Y), Y) => m(X, Y)
6360 // For minimum/maximum, X is NaN => m(NaN, Y) == NaN and m(NaN, NaN) == NaN.
6361 // For minimum/maximum, Y is NaN => m(X, NaN) == NaN and m(NaN, NaN) == NaN.
6362 // For minnum/maxnum, X is NaN => m(NaN, Y) == Y and m(Y, Y) == Y.
6363 // For minnum/maxnum, Y is NaN => m(X, NaN) == X and m(X, NaN) == X.
6364 if (X0 == Op1 || Y0 == Op1)
6365 return M0;
6366
6367 auto *M1 = dyn_cast<IntrinsicInst>(Op1);
6368 if (!M1)
6369 return nullptr;
6370 Value *X1 = M1->getOperand(0);
6371 Value *Y1 = M1->getOperand(1);
6372 Intrinsic::ID IID1 = M1->getIntrinsicID();
6373 // We have the case m(m(X,Y), m'(X,Y)), taking into account that m' is commutative.
6374 // If m' is m or the inversion of m, then m(m(X,Y), m'(X,Y)) == m(X,Y).
6375 // For minimum/maximum, X is NaN => m(NaN,Y) == m'(NaN, Y) == NaN.
6376 // For minimum/maximum, Y is NaN => m(X,NaN) == m'(X, NaN) == NaN.
6377 // For minnum/maxnum, X is NaN => m(NaN,Y) == m'(NaN, Y) == Y.
6378 // For minnum/maxnum, Y is NaN => m(X,NaN) == m'(X, NaN) == X.
6379 if ((X0 == X1 && Y0 == Y1) || (X0 == Y1 && Y0 == X1))
6380 if (IID1 == IID || getInverseMinMaxIntrinsic(IID1) == IID)
6381 return M0;
6382
6383 return nullptr;
6384 }
6385
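/// Try to fold an intrinsic call that takes two operands. Returns null if no
/// simplification was found.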
6386 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
6387 const SimplifyQuery &Q,
6388 const CallBase *Call) {
6389 Intrinsic::ID IID = F->getIntrinsicID();
6390 Type *ReturnType = F->getReturnType();
6391 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6392 switch (IID) {
6393 case Intrinsic::abs:
6394 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6395 // It is always ok to pick the earlier abs. We'll just lose nsw if it's only
6396 // on the outer abs.
6397 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6398 return Op0;
6399 break;
6400
6401 case Intrinsic::cttz: {
6402 Value *X;
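// cttz (1 << X) --> X: the single set bit sits exactly X positions up, so the
// value has X trailing zeros.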
6403 if (match(Op0, m_Shl(m_One(), m_Value(X))))
6404 return X;
6405 break;
6406 }
6407 case Intrinsic::ctlz: {
6408 Value *X;
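// ctlz (lshr C, X) --> X when C is negative: C has its sign bit set, so the
// shifted value has exactly X leading zeros.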
6409 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6410 return X;
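// ctlz (ashr C, X) --> 0 when C is negative: the arithmetic shift keeps the
// sign bit set.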
6411 if (match(Op0, m_AShr(m_Negative(), m_Value())))
6412 return Constant::getNullValue(ReturnType);
6413 break;
6414 }
6415 case Intrinsic::ptrmask: {
6416 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
6417 return PoisonValue::get(Op0->getType());
6418
6419 // NOTE: We can't apply this simplification based on the value of Op1
6420 // because we need to preserve provenance.
6421 if (Q.isUndefValue(Op0) || match(Op0, m_Zero()))
6422 return Constant::getNullValue(Op0->getType());
6423
6424 assert(Op1->getType()->getScalarSizeInBits() ==
6425 Q.DL.getIndexTypeSizeInBits(Op0->getType()) &&
6426 "Invalid mask width");
6427 // If index-width (mask size) is less than pointer-size then mask is
6428 // 1-extended.
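// ptrmask (P, ptrtoint P) --> P: masking a pointer with its own address bits
// clears nothing.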
6429 if (match(Op1, m_PtrToInt(m_Specific(Op0))))
6430 return Op0;
6431
6432 // NOTE: We may have attributes associated with the return value of the
6433 // llvm.ptrmask intrinsic that will be lost when we just return the
6434 // operand. We should try to preserve them.
6435 if (match(Op1, m_AllOnes()) || Q.isUndefValue(Op1))
6436 return Op0;
6437
6438 Constant *C;
6439 if (match(Op1, m_ImmConstant(C))) {
6440 KnownBits PtrKnown = computeKnownBits(Op0, /*Depth=*/0, Q);
6441 // See if we are only masking off bits we know are already zero due to
6442 // alignment.
6443 APInt IrrelevantPtrBits =
6444 PtrKnown.Zero.zextOrTrunc(C->getType()->getScalarSizeInBits());
6445 C = ConstantFoldBinaryOpOperands(
6446 Instruction::Or, C, ConstantInt::get(C->getType(), IrrelevantPtrBits),
6447 Q.DL);
6448 if (C != nullptr && C->isAllOnesValue())
6449 return Op0;
6450 }
6451 break;
6452 }
6453 case Intrinsic::smax:
6454 case Intrinsic::smin:
6455 case Intrinsic::umax:
6456 case Intrinsic::umin: {
6457 // If the arguments are the same, this is a no-op.
6458 if (Op0 == Op1)
6459 return Op0;
6460
6461 // Canonicalize immediate constant operand as Op1.
6462 if (match(Op0, m_ImmConstant()))
6463 std::swap(Op0, Op1);
6464
6465 // Assume undef is the limit value.
6466 if (Q.isUndefValue(Op1))
6467 return ConstantInt::get(
6468 ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6469
6470 const APInt *C;
6471 if (match(Op1, m_APIntAllowUndef(C))) {
6472 // Clamp to limit value. For example:
6473 // umax(i8 %x, i8 255) --> 255
6474 if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6475 return ConstantInt::get(ReturnType, *C);
6476
6477 // If the constant op is the opposite of the limit value, the other must
6478 // be larger/smaller or equal. For example:
6479 // umin(i8 %x, i8 255) --> %x
6480 if (*C == MinMaxIntrinsic::getSaturationPoint(
6481 getInverseMinMaxIntrinsic(IID), BitWidth))
6482 return Op0;
6483
6484 // Remove nested call if constant operands allow it. Example:
6485 // max (max X, 7), 5 -> max X, 7
6486 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6487 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6488 // TODO: loosen undef/splat restrictions for vector constants.
6489 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6490 const APInt *InnerC;
6491 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6492 ICmpInst::compare(*InnerC, *C,
6493 ICmpInst::getNonStrictPredicate(
6494 MinMaxIntrinsic::getPredicate(IID))))
6495 return Op0;
6496 }
6497 }
6498
6499 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6500 return V;
6501 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6502 return V;
6503
6504 ICmpInst::Predicate Pred =
6505 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6506 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6507 return Op0;
6508 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6509 return Op1;
6510
6511 break;
6512 }
6513 case Intrinsic::usub_with_overflow:
6514 case Intrinsic::ssub_with_overflow:
6515 // X - X -> { 0, false }
6516 // X - undef -> { 0, false }
6517 // undef - X -> { 0, false }
6518 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6519 return Constant::getNullValue(ReturnType);
6520 break;
6521 case Intrinsic::uadd_with_overflow:
6522 case Intrinsic::sadd_with_overflow:
6523 // X + undef -> { -1, false }
6524 // undef + x -> { -1, false }
6525 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6526 return ConstantStruct::get(
6527 cast<StructType>(ReturnType),
6528 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6529 Constant::getNullValue(ReturnType->getStructElementType(1))});
6530 }
6531 break;
6532 case Intrinsic::umul_with_overflow:
6533 case Intrinsic::smul_with_overflow:
6534 // 0 * X -> { 0, false }
6535 // X * 0 -> { 0, false }
6536 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6537 return Constant::getNullValue(ReturnType);
6538 // undef * X -> { 0, false }
6539 // X * undef -> { 0, false }
6540 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6541 return Constant::getNullValue(ReturnType);
6542 break;
6543 case Intrinsic::uadd_sat:
6544 // sat(MAX + X) -> MAX
6545 // sat(X + MAX) -> MAX
6546 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6547 return Constant::getAllOnesValue(ReturnType);
6548 [[fallthrough]];
6549 case Intrinsic::sadd_sat:
6550 // sat(X + undef) -> -1
6551 // sat(undef + X) -> -1
6552 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6553 // For signed: Assume undef is ~X, in which case X + ~X = -1.
6554 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6555 return Constant::getAllOnesValue(ReturnType);
6556
6557 // X + 0 -> X
6558 if (match(Op1, m_Zero()))
6559 return Op0;
6560 // 0 + X -> X
6561 if (match(Op0, m_Zero()))
6562 return Op1;
6563 break;
6564 case Intrinsic::usub_sat:
6565 // sat(0 - X) -> 0, sat(X - MAX) -> 0
6566 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6567 return Constant::getNullValue(ReturnType);
6568 [[fallthrough]];
6569 case Intrinsic::ssub_sat:
6570 // X - X -> 0, X - undef -> 0, undef - X -> 0
6571 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6572 return Constant::getNullValue(ReturnType);
6573 // X - 0 -> X
6574 if (match(Op1, m_Zero()))
6575 return Op0;
6576 break;
6577 case Intrinsic::load_relative:
6578 if (auto *C0 = dyn_cast<Constant>(Op0))
6579 if (auto *C1 = dyn_cast<Constant>(Op1))
6580 return simplifyRelativeLoad(C0, C1, Q.DL);
6581 break;
6582 case Intrinsic::powi:
6583 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6584 // powi(x, 0) -> 1.0
6585 if (Power->isZero())
6586 return ConstantFP::get(Op0->getType(), 1.0);
6587 // powi(x, 1) -> x
6588 if (Power->isOne())
6589 return Op0;
6590 }
6591 break;
6592 case Intrinsic::ldexp:
6593 return simplifyLdexp(Op0, Op1, Q, false);
6594 case Intrinsic::copysign:
6595 // copysign X, X --> X
6596 if (Op0 == Op1)
6597 return Op0;
6598 // copysign -X, X --> X
6599 // copysign X, -X --> -X
6600 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6601 match(Op1, m_FNeg(m_Specific(Op0))))
6602 return Op1;
6603 break;
6604 case Intrinsic::is_fpclass: {
6605 if (isa<PoisonValue>(Op0))
6606 return PoisonValue::get(ReturnType);
6607
6608 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6609 // If all tests are made, it doesn't matter what the value is.
6610 if ((Mask & fcAllFlags) == fcAllFlags)
6611 return ConstantInt::get(ReturnType, true);
6612 if ((Mask & fcAllFlags) == 0)
6613 return ConstantInt::get(ReturnType, false);
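// With a partial mask, an undef input may fall inside or outside the tested
// classes, so the result may be either value; return undef.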
6614 if (Q.isUndefValue(Op0))
6615 return UndefValue::get(ReturnType);
6616 break;
6617 }
6618 case Intrinsic::maxnum:
6619 case Intrinsic::minnum:
6620 case Intrinsic::maximum:
6621 case Intrinsic::minimum: {
6622 // If the arguments are the same, this is a no-op.
6623 if (Op0 == Op1)
6624 return Op0;
6625
6626 // Canonicalize constant operand as Op1.
6627 if (isa<Constant>(Op0))
6628 std::swap(Op0, Op1);
6629
6630 // If an argument is undef, return the other argument.
6631 if (Q.isUndefValue(Op1))
6632 return Op0;
6633
6634 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6635 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6636
6637 // minnum(X, nan) -> X
6638 // maxnum(X, nan) -> X
6639 // minimum(X, nan) -> nan
6640 // maximum(X, nan) -> nan
6641 if (match(Op1, m_NaN()))
6642 return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6643
6644 // In the following folds, inf can be replaced with the largest finite
6645 // float, if the ninf flag is set.
6646 const APFloat *C;
6647 if (match(Op1, m_APFloat(C)) &&
6648 (C->isInfinity() || (Call->hasNoInfs() && C->isLargest()))) {
6649 // minnum(X, -inf) -> -inf
6650 // maxnum(X, +inf) -> +inf
6651 // minimum(X, -inf) -> -inf if nnan
6652 // maximum(X, +inf) -> +inf if nnan
6653 if (C->isNegative() == IsMin && (!PropagateNaN || Call->hasNoNaNs()))
6654 return ConstantFP::get(ReturnType, *C);
6655
6656 // minnum(X, +inf) -> X if nnan
6657 // maxnum(X, -inf) -> X if nnan
6658 // minimum(X, +inf) -> X
6659 // maximum(X, -inf) -> X
6660 if (C->isNegative() != IsMin && (PropagateNaN || Call->hasNoNaNs()))
6661 return Op0;
6662 }
6663
6664 // Min/max of the same operation with common operand:
6665 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
6666 if (Value *V = foldMinimumMaximumSharedOp(IID, Op0, Op1))
6667 return V;
6668 if (Value *V = foldMinimumMaximumSharedOp(IID, Op1, Op0))
6669 return V;
6670
6671 break;
6672 }
6673 case Intrinsic::vector_extract: {
6674 Type *ReturnType = F->getReturnType();
6675
6676 // (extract_vector (insert_vector _, X, 0), 0) -> X
6677 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6678 Value *X = nullptr;
6679 if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
6680 m_Zero())) &&
6681 IdxN == 0 && X->getType() == ReturnType)
6682 return X;
6683
6684 break;
6685 }
6686 default:
6687 break;
6688 }
6689
6690 return nullptr;
6691 }
6692
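/// Simplify an intrinsic call: dispatch on the number of operands, then on the
/// intrinsic ID for calls with three or more arguments.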
6693 static Value *simplifyIntrinsic(CallBase *Call, Value *Callee,
6694 ArrayRef<Value *> Args,
6695 const SimplifyQuery &Q) {
6696 // Operand bundles should not be in Args.
6697 assert(Call->arg_size() == Args.size());
6698 unsigned NumOperands = Args.size();
6699 Function *F = cast<Function>(Callee);
6700 Intrinsic::ID IID = F->getIntrinsicID();
6701
6702 // Most of the intrinsics with no operands have some kind of side effect.
6703 // Don't simplify.
6704 if (!NumOperands) {
6705 switch (IID) {
6706 case Intrinsic::vscale: {
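// If vscale is known to be a single constant value (e.g. from the
// vscale_range function attribute), fold the call to that constant.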
6707 Type *RetTy = F->getReturnType();
6708 ConstantRange CR = getVScaleRange(Call->getFunction(), 64);
6709 if (const APInt *C = CR.getSingleElement())
6710 return ConstantInt::get(RetTy, C->getZExtValue());
6711 return nullptr;
6712 }
6713 default:
6714 return nullptr;
6715 }
6716 }
6717
6718 if (NumOperands == 1)
6719 return simplifyUnaryIntrinsic(F, Args[0], Q, Call);
6720
6721 if (NumOperands == 2)
6722 return simplifyBinaryIntrinsic(F, Args[0], Args[1], Q, Call);
6723
6724 // Handle intrinsics with 3 or more arguments.
6725 switch (IID) {
6726 case Intrinsic::masked_load:
6727 case Intrinsic::masked_gather: {
6728 Value *MaskArg = Args[2];
6729 Value *PassthruArg = Args[3];
6730 // If the mask is all zeros or undef, the "passthru" argument is the result.
6731 if (maskIsAllZeroOrUndef(MaskArg))
6732 return PassthruArg;
6733 return nullptr;
6734 }
6735 case Intrinsic::fshl:
6736 case Intrinsic::fshr: {
6737 Value *Op0 = Args[0], *Op1 = Args[1], *ShAmtArg = Args[2];
6738
6739 // If both operands are undef, the result is undef.
6740 if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
6741 return UndefValue::get(F->getReturnType());
6742
6743 // If shift amount is undef, assume it is zero.
6744 if (Q.isUndefValue(ShAmtArg))
6745 return Args[IID == Intrinsic::fshl ? 0 : 1];
6746
6747 const APInt *ShAmtC;
6748 if (match(ShAmtArg, m_APInt(ShAmtC))) {
6749 // If there's effectively no shift, return the 1st arg or 2nd arg.
6750 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
6751 if (ShAmtC->urem(BitWidth).isZero())
6752 return Args[IID == Intrinsic::fshl ? 0 : 1];
6753 }
6754
6755 // Rotating zero by anything is zero.
6756 if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
6757 return ConstantInt::getNullValue(F->getReturnType());
6758
6759 // Rotating -1 by anything is -1.
6760 if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
6761 return ConstantInt::getAllOnesValue(F->getReturnType());
6762
6763 return nullptr;
6764 }
6765 case Intrinsic::experimental_constrained_fma: {
6766 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6767 if (Value *V = simplifyFPOp(Args, {}, Q, *FPI->getExceptionBehavior(),
6768 *FPI->getRoundingMode()))
6769 return V;
6770 return nullptr;
6771 }
6772 case Intrinsic::fma:
6773 case Intrinsic::fmuladd: {
6774 if (Value *V = simplifyFPOp(Args, {}, Q, fp::ebIgnore,
6775 RoundingMode::NearestTiesToEven))
6776 return V;
6777 return nullptr;
6778 }
6779 case Intrinsic::smul_fix:
6780 case Intrinsic::smul_fix_sat: {
6781 Value *Op0 = Args[0];
6782 Value *Op1 = Args[1];
6783 Value *Op2 = Args[2];
6784 Type *ReturnType = F->getReturnType();
6785
6786 // Canonicalize constant operand as Op1 (ConstantFolding handles the case
6787 // when both Op0 and Op1 are constant so we do not care about that special
6788 // case here).
6789 if (isa<Constant>(Op0))
6790 std::swap(Op0, Op1);
6791
6792 // X * 0 -> 0
6793 if (match(Op1, m_Zero()))
6794 return Constant::getNullValue(ReturnType);
6795
6796 // X * undef -> 0
6797 if (Q.isUndefValue(Op1))
6798 return Constant::getNullValue(ReturnType);
6799
6800 // X * (1 << Scale) -> X
6801 APInt ScaledOne =
6802 APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6803 cast<ConstantInt>(Op2)->getZExtValue());
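// Skip the fold if (1 << Scale) lands in the sign bit: that constant is
// negative under the signed fixed-point interpretation used by these
// intrinsics, so it does not represent 1.0.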
6804 if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6805 return Op0;
6806
6807 return nullptr;
6808 }
6809 case Intrinsic::vector_insert: {
6810 Value *Vec = Args[0];
6811 Value *SubVec = Args[1];
6812 Value *Idx = Args[2];
6813 Type *ReturnType = F->getReturnType();
6814
6815 // (insert_vector Y, (extract_vector X, 0), 0) -> X
6816 // where: Y is X, or Y is undef
6817 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6818 Value *X = nullptr;
6819 if (match(SubVec,
6820 m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
6821 (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6822 X->getType() == ReturnType)
6823 return X;
6824
6825 return nullptr;
6826 }
6827 case Intrinsic::experimental_constrained_fadd: {
6828 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6829 return simplifyFAddInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6830 *FPI->getExceptionBehavior(),
6831 *FPI->getRoundingMode());
6832 }
6833 case Intrinsic::experimental_constrained_fsub: {
6834 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6835 return simplifyFSubInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6836 *FPI->getExceptionBehavior(),
6837 *FPI->getRoundingMode());
6838 }
6839 case Intrinsic::experimental_constrained_fmul: {
6840 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6841 return simplifyFMulInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6842 *FPI->getExceptionBehavior(),
6843 *FPI->getRoundingMode());
6844 }
6845 case Intrinsic::experimental_constrained_fdiv: {
6846 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6847 return simplifyFDivInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6848 *FPI->getExceptionBehavior(),
6849 *FPI->getRoundingMode());
6850 }
6851 case Intrinsic::experimental_constrained_frem: {
6852 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6853 return simplifyFRemInst(Args[0], Args[1], FPI->getFastMathFlags(), Q,
6854 *FPI->getExceptionBehavior(),
6855 *FPI->getRoundingMode());
6856 }
6857 case Intrinsic::experimental_constrained_ldexp:
6858 return simplifyLdexp(Args[0], Args[1], Q, true);
6859 default:
6860 return nullptr;
6861 }
6862 }
6863
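/// If the callee is a function that is known to be constant-foldable and all
/// of the (non-metadata) arguments are constants, fold the call; otherwise
/// return null.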
6864 static Value *tryConstantFoldCall(CallBase *Call, Value *Callee,
6865 ArrayRef<Value *> Args,
6866 const SimplifyQuery &Q) {
6867 auto *F = dyn_cast<Function>(Callee);
6868 if (!F || !canConstantFoldCallTo(Call, F))
6869 return nullptr;
6870
6871 SmallVector<Constant *, 4> ConstantArgs;
6872 ConstantArgs.reserve(Args.size());
6873 for (Value *Arg : Args) {
6874 Constant *C = dyn_cast<Constant>(Arg);
6875 if (!C) {
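// Metadata operands are skipped rather than treated as non-constant
// arguments that would block folding.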
6876 if (isa<MetadataAsValue>(Arg))
6877 continue;
6878 return nullptr;
6879 }
6880 ConstantArgs.push_back(C);
6881 }
6882
6883 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6884 }
6885
6886 Value *llvm::simplifyCall(CallBase *Call, Value *Callee, ArrayRef<Value *> Args,
6887 const SimplifyQuery &Q) {
6888 // Args should not contain operand bundle operands.
6889 assert(Call->arg_size() == Args.size());
6890
6891 // musttail calls can only be simplified if they are also DCEd.
6892 // As we can't guarantee this here, don't simplify them.
6893 if (Call->isMustTailCall())
6894 return nullptr;
6895
6896 // call undef -> poison
6897 // call null -> poison
6898 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6899 return PoisonValue::get(Call->getType());
6900
6901 if (Value *V = tryConstantFoldCall(Call, Callee, Args, Q))
6902 return V;
6903
6904 auto *F = dyn_cast<Function>(Callee);
6905 if (F && F->isIntrinsic())
6906 if (Value *Ret = simplifyIntrinsic(Call, Callee, Args, Q))
6907 return Ret;
6908
6909 return nullptr;
6910 }
6911
6912 Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) {
6913 assert(isa<ConstrainedFPIntrinsic>(Call));
6914 SmallVector<Value *, 4> Args(Call->args());
6915 if (Value *V = tryConstantFoldCall(Call, Call->getCalledOperand(), Args, Q))
6916 return V;
6917 if (Value *Ret = simplifyIntrinsic(Call, Call->getCalledOperand(), Args, Q))
6918 return Ret;
6919 return nullptr;
6920 }
6921
6922 /// Given operands for a Freeze, see if we can fold the result.
6923 static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6924 // Use a utility function defined in ValueTracking.
6925 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
6926 return Op0;
6927 // We have room for improvement.
6928 return nullptr;
6929 }
6930
6931 Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6932 return ::simplifyFreezeInst(Op0, Q);
6933 }
6934
6935 Value *llvm::simplifyLoadInst(LoadInst *LI, Value *PtrOp,
6936 const SimplifyQuery &Q) {
6937 if (LI->isVolatile())
6938 return nullptr;
6939
6940 if (auto *PtrOpC = dyn_cast<Constant>(PtrOp))
6941 return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Q.DL);
6942
6943 // We can only fold the load if it is from a constant global with definitive
6944 // initializer. Skip expensive logic if this is not the case.
6945 auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(PtrOp));
6946 if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer())
6947 return nullptr;
6948
6949 // If GlobalVariable's initializer is uniform, then return the constant
6950 // regardless of its offset.
6951 if (Constant *C =
6952 ConstantFoldLoadFromUniformValue(GV->getInitializer(), LI->getType()))
6953 return C;
6954
6955 // Try to convert operand into a constant by stripping offsets while looking
6956 // through invariant.group intrinsics.
6957 APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
6958 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
6959 Q.DL, Offset, /* AllowNonInbounds */ true,
6960 /* AllowInvariantGroup */ true);
6961 if (PtrOp == GV) {
6962 // Index size may have changed due to address space casts.
6963 Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
6964 return ConstantFoldLoadFromConstPtr(GV, LI->getType(), Offset, Q.DL);
6965 }
6966
6967 return nullptr;
6968 }
6969
6970 /// See if we can compute a simplified version of this instruction.
6971 /// If not, this returns null.
6972
6973 static Value *simplifyInstructionWithOperands(Instruction *I,
6974 ArrayRef<Value *> NewOps,
6975 const SimplifyQuery &SQ,
6976 unsigned MaxRecurse) {
6977 assert(I->getFunction() && "instruction should be inserted in a function");
6978 assert((!SQ.CxtI || SQ.CxtI->getFunction() == I->getFunction()) &&
6979 "context instruction should be in the same function");
6980
6981 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
6982
6983 switch (I->getOpcode()) {
6984 default:
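// Opcodes without dedicated handling below can still be folded when every
// operand is a constant.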
6985 if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
6986 SmallVector<Constant *, 8> NewConstOps(NewOps.size());
6987 transform(NewOps, NewConstOps.begin(),
6988 [](Value *V) { return cast<Constant>(V); });
6989 return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
6990 }
6991 return nullptr;
6992 case Instruction::FNeg:
6993 return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q, MaxRecurse);
6994 case Instruction::FAdd:
6995 return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
6996 MaxRecurse);
6997 case Instruction::Add:
6998 return simplifyAddInst(
6999 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7000 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7001 case Instruction::FSub:
7002 return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7003 MaxRecurse);
7004 case Instruction::Sub:
7005 return simplifySubInst(
7006 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7007 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7008 case Instruction::FMul:
7009 return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7010 MaxRecurse);
7011 case Instruction::Mul:
7012 return simplifyMulInst(
7013 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7014 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7015 case Instruction::SDiv:
7016 return simplifySDivInst(NewOps[0], NewOps[1],
7017 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7018 MaxRecurse);
7019 case Instruction::UDiv:
7020 return simplifyUDivInst(NewOps[0], NewOps[1],
7021 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7022 MaxRecurse);
7023 case Instruction::FDiv:
7024 return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7025 MaxRecurse);
7026 case Instruction::SRem:
7027 return simplifySRemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7028 case Instruction::URem:
7029 return simplifyURemInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7030 case Instruction::FRem:
7031 return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q,
7032 MaxRecurse);
7033 case Instruction::Shl:
7034 return simplifyShlInst(
7035 NewOps[0], NewOps[1], Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
7036 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q, MaxRecurse);
7037 case Instruction::LShr:
7038 return simplifyLShrInst(NewOps[0], NewOps[1],
7039 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7040 MaxRecurse);
7041 case Instruction::AShr:
7042 return simplifyAShrInst(NewOps[0], NewOps[1],
7043 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q,
7044 MaxRecurse);
7045 case Instruction::And:
7046 return simplifyAndInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7047 case Instruction::Or:
7048 return simplifyOrInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7049 case Instruction::Xor:
7050 return simplifyXorInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7051 case Instruction::ICmp:
7052 return simplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
7053 NewOps[1], Q, MaxRecurse);
7054 case Instruction::FCmp:
7055 return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
7056 NewOps[1], I->getFastMathFlags(), Q, MaxRecurse);
7057 case Instruction::Select:
7058 return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse);
7059 break;
7060 case Instruction::GetElementPtr: {
7061 auto *GEPI = cast<GetElementPtrInst>(I);
7062 return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
7063 ArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q,
7064 MaxRecurse);
7065 }
7066 case Instruction::InsertValue: {
7067 InsertValueInst *IV = cast<InsertValueInst>(I);
7068 return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q,
7069 MaxRecurse);
7070 }
7071 case Instruction::InsertElement:
7072 return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
7073 case Instruction::ExtractValue: {
7074 auto *EVI = cast<ExtractValueInst>(I);
7075 return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q,
7076 MaxRecurse);
7077 }
7078 case Instruction::ExtractElement:
7079 return simplifyExtractElementInst(NewOps[0], NewOps[1], Q, MaxRecurse);
7080 case Instruction::ShuffleVector: {
7081 auto *SVI = cast<ShuffleVectorInst>(I);
7082 return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
7083 SVI->getShuffleMask(), SVI->getType(), Q,
7084 MaxRecurse);
7085 }
7086 case Instruction::PHI:
7087 return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
7088 case Instruction::Call:
7089 return simplifyCall(
7090 cast<CallInst>(I), NewOps.back(),
7091 NewOps.drop_back(1 + cast<CallInst>(I)->getNumTotalBundleOperands()), Q);
7092 case Instruction::Freeze:
7093 return llvm::simplifyFreezeInst(NewOps[0], Q);
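// Expand a case label for every cast opcode from Instruction.def; all casts
// are handled by simplifyCastInst below.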
7094 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
7095 #include "llvm/IR/Instruction.def"
7096 #undef HANDLE_CAST_INST
7097 return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q,
7098 MaxRecurse);
7099 case Instruction::Alloca:
7100 // No simplifications for Alloca and it can't be constant folded.
7101 return nullptr;
7102 case Instruction::Load:
7103 return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
7104 }
7105 }
7106
7107 Value *llvm::simplifyInstructionWithOperands(Instruction *I,
7108 ArrayRef<Value *> NewOps,
7109 const SimplifyQuery &SQ) {
7110 assert(NewOps.size() == I->getNumOperands() &&
7111 "Number of operands should match the instruction!");
7112 return ::simplifyInstructionWithOperands(I, NewOps, SQ, RecursionLimit);
7113 }
7114
7115 Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ) {
7116 SmallVector<Value *, 8> Ops(I->operands());
7117 Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, RecursionLimit);
7118
7119 /// If called on unreachable code, the instruction may simplify to itself.
7120 /// Make life easier for users by detecting that case here, and returning a
7121 /// safe value instead.
7122 return Result == I ? UndefValue::get(I->getType()) : Result;
7123 }
7124
7125 /// Implementation of recursive simplification through an instruction's
7126 /// uses.
7127 ///
7128 /// This is the common implementation of the recursive simplification routines.
7129 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
7130 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
7131 /// instructions to process and attempt to simplify it using
7132 /// InstructionSimplify. Recursively visited users which could not be
7133 /// simplified themselves are added to the optional UnsimplifiedUsers set for
7134 /// further processing by the caller.
7135 ///
7136 /// This routine returns 'true' only when *it* simplifies something. The passed
7137 /// in simplified value does not count toward this.
7138 static bool replaceAndRecursivelySimplifyImpl(
7139 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
7140 const DominatorTree *DT, AssumptionCache *AC,
7141 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
7142 bool Simplified = false;
7143 SmallSetVector<Instruction *, 8> Worklist;
7144 const DataLayout &DL = I->getModule()->getDataLayout();
7145
7146 // If we have an explicit value to collapse to, do that round of the
7147 // simplification loop by hand initially.
7148 if (SimpleV) {
7149 for (User *U : I->users())
7150 if (U != I)
7151 Worklist.insert(cast<Instruction>(U));
7152
7153 // Replace the instruction with its simplified value.
7154 I->replaceAllUsesWith(SimpleV);
7155
7156 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7157 I->eraseFromParent();
7158 } else {
7159 Worklist.insert(I);
7160 }
7161
7162 // Note that we must test the size on each iteration, the worklist can grow.
7163 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
7164 I = Worklist[Idx];
7165
7166 // See if this instruction simplifies.
7167 SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
7168 if (!SimpleV) {
7169 if (UnsimplifiedUsers)
7170 UnsimplifiedUsers->insert(I);
7171 continue;
7172 }
7173
7174 Simplified = true;
7175
7176 // Stash away all the uses of the old instruction so we can check them for
7177 // recursive simplifications after a RAUW. This is cheaper than checking all
7178 // uses of To on the recursive step in most cases.
7179 for (User *U : I->users())
7180 Worklist.insert(cast<Instruction>(U));
7181
7182 // Replace the instruction with its simplified value.
7183 I->replaceAllUsesWith(SimpleV);
7184
7185 if (!I->isEHPad() && !I->isTerminator() && !I->mayHaveSideEffects())
7186 I->eraseFromParent();
7187 }
7188 return Simplified;
7189 }

bool llvm::replaceAndRecursivelySimplify(
    Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
    const DominatorTree *DT, AssumptionCache *AC,
    SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
  assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
  assert(SimpleV && "Must provide a simplified value.");
  return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
                                           UnsimplifiedUsers);
}
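
// Illustrative sketch (caller-side code, not part of this file's logic): once
// a pass has proven that 'Inst' is equal to some value 'V', it can propagate
// that fact and opportunistically fold the users in one call, e.g.:
//
//   SmallSetVector<Instruction *, 8> Unsimplified;
//   replaceAndRecursivelySimplify(Inst, V, &TLI, &DT, &AC, &Unsimplified);
//
// Users that could not be folded away remain in 'Unsimplified' for the caller
// to revisit; 'TLI', 'DT', and 'AC' are assumed analyses owned by the caller.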

namespace llvm {
const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
  auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
  auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
  auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
  auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
  auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
  auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}

const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
                                         const DataLayout &DL) {
  return {DL, &AR.TLI, &AR.DT, &AR.AC};
}

template <class T, class... TArgs>
const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
                                         Function &F) {
  auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
  auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
  auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
  return {F.getParent()->getDataLayout(), TLI, DT, AC};
}
template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
                                                  Function &);
} // namespace llvm
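
// Illustrative sketch (caller-side code, not part of this file's logic): a
// legacy pass can build a query from whatever analyses happen to be cached and
// fall back to a more conservative simplification when they are missing, e.g.:
//
//   const SimplifyQuery SQ = getBestSimplifyQuery(*this, F);
//   if (Value *V = simplifyInstruction(&I, SQ))
//     I.replaceAllUsesWith(V);
//
// '*this' is assumed to be the Pass, and 'F'/'I' the Function and Instruction
// being visited; unavailable analyses are simply passed as null pointers.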

void InstSimplifyFolder::anchor() {}