//===- InstructionSimplify.cpp - Fold instruction operands ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements routines for folding instructions into simpler forms
// that do not require creating new instructions. This does constant folding
// ("add i32 1, 1" -> "2") but can also handle non-constant operands, either
// returning a constant ("and i32 %x, 0" -> "0") or an already existing value
// ("and i32 %x, %x" -> "%x"). All operands are assumed to have already been
// simplified: this is usually true, and assuming it simplifies the logic (if
// they have not been simplified, the results are still correct, just possibly
// suboptimal).
//
//===----------------------------------------------------------------------===//
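//
// A rough usage sketch (illustrative only; see InstructionSimplify.h for the
// authoritative declarations): a pass holding a SimplifyQuery SQ might do
//
//   for (Instruction &I : instructions(F))
//     if (Value *V = simplifyInstruction(&I, SQ.getWithInstruction(&I)))
//       I.replaceAllUsesWith(V);
//
// where a null return value means no simplification was found.
//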
18
19 #include "llvm/Analysis/InstructionSimplify.h"
20
21 #include "llvm/ADT/STLExtras.h"
22 #include "llvm/ADT/SetVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumptionCache.h"
26 #include "llvm/Analysis/CaptureTracking.h"
27 #include "llvm/Analysis/CmpInstAnalysis.h"
28 #include "llvm/Analysis/ConstantFolding.h"
29 #include "llvm/Analysis/InstSimplifyFolder.h"
30 #include "llvm/Analysis/LoopAnalysisManager.h"
31 #include "llvm/Analysis/MemoryBuiltins.h"
32 #include "llvm/Analysis/OverflowInstAnalysis.h"
33 #include "llvm/Analysis/ValueTracking.h"
34 #include "llvm/Analysis/VectorUtils.h"
35 #include "llvm/IR/ConstantRange.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/Dominators.h"
38 #include "llvm/IR/InstrTypes.h"
39 #include "llvm/IR/Instructions.h"
40 #include "llvm/IR/Operator.h"
41 #include "llvm/IR/PatternMatch.h"
42 #include "llvm/Support/KnownBits.h"
43 #include <algorithm>
44 #include <optional>
45 using namespace llvm;
46 using namespace llvm::PatternMatch;
47
48 #define DEBUG_TYPE "instsimplify"
49
50 enum { RecursionLimit = 3 };
51
52 STATISTIC(NumExpand, "Number of expansions");
53 STATISTIC(NumReassoc, "Number of reassociations");
54
55 static Value *simplifyAndInst(Value *, Value *, const SimplifyQuery &,
56 unsigned);
57 static Value *simplifyUnOp(unsigned, Value *, const SimplifyQuery &, unsigned);
58 static Value *simplifyFPUnOp(unsigned, Value *, const FastMathFlags &,
59 const SimplifyQuery &, unsigned);
60 static Value *simplifyBinOp(unsigned, Value *, Value *, const SimplifyQuery &,
61 unsigned);
62 static Value *simplifyBinOp(unsigned, Value *, Value *, const FastMathFlags &,
63 const SimplifyQuery &, unsigned);
64 static Value *simplifyCmpInst(unsigned, Value *, Value *, const SimplifyQuery &,
65 unsigned);
66 static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
67 const SimplifyQuery &Q, unsigned MaxRecurse);
68 static Value *simplifyOrInst(Value *, Value *, const SimplifyQuery &, unsigned);
69 static Value *simplifyXorInst(Value *, Value *, const SimplifyQuery &,
70 unsigned);
71 static Value *simplifyCastInst(unsigned, Value *, Type *, const SimplifyQuery &,
72 unsigned);
73 static Value *simplifyGEPInst(Type *, Value *, ArrayRef<Value *>, bool,
74 const SimplifyQuery &, unsigned);
75 static Value *simplifySelectInst(Value *, Value *, Value *,
76 const SimplifyQuery &, unsigned);
77
static Value *foldSelectWithBinaryOp(Value *Cond, Value *TrueVal,
79 Value *FalseVal) {
80 BinaryOperator::BinaryOps BinOpCode;
81 if (auto *BO = dyn_cast<BinaryOperator>(Cond))
82 BinOpCode = BO->getOpcode();
83 else
84 return nullptr;
85
86 CmpInst::Predicate ExpectedPred, Pred1, Pred2;
87 if (BinOpCode == BinaryOperator::Or) {
88 ExpectedPred = ICmpInst::ICMP_NE;
89 } else if (BinOpCode == BinaryOperator::And) {
90 ExpectedPred = ICmpInst::ICMP_EQ;
91 } else
92 return nullptr;
93
94 // %A = icmp eq %TV, %FV
95 // %B = icmp eq %X, %Y (and one of these is a select operand)
96 // %C = and %A, %B
97 // %D = select %C, %TV, %FV
98 // -->
99 // %FV
100
101 // %A = icmp ne %TV, %FV
102 // %B = icmp ne %X, %Y (and one of these is a select operand)
103 // %C = or %A, %B
104 // %D = select %C, %TV, %FV
105 // -->
106 // %TV
107 Value *X, *Y;
108 if (!match(Cond, m_c_BinOp(m_c_ICmp(Pred1, m_Specific(TrueVal),
109 m_Specific(FalseVal)),
110 m_ICmp(Pred2, m_Value(X), m_Value(Y)))) ||
111 Pred1 != Pred2 || Pred1 != ExpectedPred)
112 return nullptr;
113
114 if (X == TrueVal || X == FalseVal || Y == TrueVal || Y == FalseVal)
115 return BinOpCode == BinaryOperator::Or ? TrueVal : FalseVal;
116
117 return nullptr;
118 }
119
120 /// For a boolean type or a vector of boolean type, return false or a vector
121 /// with every element false.
static Constant *getFalse(Type *Ty) { return ConstantInt::getFalse(Ty); }
123
124 /// For a boolean type or a vector of boolean type, return true or a vector
125 /// with every element true.
static Constant *getTrue(Type *Ty) { return ConstantInt::getTrue(Ty); }
127
128 /// isSameCompare - Is V equivalent to the comparison "LHS Pred RHS"?
static bool isSameCompare(Value *V, CmpInst::Predicate Pred, Value *LHS,
130 Value *RHS) {
131 CmpInst *Cmp = dyn_cast<CmpInst>(V);
132 if (!Cmp)
133 return false;
134 CmpInst::Predicate CPred = Cmp->getPredicate();
135 Value *CLHS = Cmp->getOperand(0), *CRHS = Cmp->getOperand(1);
136 if (CPred == Pred && CLHS == LHS && CRHS == RHS)
137 return true;
138 return CPred == CmpInst::getSwappedPredicate(Pred) && CLHS == RHS &&
139 CRHS == LHS;
140 }
141
142 /// Simplify comparison with true or false branch of select:
143 /// %sel = select i1 %cond, i32 %tv, i32 %fv
144 /// %cmp = icmp sle i32 %sel, %rhs
145 /// Compose new comparison by substituting %sel with either %tv or %fv
146 /// and see if it simplifies.
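/// For example (illustrative), with
///   %sel = select i1 %cond, i32 3, i32 5
///   %cmp = icmp slt i32 %sel, 10
/// substituting either branch for %sel yields a comparison that folds to true,
/// so the caller (threadCmpOverSelect) can fold %cmp to true.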
static Value *simplifyCmpSelCase(CmpInst::Predicate Pred, Value *LHS,
148 Value *RHS, Value *Cond,
149 const SimplifyQuery &Q, unsigned MaxRecurse,
150 Constant *TrueOrFalse) {
151 Value *SimplifiedCmp = simplifyCmpInst(Pred, LHS, RHS, Q, MaxRecurse);
152 if (SimplifiedCmp == Cond) {
153 // %cmp simplified to the select condition (%cond).
154 return TrueOrFalse;
155 } else if (!SimplifiedCmp && isSameCompare(Cond, Pred, LHS, RHS)) {
156 // It didn't simplify. However, if composed comparison is equivalent
157 // to the select condition (%cond) then we can replace it.
158 return TrueOrFalse;
159 }
160 return SimplifiedCmp;
161 }
162
163 /// Simplify comparison with true branch of select
static Value *simplifyCmpSelTrueCase(CmpInst::Predicate Pred, Value *LHS,
165 Value *RHS, Value *Cond,
166 const SimplifyQuery &Q,
167 unsigned MaxRecurse) {
168 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
169 getTrue(Cond->getType()));
170 }
171
172 /// Simplify comparison with false branch of select
static Value *simplifyCmpSelFalseCase(CmpInst::Predicate Pred, Value *LHS,
174 Value *RHS, Value *Cond,
175 const SimplifyQuery &Q,
176 unsigned MaxRecurse) {
177 return simplifyCmpSelCase(Pred, LHS, RHS, Cond, Q, MaxRecurse,
178 getFalse(Cond->getType()));
179 }
180
181 /// We know comparison with both branches of select can be simplified, but they
182 /// are not equal. This routine handles some logical simplifications.
static Value *handleOtherCmpSelSimplifications(Value *TCmp, Value *FCmp,
184 Value *Cond,
185 const SimplifyQuery &Q,
186 unsigned MaxRecurse) {
187 // If the false value simplified to false, then the result of the compare
188 // is equal to "Cond && TCmp". This also catches the case when the false
189 // value simplified to false and the true value to true, returning "Cond".
190 // Folding select to and/or isn't poison-safe in general; impliesPoison
191 // checks whether folding it does not convert a well-defined value into
192 // poison.
193 if (match(FCmp, m_Zero()) && impliesPoison(TCmp, Cond))
194 if (Value *V = simplifyAndInst(Cond, TCmp, Q, MaxRecurse))
195 return V;
196 // If the true value simplified to true, then the result of the compare
197 // is equal to "Cond || FCmp".
198 if (match(TCmp, m_One()) && impliesPoison(FCmp, Cond))
199 if (Value *V = simplifyOrInst(Cond, FCmp, Q, MaxRecurse))
200 return V;
201 // Finally, if the false value simplified to true and the true value to
202 // false, then the result of the compare is equal to "!Cond".
203 if (match(FCmp, m_One()) && match(TCmp, m_Zero()))
204 if (Value *V = simplifyXorInst(
205 Cond, Constant::getAllOnesValue(Cond->getType()), Q, MaxRecurse))
206 return V;
207 return nullptr;
208 }
209
210 /// Does the given value dominate the specified phi node?
static bool valueDominatesPHI(Value *V, PHINode *P, const DominatorTree *DT) {
212 Instruction *I = dyn_cast<Instruction>(V);
213 if (!I)
214 // Arguments and constants dominate all instructions.
215 return true;
216
217 // If we are processing instructions (and/or basic blocks) that have not been
218 // fully added to a function, the parent nodes may still be null. Simply
219 // return the conservative answer in these cases.
220 if (!I->getParent() || !P->getParent() || !I->getFunction())
221 return false;
222
223 // If we have a DominatorTree then do a precise test.
224 if (DT)
225 return DT->dominates(I, P);
226
227 // Otherwise, if the instruction is in the entry block and is not an invoke,
228 // then it obviously dominates all phi nodes.
229 if (I->getParent()->isEntryBlock() && !isa<InvokeInst>(I) &&
230 !isa<CallBrInst>(I))
231 return true;
232
233 return false;
234 }
235
236 /// Try to simplify a binary operator of form "V op OtherOp" where V is
237 /// "(B0 opex B1)" by distributing 'op' across 'opex' as
238 /// "(B0 op OtherOp) opex (B1 op OtherOp)".
static Value *expandBinOp(Instruction::BinaryOps Opcode, Value *V,
240 Value *OtherOp, Instruction::BinaryOps OpcodeToExpand,
241 const SimplifyQuery &Q, unsigned MaxRecurse) {
242 auto *B = dyn_cast<BinaryOperator>(V);
243 if (!B || B->getOpcode() != OpcodeToExpand)
244 return nullptr;
245 Value *B0 = B->getOperand(0), *B1 = B->getOperand(1);
246 Value *L =
247 simplifyBinOp(Opcode, B0, OtherOp, Q.getWithoutUndef(), MaxRecurse);
248 if (!L)
249 return nullptr;
250 Value *R =
251 simplifyBinOp(Opcode, B1, OtherOp, Q.getWithoutUndef(), MaxRecurse);
252 if (!R)
253 return nullptr;
254
255 // Does the expanded pair of binops simplify to the existing binop?
256 if ((L == B0 && R == B1) ||
257 (Instruction::isCommutative(OpcodeToExpand) && L == B1 && R == B0)) {
258 ++NumExpand;
259 return B;
260 }
261
262 // Otherwise, return "L op' R" if it simplifies.
263 Value *S = simplifyBinOp(OpcodeToExpand, L, R, Q, MaxRecurse);
264 if (!S)
265 return nullptr;
266
267 ++NumExpand;
268 return S;
269 }
270
271 /// Try to simplify binops of form "A op (B op' C)" or the commuted variant by
272 /// distributing op over op'.
static Value *expandCommutativeBinOp(Instruction::BinaryOps Opcode, Value *L,
274 Value *R,
275 Instruction::BinaryOps OpcodeToExpand,
276 const SimplifyQuery &Q,
277 unsigned MaxRecurse) {
278 // Recursion is always used, so bail out at once if we already hit the limit.
279 if (!MaxRecurse--)
280 return nullptr;
281
282 if (Value *V = expandBinOp(Opcode, L, R, OpcodeToExpand, Q, MaxRecurse))
283 return V;
284 if (Value *V = expandBinOp(Opcode, R, L, OpcodeToExpand, Q, MaxRecurse))
285 return V;
286 return nullptr;
287 }
288
289 /// Generic simplifications for associative binary operations.
290 /// Returns the simpler value, or null if none was found.
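/// For example (illustrative), "(X + 7) + (-7)" reassociates to
/// "X + (7 + (-7))"; the inner add folds to 0 and "X + 0" folds to X, so the
/// whole expression simplifies to X.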
static Value *simplifyAssociativeBinOp(Instruction::BinaryOps Opcode,
292 Value *LHS, Value *RHS,
293 const SimplifyQuery &Q,
294 unsigned MaxRecurse) {
295 assert(Instruction::isAssociative(Opcode) && "Not an associative operation!");
296
297 // Recursion is always used, so bail out at once if we already hit the limit.
298 if (!MaxRecurse--)
299 return nullptr;
300
301 BinaryOperator *Op0 = dyn_cast<BinaryOperator>(LHS);
302 BinaryOperator *Op1 = dyn_cast<BinaryOperator>(RHS);
303
304 // Transform: "(A op B) op C" ==> "A op (B op C)" if it simplifies completely.
305 if (Op0 && Op0->getOpcode() == Opcode) {
306 Value *A = Op0->getOperand(0);
307 Value *B = Op0->getOperand(1);
308 Value *C = RHS;
309
310 // Does "B op C" simplify?
311 if (Value *V = simplifyBinOp(Opcode, B, C, Q, MaxRecurse)) {
312 // It does! Return "A op V" if it simplifies or is already available.
313 // If V equals B then "A op V" is just the LHS.
314 if (V == B)
315 return LHS;
316 // Otherwise return "A op V" if it simplifies.
317 if (Value *W = simplifyBinOp(Opcode, A, V, Q, MaxRecurse)) {
318 ++NumReassoc;
319 return W;
320 }
321 }
322 }
323
324 // Transform: "A op (B op C)" ==> "(A op B) op C" if it simplifies completely.
325 if (Op1 && Op1->getOpcode() == Opcode) {
326 Value *A = LHS;
327 Value *B = Op1->getOperand(0);
328 Value *C = Op1->getOperand(1);
329
330 // Does "A op B" simplify?
331 if (Value *V = simplifyBinOp(Opcode, A, B, Q, MaxRecurse)) {
332 // It does! Return "V op C" if it simplifies or is already available.
333 // If V equals B then "V op C" is just the RHS.
334 if (V == B)
335 return RHS;
336 // Otherwise return "V op C" if it simplifies.
337 if (Value *W = simplifyBinOp(Opcode, V, C, Q, MaxRecurse)) {
338 ++NumReassoc;
339 return W;
340 }
341 }
342 }
343
344 // The remaining transforms require commutativity as well as associativity.
345 if (!Instruction::isCommutative(Opcode))
346 return nullptr;
347
348 // Transform: "(A op B) op C" ==> "(C op A) op B" if it simplifies completely.
349 if (Op0 && Op0->getOpcode() == Opcode) {
350 Value *A = Op0->getOperand(0);
351 Value *B = Op0->getOperand(1);
352 Value *C = RHS;
353
354 // Does "C op A" simplify?
355 if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
356 // It does! Return "V op B" if it simplifies or is already available.
357 // If V equals A then "V op B" is just the LHS.
358 if (V == A)
359 return LHS;
360 // Otherwise return "V op B" if it simplifies.
361 if (Value *W = simplifyBinOp(Opcode, V, B, Q, MaxRecurse)) {
362 ++NumReassoc;
363 return W;
364 }
365 }
366 }
367
368 // Transform: "A op (B op C)" ==> "B op (C op A)" if it simplifies completely.
369 if (Op1 && Op1->getOpcode() == Opcode) {
370 Value *A = LHS;
371 Value *B = Op1->getOperand(0);
372 Value *C = Op1->getOperand(1);
373
374 // Does "C op A" simplify?
375 if (Value *V = simplifyBinOp(Opcode, C, A, Q, MaxRecurse)) {
376 // It does! Return "B op V" if it simplifies or is already available.
377 // If V equals C then "B op V" is just the RHS.
378 if (V == C)
379 return RHS;
380 // Otherwise return "B op V" if it simplifies.
381 if (Value *W = simplifyBinOp(Opcode, B, V, Q, MaxRecurse)) {
382 ++NumReassoc;
383 return W;
384 }
385 }
386 }
387
388 return nullptr;
389 }
390
391 /// In the case of a binary operation with a select instruction as an operand,
392 /// try to simplify the binop by seeing whether evaluating it on both branches
393 /// of the select results in the same value. Returns the common value if so,
394 /// otherwise returns null.
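/// For example (illustrative), "(select i1 %c, i32 16, i32 8) & 7" evaluates
/// to "16 & 7" on the true branch and "8 & 7" on the false branch; both fold
/// to 0, so the whole binop simplifies to 0.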
static Value *threadBinOpOverSelect(Instruction::BinaryOps Opcode, Value *LHS,
396 Value *RHS, const SimplifyQuery &Q,
397 unsigned MaxRecurse) {
398 // Recursion is always used, so bail out at once if we already hit the limit.
399 if (!MaxRecurse--)
400 return nullptr;
401
402 SelectInst *SI;
403 if (isa<SelectInst>(LHS)) {
404 SI = cast<SelectInst>(LHS);
405 } else {
406 assert(isa<SelectInst>(RHS) && "No select instruction operand!");
407 SI = cast<SelectInst>(RHS);
408 }
409
410 // Evaluate the BinOp on the true and false branches of the select.
411 Value *TV;
412 Value *FV;
413 if (SI == LHS) {
414 TV = simplifyBinOp(Opcode, SI->getTrueValue(), RHS, Q, MaxRecurse);
415 FV = simplifyBinOp(Opcode, SI->getFalseValue(), RHS, Q, MaxRecurse);
416 } else {
417 TV = simplifyBinOp(Opcode, LHS, SI->getTrueValue(), Q, MaxRecurse);
418 FV = simplifyBinOp(Opcode, LHS, SI->getFalseValue(), Q, MaxRecurse);
419 }
420
421 // If they simplified to the same value, then return the common value.
422 // If they both failed to simplify then return null.
423 if (TV == FV)
424 return TV;
425
426 // If one branch simplified to undef, return the other one.
427 if (TV && Q.isUndefValue(TV))
428 return FV;
429 if (FV && Q.isUndefValue(FV))
430 return TV;
431
432 // If applying the operation did not change the true and false select values,
433 // then the result of the binop is the select itself.
434 if (TV == SI->getTrueValue() && FV == SI->getFalseValue())
435 return SI;
436
437 // If one branch simplified and the other did not, and the simplified
438 // value is equal to the unsimplified one, return the simplified value.
439 // For example, select (cond, X, X & Z) & Z -> X & Z.
440 if ((FV && !TV) || (TV && !FV)) {
441 // Check that the simplified value has the form "X op Y" where "op" is the
442 // same as the original operation.
443 Instruction *Simplified = dyn_cast<Instruction>(FV ? FV : TV);
444 if (Simplified && Simplified->getOpcode() == unsigned(Opcode)) {
445 // The value that didn't simplify is "UnsimplifiedLHS op UnsimplifiedRHS".
446 // We already know that "op" is the same as for the simplified value. See
447 // if the operands match too. If so, return the simplified value.
448 Value *UnsimplifiedBranch = FV ? SI->getTrueValue() : SI->getFalseValue();
449 Value *UnsimplifiedLHS = SI == LHS ? UnsimplifiedBranch : LHS;
450 Value *UnsimplifiedRHS = SI == LHS ? RHS : UnsimplifiedBranch;
451 if (Simplified->getOperand(0) == UnsimplifiedLHS &&
452 Simplified->getOperand(1) == UnsimplifiedRHS)
453 return Simplified;
454 if (Simplified->isCommutative() &&
455 Simplified->getOperand(1) == UnsimplifiedLHS &&
456 Simplified->getOperand(0) == UnsimplifiedRHS)
457 return Simplified;
458 }
459 }
460
461 return nullptr;
462 }
463
464 /// In the case of a comparison with a select instruction, try to simplify the
465 /// comparison by seeing whether both branches of the select result in the same
466 /// value. Returns the common value if so, otherwise returns null.
467 /// For example, if we have:
468 /// %tmp = select i1 %cmp, i32 1, i32 2
469 /// %cmp1 = icmp sle i32 %tmp, 3
470 /// We can simplify %cmp1 to true, because both branches of select are
471 /// less than 3. We compose new comparison by substituting %tmp with both
472 /// branches of select and see if it can be simplified.
static Value *threadCmpOverSelect(CmpInst::Predicate Pred, Value *LHS,
474 Value *RHS, const SimplifyQuery &Q,
475 unsigned MaxRecurse) {
476 // Recursion is always used, so bail out at once if we already hit the limit.
477 if (!MaxRecurse--)
478 return nullptr;
479
480 // Make sure the select is on the LHS.
481 if (!isa<SelectInst>(LHS)) {
482 std::swap(LHS, RHS);
483 Pred = CmpInst::getSwappedPredicate(Pred);
484 }
485 assert(isa<SelectInst>(LHS) && "Not comparing with a select instruction!");
486 SelectInst *SI = cast<SelectInst>(LHS);
487 Value *Cond = SI->getCondition();
488 Value *TV = SI->getTrueValue();
489 Value *FV = SI->getFalseValue();
490
491 // Now that we have "cmp select(Cond, TV, FV), RHS", analyse it.
492 // Does "cmp TV, RHS" simplify?
493 Value *TCmp = simplifyCmpSelTrueCase(Pred, TV, RHS, Cond, Q, MaxRecurse);
494 if (!TCmp)
495 return nullptr;
496
497 // Does "cmp FV, RHS" simplify?
498 Value *FCmp = simplifyCmpSelFalseCase(Pred, FV, RHS, Cond, Q, MaxRecurse);
499 if (!FCmp)
500 return nullptr;
501
502 // If both sides simplified to the same value, then use it as the result of
503 // the original comparison.
504 if (TCmp == FCmp)
505 return TCmp;
506
507 // The remaining cases only make sense if the select condition has the same
508 // type as the result of the comparison, so bail out if this is not so.
509 if (Cond->getType()->isVectorTy() == RHS->getType()->isVectorTy())
510 return handleOtherCmpSelSimplifications(TCmp, FCmp, Cond, Q, MaxRecurse);
511
512 return nullptr;
513 }
514
515 /// In the case of a binary operation with an operand that is a PHI instruction,
516 /// try to simplify the binop by seeing whether evaluating it on the incoming
517 /// phi values yields the same result for every value. If so returns the common
518 /// value, otherwise returns null.
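/// For example (illustrative), given
///   %p = phi i32 [ 7, %bb1 ], [ 11, %bb2 ]
/// the binop "and i32 %p, 1" evaluates to 1 for every incoming value, so it
/// simplifies to 1.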
static Value *threadBinOpOverPHI(Instruction::BinaryOps Opcode, Value *LHS,
520 Value *RHS, const SimplifyQuery &Q,
521 unsigned MaxRecurse) {
522 // Recursion is always used, so bail out at once if we already hit the limit.
523 if (!MaxRecurse--)
524 return nullptr;
525
526 PHINode *PI;
527 if (isa<PHINode>(LHS)) {
528 PI = cast<PHINode>(LHS);
529 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
530 if (!valueDominatesPHI(RHS, PI, Q.DT))
531 return nullptr;
532 } else {
533 assert(isa<PHINode>(RHS) && "No PHI instruction operand!");
534 PI = cast<PHINode>(RHS);
535 // Bail out if LHS and the phi may be mutually interdependent due to a loop.
536 if (!valueDominatesPHI(LHS, PI, Q.DT))
537 return nullptr;
538 }
539
540 // Evaluate the BinOp on the incoming phi values.
541 Value *CommonValue = nullptr;
542 for (Value *Incoming : PI->incoming_values()) {
543 // If the incoming value is the phi node itself, it can safely be skipped.
544 if (Incoming == PI)
545 continue;
546 Value *V = PI == LHS ? simplifyBinOp(Opcode, Incoming, RHS, Q, MaxRecurse)
547 : simplifyBinOp(Opcode, LHS, Incoming, Q, MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the previous one, then give up.
550 if (!V || (CommonValue && V != CommonValue))
551 return nullptr;
552 CommonValue = V;
553 }
554
555 return CommonValue;
556 }
557
558 /// In the case of a comparison with a PHI instruction, try to simplify the
559 /// comparison by seeing whether comparing with all of the incoming phi values
560 /// yields the same result every time. If so returns the common result,
561 /// otherwise returns null.
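/// For example (illustrative), given
///   %p = phi i32 [ 2, %bb1 ], [ 4, %bb2 ]
/// the comparison "icmp ult i32 %p, 8" is true for every incoming value, so it
/// simplifies to true.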
static Value *threadCmpOverPHI(CmpInst::Predicate Pred, Value *LHS, Value *RHS,
563 const SimplifyQuery &Q, unsigned MaxRecurse) {
564 // Recursion is always used, so bail out at once if we already hit the limit.
565 if (!MaxRecurse--)
566 return nullptr;
567
568 // Make sure the phi is on the LHS.
569 if (!isa<PHINode>(LHS)) {
570 std::swap(LHS, RHS);
571 Pred = CmpInst::getSwappedPredicate(Pred);
572 }
573 assert(isa<PHINode>(LHS) && "Not comparing with a phi instruction!");
574 PHINode *PI = cast<PHINode>(LHS);
575
576 // Bail out if RHS and the phi may be mutually interdependent due to a loop.
577 if (!valueDominatesPHI(RHS, PI, Q.DT))
578 return nullptr;
579
  // Evaluate the comparison on the incoming phi values.
581 Value *CommonValue = nullptr;
582 for (unsigned u = 0, e = PI->getNumIncomingValues(); u < e; ++u) {
583 Value *Incoming = PI->getIncomingValue(u);
584 Instruction *InTI = PI->getIncomingBlock(u)->getTerminator();
585 // If the incoming value is the phi node itself, it can safely be skipped.
586 if (Incoming == PI)
587 continue;
588 // Change the context instruction to the "edge" that flows into the phi.
589 // This is important because that is where incoming is actually "evaluated"
590 // even though it is used later somewhere else.
591 Value *V = simplifyCmpInst(Pred, Incoming, RHS, Q.getWithInstruction(InTI),
592 MaxRecurse);
    // If the operation failed to simplify, or simplified to a value different
    // from the previous one, then give up.
595 if (!V || (CommonValue && V != CommonValue))
596 return nullptr;
597 CommonValue = V;
598 }
599
600 return CommonValue;
601 }
602
static Constant *foldOrCommuteConstant(Instruction::BinaryOps Opcode,
604 Value *&Op0, Value *&Op1,
605 const SimplifyQuery &Q) {
606 if (auto *CLHS = dyn_cast<Constant>(Op0)) {
607 if (auto *CRHS = dyn_cast<Constant>(Op1)) {
608 switch (Opcode) {
609 default:
610 break;
611 case Instruction::FAdd:
612 case Instruction::FSub:
613 case Instruction::FMul:
614 case Instruction::FDiv:
615 case Instruction::FRem:
616 if (Q.CxtI != nullptr)
617 return ConstantFoldFPInstOperands(Opcode, CLHS, CRHS, Q.DL, Q.CxtI);
618 }
619 return ConstantFoldBinaryOpOperands(Opcode, CLHS, CRHS, Q.DL);
620 }
621
622 // Canonicalize the constant to the RHS if this is a commutative operation.
623 if (Instruction::isCommutative(Opcode))
624 std::swap(Op0, Op1);
625 }
626 return nullptr;
627 }
628
629 /// Given operands for an Add, see if we can fold the result.
630 /// If not, this returns null.
static Value *simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
632 const SimplifyQuery &Q, unsigned MaxRecurse) {
633 if (Constant *C = foldOrCommuteConstant(Instruction::Add, Op0, Op1, Q))
634 return C;
635
636 // X + poison -> poison
637 if (isa<PoisonValue>(Op1))
638 return Op1;
639
640 // X + undef -> undef
641 if (Q.isUndefValue(Op1))
642 return Op1;
643
644 // X + 0 -> X
645 if (match(Op1, m_Zero()))
646 return Op0;
647
  // If the two operands are negations of each other, return 0.
649 if (isKnownNegation(Op0, Op1))
650 return Constant::getNullValue(Op0->getType());
651
652 // X + (Y - X) -> Y
653 // (Y - X) + X -> Y
654 // Eg: X + -X -> 0
655 Value *Y = nullptr;
656 if (match(Op1, m_Sub(m_Value(Y), m_Specific(Op0))) ||
657 match(Op0, m_Sub(m_Value(Y), m_Specific(Op1))))
658 return Y;
659
660 // X + ~X -> -1 since ~X = -X-1
661 Type *Ty = Op0->getType();
662 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
663 return Constant::getAllOnesValue(Ty);
664
665 // add nsw/nuw (xor Y, signmask), signmask --> Y
666 // The no-wrapping add guarantees that the top bit will be set by the add.
667 // Therefore, the xor must be clearing the already set sign bit of Y.
668 if ((IsNSW || IsNUW) && match(Op1, m_SignMask()) &&
669 match(Op0, m_Xor(m_Value(Y), m_SignMask())))
670 return Y;
671
672 // add nuw %x, -1 -> -1, because %x can only be 0.
673 if (IsNUW && match(Op1, m_AllOnes()))
674 return Op1; // Which is -1.
675
  // i1 add -> xor.
677 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
678 if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
679 return V;
680
681 // Try some generic simplifications for associative operations.
682 if (Value *V =
683 simplifyAssociativeBinOp(Instruction::Add, Op0, Op1, Q, MaxRecurse))
684 return V;
685
686 // Threading Add over selects and phi nodes is pointless, so don't bother.
687 // Threading over the select in "A + select(cond, B, C)" means evaluating
688 // "A+B" and "A+C" and seeing if they are equal; but they are equal if and
689 // only if B and C are equal. If B and C are equal then (since we assume
690 // that operands have already been simplified) "select(cond, B, C)" should
691 // have been simplified to the common value of B and C already. Analysing
692 // "A+B" and "A+C" thus gains nothing, but costs compile time. Similarly
693 // for threading over phi nodes.
694
695 return nullptr;
696 }
697
Value *llvm::simplifyAddInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
699 const SimplifyQuery &Query) {
700 return ::simplifyAddInst(Op0, Op1, IsNSW, IsNUW, Query, RecursionLimit);
701 }
702
703 /// Compute the base pointer and cumulative constant offsets for V.
704 ///
705 /// This strips all constant offsets off of V, leaving it the base pointer, and
706 /// accumulates the total constant offset applied in the returned constant.
707 /// It returns zero if there are no constant offsets applied.
708 ///
709 /// This is very similar to stripAndAccumulateConstantOffsets(), except it
710 /// normalizes the offset bitwidth to the stripped pointer type, not the
711 /// original pointer type.
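/// For example (illustrative), given "getelementptr inbounds i8, ptr %base,
/// i64 12", V is rewritten to %base and the returned offset is 12, expressed
/// in the index width of the stripped pointer type.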
static APInt stripAndComputeConstantOffsets(const DataLayout &DL, Value *&V,
713 bool AllowNonInbounds = false) {
714 assert(V->getType()->isPtrOrPtrVectorTy());
715
716 APInt Offset = APInt::getZero(DL.getIndexTypeSizeInBits(V->getType()));
717 V = V->stripAndAccumulateConstantOffsets(DL, Offset, AllowNonInbounds);
  // As the strip may trace through `addrspacecast`, the computed offset may
  // need to be sign-extended or truncated to the index width of the stripped
  // pointer.
720 return Offset.sextOrTrunc(DL.getIndexTypeSizeInBits(V->getType()));
721 }
722
723 /// Compute the constant difference between two pointer values.
/// If the difference is not a constant, returns null.
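/// For example (illustrative), if LHS is "gep i8, ptr %p, i64 8" and RHS is
/// "gep i8, ptr %p, i64 3", both strip down to %p and the result is the
/// constant 5.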
static Constant *computePointerDifference(const DataLayout &DL, Value *LHS,
726 Value *RHS) {
727 APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS);
728 APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS);
729
730 // If LHS and RHS are not related via constant offsets to the same base
731 // value, there is nothing we can do here.
732 if (LHS != RHS)
733 return nullptr;
734
735 // Otherwise, the difference of LHS - RHS can be computed as:
736 // LHS - RHS
737 // = (LHSOffset + Base) - (RHSOffset + Base)
738 // = LHSOffset - RHSOffset
739 Constant *Res = ConstantInt::get(LHS->getContext(), LHSOffset - RHSOffset);
740 if (auto *VecTy = dyn_cast<VectorType>(LHS->getType()))
741 Res = ConstantVector::getSplat(VecTy->getElementCount(), Res);
742 return Res;
743 }
744
745 /// Test if there is a dominating equivalence condition for the
746 /// two operands. If there is, try to reduce the binary operation
747 /// between the two operands.
748 /// Example: Op0 - Op1 --> 0 when Op0 == Op1
static Value *simplifyByDomEq(unsigned Opcode, Value *Op0, Value *Op1,
750 const SimplifyQuery &Q, unsigned MaxRecurse) {
  // Recursive runs cannot gain any benefit, so only try this at the top level.
752 if (MaxRecurse != RecursionLimit)
753 return nullptr;
754
755 std::optional<bool> Imp =
756 isImpliedByDomCondition(CmpInst::ICMP_EQ, Op0, Op1, Q.CxtI, Q.DL);
757 if (Imp && *Imp) {
758 Type *Ty = Op0->getType();
759 switch (Opcode) {
760 case Instruction::Sub:
761 case Instruction::Xor:
762 case Instruction::URem:
763 case Instruction::SRem:
764 return Constant::getNullValue(Ty);
765
766 case Instruction::SDiv:
767 case Instruction::UDiv:
768 return ConstantInt::get(Ty, 1);
769
770 case Instruction::And:
771 case Instruction::Or:
772 // Could be either one - choose Op1 since that's more likely a constant.
773 return Op1;
774 default:
775 break;
776 }
777 }
778 return nullptr;
779 }
780
781 /// Given operands for a Sub, see if we can fold the result.
782 /// If not, this returns null.
static Value *simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
784 const SimplifyQuery &Q, unsigned MaxRecurse) {
785 if (Constant *C = foldOrCommuteConstant(Instruction::Sub, Op0, Op1, Q))
786 return C;
787
788 // X - poison -> poison
789 // poison - X -> poison
790 if (isa<PoisonValue>(Op0) || isa<PoisonValue>(Op1))
791 return PoisonValue::get(Op0->getType());
792
793 // X - undef -> undef
794 // undef - X -> undef
795 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
796 return UndefValue::get(Op0->getType());
797
798 // X - 0 -> X
799 if (match(Op1, m_Zero()))
800 return Op0;
801
802 // X - X -> 0
803 if (Op0 == Op1)
804 return Constant::getNullValue(Op0->getType());
805
806 // Is this a negation?
807 if (match(Op0, m_Zero())) {
808 // 0 - X -> 0 if the sub is NUW.
809 if (IsNUW)
810 return Constant::getNullValue(Op0->getType());
811
812 KnownBits Known = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
813 if (Known.Zero.isMaxSignedValue()) {
814 // Op1 is either 0 or the minimum signed value. If the sub is NSW, then
815 // Op1 must be 0 because negating the minimum signed value is undefined.
816 if (IsNSW)
817 return Constant::getNullValue(Op0->getType());
818
819 // 0 - X -> X if X is 0 or the minimum signed value.
820 return Op1;
821 }
822 }
823
824 // (X + Y) - Z -> X + (Y - Z) or Y + (X - Z) if everything simplifies.
825 // For example, (X + Y) - Y -> X; (Y + X) - Y -> X
826 Value *X = nullptr, *Y = nullptr, *Z = Op1;
827 if (MaxRecurse && match(Op0, m_Add(m_Value(X), m_Value(Y)))) { // (X + Y) - Z
828 // See if "V === Y - Z" simplifies.
829 if (Value *V = simplifyBinOp(Instruction::Sub, Y, Z, Q, MaxRecurse - 1))
830 // It does! Now see if "X + V" simplifies.
831 if (Value *W = simplifyBinOp(Instruction::Add, X, V, Q, MaxRecurse - 1)) {
832 // It does, we successfully reassociated!
833 ++NumReassoc;
834 return W;
835 }
836 // See if "V === X - Z" simplifies.
837 if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
838 // It does! Now see if "Y + V" simplifies.
839 if (Value *W = simplifyBinOp(Instruction::Add, Y, V, Q, MaxRecurse - 1)) {
840 // It does, we successfully reassociated!
841 ++NumReassoc;
842 return W;
843 }
844 }
845
846 // X - (Y + Z) -> (X - Y) - Z or (X - Z) - Y if everything simplifies.
847 // For example, X - (X + 1) -> -1
848 X = Op0;
849 if (MaxRecurse && match(Op1, m_Add(m_Value(Y), m_Value(Z)))) { // X - (Y + Z)
850 // See if "V === X - Y" simplifies.
851 if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
852 // It does! Now see if "V - Z" simplifies.
853 if (Value *W = simplifyBinOp(Instruction::Sub, V, Z, Q, MaxRecurse - 1)) {
854 // It does, we successfully reassociated!
855 ++NumReassoc;
856 return W;
857 }
858 // See if "V === X - Z" simplifies.
859 if (Value *V = simplifyBinOp(Instruction::Sub, X, Z, Q, MaxRecurse - 1))
860 // It does! Now see if "V - Y" simplifies.
861 if (Value *W = simplifyBinOp(Instruction::Sub, V, Y, Q, MaxRecurse - 1)) {
862 // It does, we successfully reassociated!
863 ++NumReassoc;
864 return W;
865 }
866 }
867
868 // Z - (X - Y) -> (Z - X) + Y if everything simplifies.
869 // For example, X - (X - Y) -> Y.
870 Z = Op0;
871 if (MaxRecurse && match(Op1, m_Sub(m_Value(X), m_Value(Y)))) // Z - (X - Y)
872 // See if "V === Z - X" simplifies.
873 if (Value *V = simplifyBinOp(Instruction::Sub, Z, X, Q, MaxRecurse - 1))
874 // It does! Now see if "V + Y" simplifies.
875 if (Value *W = simplifyBinOp(Instruction::Add, V, Y, Q, MaxRecurse - 1)) {
876 // It does, we successfully reassociated!
877 ++NumReassoc;
878 return W;
879 }
880
881 // trunc(X) - trunc(Y) -> trunc(X - Y) if everything simplifies.
882 if (MaxRecurse && match(Op0, m_Trunc(m_Value(X))) &&
883 match(Op1, m_Trunc(m_Value(Y))))
884 if (X->getType() == Y->getType())
885 // See if "V === X - Y" simplifies.
886 if (Value *V = simplifyBinOp(Instruction::Sub, X, Y, Q, MaxRecurse - 1))
887 // It does! Now see if "trunc V" simplifies.
888 if (Value *W = simplifyCastInst(Instruction::Trunc, V, Op0->getType(),
889 Q, MaxRecurse - 1))
890 // It does, return the simplified "trunc V".
891 return W;
892
893 // Variations on GEP(base, I, ...) - GEP(base, i, ...) -> GEP(null, I-i, ...).
894 if (match(Op0, m_PtrToInt(m_Value(X))) && match(Op1, m_PtrToInt(m_Value(Y))))
895 if (Constant *Result = computePointerDifference(Q.DL, X, Y))
896 return ConstantExpr::getIntegerCast(Result, Op0->getType(), true);
897
898 // i1 sub -> xor.
899 if (MaxRecurse && Op0->getType()->isIntOrIntVectorTy(1))
900 if (Value *V = simplifyXorInst(Op0, Op1, Q, MaxRecurse - 1))
901 return V;
902
903 // Threading Sub over selects and phi nodes is pointless, so don't bother.
904 // Threading over the select in "A - select(cond, B, C)" means evaluating
905 // "A-B" and "A-C" and seeing if they are equal; but they are equal if and
906 // only if B and C are equal. If B and C are equal then (since we assume
907 // that operands have already been simplified) "select(cond, B, C)" should
908 // have been simplified to the common value of B and C already. Analysing
909 // "A-B" and "A-C" thus gains nothing, but costs compile time. Similarly
910 // for threading over phi nodes.
911
912 if (Value *V = simplifyByDomEq(Instruction::Sub, Op0, Op1, Q, MaxRecurse))
913 return V;
914
915 return nullptr;
916 }
917
Value *llvm::simplifySubInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
919 const SimplifyQuery &Q) {
920 return ::simplifySubInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
921 }
922
923 /// Given operands for a Mul, see if we can fold the result.
924 /// If not, this returns null.
static Value *simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
926 const SimplifyQuery &Q, unsigned MaxRecurse) {
927 if (Constant *C = foldOrCommuteConstant(Instruction::Mul, Op0, Op1, Q))
928 return C;
929
930 // X * poison -> poison
931 if (isa<PoisonValue>(Op1))
932 return Op1;
933
934 // X * undef -> 0
935 // X * 0 -> 0
936 if (Q.isUndefValue(Op1) || match(Op1, m_Zero()))
937 return Constant::getNullValue(Op0->getType());
938
939 // X * 1 -> X
940 if (match(Op1, m_One()))
941 return Op0;
942
943 // (X / Y) * Y -> X if the division is exact.
944 Value *X = nullptr;
945 if (Q.IIQ.UseInstrInfo &&
946 (match(Op0,
947 m_Exact(m_IDiv(m_Value(X), m_Specific(Op1)))) || // (X / Y) * Y
948 match(Op1, m_Exact(m_IDiv(m_Value(X), m_Specific(Op0)))))) // Y * (X / Y)
949 return X;
950
951 if (Op0->getType()->isIntOrIntVectorTy(1)) {
952 // mul i1 nsw is a special-case because -1 * -1 is poison (+1 is not
953 // representable). All other cases reduce to 0, so just return 0.
954 if (IsNSW)
955 return ConstantInt::getNullValue(Op0->getType());
956
957 // Treat "mul i1" as "and i1".
958 if (MaxRecurse)
959 if (Value *V = simplifyAndInst(Op0, Op1, Q, MaxRecurse - 1))
960 return V;
961 }
962
963 // Try some generic simplifications for associative operations.
964 if (Value *V =
965 simplifyAssociativeBinOp(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
966 return V;
967
968 // Mul distributes over Add. Try some generic simplifications based on this.
969 if (Value *V = expandCommutativeBinOp(Instruction::Mul, Op0, Op1,
970 Instruction::Add, Q, MaxRecurse))
971 return V;
972
973 // If the operation is with the result of a select instruction, check whether
974 // operating on either branch of the select always yields the same value.
975 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
976 if (Value *V =
977 threadBinOpOverSelect(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
978 return V;
979
980 // If the operation is with the result of a phi instruction, check whether
981 // operating on all incoming values of the phi always yields the same value.
982 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
983 if (Value *V =
984 threadBinOpOverPHI(Instruction::Mul, Op0, Op1, Q, MaxRecurse))
985 return V;
986
987 return nullptr;
988 }
989
Value *llvm::simplifyMulInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
991 const SimplifyQuery &Q) {
992 return ::simplifyMulInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
993 }
994
995 /// Check for common or similar folds of integer division or integer remainder.
996 /// This applies to all 4 opcodes (sdiv/udiv/srem/urem).
static Value *simplifyDivRem(Instruction::BinaryOps Opcode, Value *Op0,
998 Value *Op1, const SimplifyQuery &Q,
999 unsigned MaxRecurse) {
1000 bool IsDiv = (Opcode == Instruction::SDiv || Opcode == Instruction::UDiv);
1001 bool IsSigned = (Opcode == Instruction::SDiv || Opcode == Instruction::SRem);
1002
1003 Type *Ty = Op0->getType();
1004
1005 // X / undef -> poison
1006 // X % undef -> poison
1007 if (Q.isUndefValue(Op1) || isa<PoisonValue>(Op1))
1008 return PoisonValue::get(Ty);
1009
1010 // X / 0 -> poison
1011 // X % 0 -> poison
1012 // We don't need to preserve faults!
1013 if (match(Op1, m_Zero()))
1014 return PoisonValue::get(Ty);
1015
1016 // If any element of a constant divisor fixed width vector is zero or undef
1017 // the behavior is undefined and we can fold the whole op to poison.
1018 auto *Op1C = dyn_cast<Constant>(Op1);
1019 auto *VTy = dyn_cast<FixedVectorType>(Ty);
1020 if (Op1C && VTy) {
1021 unsigned NumElts = VTy->getNumElements();
1022 for (unsigned i = 0; i != NumElts; ++i) {
1023 Constant *Elt = Op1C->getAggregateElement(i);
1024 if (Elt && (Elt->isNullValue() || Q.isUndefValue(Elt)))
1025 return PoisonValue::get(Ty);
1026 }
1027 }
1028
1029 // poison / X -> poison
1030 // poison % X -> poison
1031 if (isa<PoisonValue>(Op0))
1032 return Op0;
1033
1034 // undef / X -> 0
1035 // undef % X -> 0
1036 if (Q.isUndefValue(Op0))
1037 return Constant::getNullValue(Ty);
1038
1039 // 0 / X -> 0
1040 // 0 % X -> 0
1041 if (match(Op0, m_Zero()))
1042 return Constant::getNullValue(Op0->getType());
1043
1044 // X / X -> 1
1045 // X % X -> 0
1046 if (Op0 == Op1)
1047 return IsDiv ? ConstantInt::get(Ty, 1) : Constant::getNullValue(Ty);
1048
1049 // X / 1 -> X
1050 // X % 1 -> 0
1051 // If this is a boolean op (single-bit element type), we can't have
1052 // division-by-zero or remainder-by-zero, so assume the divisor is 1.
1053 // Similarly, if we're zero-extending a boolean divisor, then assume it's a 1.
1054 Value *X;
1055 if (match(Op1, m_One()) || Ty->isIntOrIntVectorTy(1) ||
1056 (match(Op1, m_ZExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1057 return IsDiv ? Op0 : Constant::getNullValue(Ty);
1058
1059 // If X * Y does not overflow, then:
1060 // X * Y / Y -> X
1061 // X * Y % Y -> 0
1062 if (match(Op0, m_c_Mul(m_Value(X), m_Specific(Op1)))) {
1063 auto *Mul = cast<OverflowingBinaryOperator>(Op0);
1064 // The multiplication can't overflow if it is defined not to, or if
1065 // X == A / Y for some A.
1066 if ((IsSigned && Q.IIQ.hasNoSignedWrap(Mul)) ||
1067 (!IsSigned && Q.IIQ.hasNoUnsignedWrap(Mul)) ||
1068 (IsSigned && match(X, m_SDiv(m_Value(), m_Specific(Op1)))) ||
1069 (!IsSigned && match(X, m_UDiv(m_Value(), m_Specific(Op1))))) {
1070 return IsDiv ? X : Constant::getNullValue(Op0->getType());
1071 }
1072 }
1073
1074 if (Value *V = simplifyByDomEq(Opcode, Op0, Op1, Q, MaxRecurse))
1075 return V;
1076
1077 return nullptr;
1078 }
1079
1080 /// Given a predicate and two operands, return true if the comparison is true.
1081 /// This is a helper for div/rem simplification where we return some other value
1082 /// when we can prove a relationship between the operands.
static bool isICmpTrue(ICmpInst::Predicate Pred, Value *LHS, Value *RHS,
1084 const SimplifyQuery &Q, unsigned MaxRecurse) {
1085 Value *V = simplifyICmpInst(Pred, LHS, RHS, Q, MaxRecurse);
1086 Constant *C = dyn_cast_or_null<Constant>(V);
1087 return (C && C->isAllOnesValue());
1088 }
1089
1090 /// Return true if we can simplify X / Y to 0. Remainder can adapt that answer
1091 /// to simplify X % Y to X.
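/// For example (illustrative), if X is "and i32 %a, 7" then "udiv X, 10" is
/// known to be 0 because X is at most 7, and "urem X, 10" is therefore X.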
static bool isDivZero(Value *X, Value *Y, const SimplifyQuery &Q,
1093 unsigned MaxRecurse, bool IsSigned) {
1094 // Recursion is always used, so bail out at once if we already hit the limit.
1095 if (!MaxRecurse--)
1096 return false;
1097
1098 if (IsSigned) {
1099 // |X| / |Y| --> 0
1100 //
1101 // We require that 1 operand is a simple constant. That could be extended to
1102 // 2 variables if we computed the sign bit for each.
1103 //
1104 // Make sure that a constant is not the minimum signed value because taking
1105 // the abs() of that is undefined.
1106 Type *Ty = X->getType();
1107 const APInt *C;
1108 if (match(X, m_APInt(C)) && !C->isMinSignedValue()) {
1109 // Is the variable divisor magnitude always greater than the constant
1110 // dividend magnitude?
1111 // |Y| > |C| --> Y < -abs(C) or Y > abs(C)
1112 Constant *PosDividendC = ConstantInt::get(Ty, C->abs());
1113 Constant *NegDividendC = ConstantInt::get(Ty, -C->abs());
1114 if (isICmpTrue(CmpInst::ICMP_SLT, Y, NegDividendC, Q, MaxRecurse) ||
1115 isICmpTrue(CmpInst::ICMP_SGT, Y, PosDividendC, Q, MaxRecurse))
1116 return true;
1117 }
1118 if (match(Y, m_APInt(C))) {
1119 // Special-case: we can't take the abs() of a minimum signed value. If
1120 // that's the divisor, then all we have to do is prove that the dividend
1121 // is also not the minimum signed value.
1122 if (C->isMinSignedValue())
1123 return isICmpTrue(CmpInst::ICMP_NE, X, Y, Q, MaxRecurse);
1124
1125 // Is the variable dividend magnitude always less than the constant
1126 // divisor magnitude?
1127 // |X| < |C| --> X > -abs(C) and X < abs(C)
1128 Constant *PosDivisorC = ConstantInt::get(Ty, C->abs());
1129 Constant *NegDivisorC = ConstantInt::get(Ty, -C->abs());
1130 if (isICmpTrue(CmpInst::ICMP_SGT, X, NegDivisorC, Q, MaxRecurse) &&
1131 isICmpTrue(CmpInst::ICMP_SLT, X, PosDivisorC, Q, MaxRecurse))
1132 return true;
1133 }
1134 return false;
1135 }
1136
1137 // IsSigned == false.
1138
1139 // Is the unsigned dividend known to be less than a constant divisor?
1140 // TODO: Convert this (and above) to range analysis
1141 // ("computeConstantRangeIncludingKnownBits")?
1142 const APInt *C;
1143 if (match(Y, m_APInt(C)) &&
1144 computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, Q.DT).getMaxValue().ult(*C))
1145 return true;
1146
1147 // Try again for any divisor:
1148 // Is the dividend unsigned less than the divisor?
1149 return isICmpTrue(ICmpInst::ICMP_ULT, X, Y, Q, MaxRecurse);
1150 }
1151
1152 /// These are simplifications common to SDiv and UDiv.
static Value *simplifyDiv(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1154 bool IsExact, const SimplifyQuery &Q,
1155 unsigned MaxRecurse) {
1156 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1157 return C;
1158
1159 if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1160 return V;
1161
1162 // If this is an exact divide by a constant, then the dividend (Op0) must have
1163 // at least as many trailing zeros as the divisor to divide evenly. If it has
1164 // less trailing zeros, then the result must be poison.
1165 const APInt *DivC;
1166 if (IsExact && match(Op1, m_APInt(DivC)) && DivC->countTrailingZeros()) {
1167 KnownBits KnownOp0 = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1168 if (KnownOp0.countMaxTrailingZeros() < DivC->countTrailingZeros())
1169 return PoisonValue::get(Op0->getType());
1170 }
1171
1172 bool IsSigned = Opcode == Instruction::SDiv;
1173
1174 // (X rem Y) / Y -> 0
1175 if ((IsSigned && match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1176 (!IsSigned && match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1177 return Constant::getNullValue(Op0->getType());
1178
1179 // (X /u C1) /u C2 -> 0 if C1 * C2 overflow
1180 ConstantInt *C1, *C2;
1181 if (!IsSigned && match(Op0, m_UDiv(m_Value(), m_ConstantInt(C1))) &&
1182 match(Op1, m_ConstantInt(C2))) {
1183 bool Overflow;
1184 (void)C1->getValue().umul_ov(C2->getValue(), Overflow);
1185 if (Overflow)
1186 return Constant::getNullValue(Op0->getType());
1187 }
1188
1189 // If the operation is with the result of a select instruction, check whether
1190 // operating on either branch of the select always yields the same value.
1191 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1192 if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1193 return V;
1194
1195 // If the operation is with the result of a phi instruction, check whether
1196 // operating on all incoming values of the phi always yields the same value.
1197 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1198 if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1199 return V;
1200
1201 if (isDivZero(Op0, Op1, Q, MaxRecurse, IsSigned))
1202 return Constant::getNullValue(Op0->getType());
1203
1204 return nullptr;
1205 }
1206
1207 /// These are simplifications common to SRem and URem.
static Value *simplifyRem(Instruction::BinaryOps Opcode, Value *Op0, Value *Op1,
1209 const SimplifyQuery &Q, unsigned MaxRecurse) {
1210 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1211 return C;
1212
1213 if (Value *V = simplifyDivRem(Opcode, Op0, Op1, Q, MaxRecurse))
1214 return V;
1215
1216 // (X % Y) % Y -> X % Y
1217 if ((Opcode == Instruction::SRem &&
1218 match(Op0, m_SRem(m_Value(), m_Specific(Op1)))) ||
1219 (Opcode == Instruction::URem &&
1220 match(Op0, m_URem(m_Value(), m_Specific(Op1)))))
1221 return Op0;
1222
1223 // (X << Y) % X -> 0
1224 if (Q.IIQ.UseInstrInfo &&
1225 ((Opcode == Instruction::SRem &&
1226 match(Op0, m_NSWShl(m_Specific(Op1), m_Value()))) ||
1227 (Opcode == Instruction::URem &&
1228 match(Op0, m_NUWShl(m_Specific(Op1), m_Value())))))
1229 return Constant::getNullValue(Op0->getType());
1230
1231 // If the operation is with the result of a select instruction, check whether
1232 // operating on either branch of the select always yields the same value.
1233 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1234 if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1235 return V;
1236
1237 // If the operation is with the result of a phi instruction, check whether
1238 // operating on all incoming values of the phi always yields the same value.
1239 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1240 if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1241 return V;
1242
1243 // If X / Y == 0, then X % Y == X.
1244 if (isDivZero(Op0, Op1, Q, MaxRecurse, Opcode == Instruction::SRem))
1245 return Op0;
1246
1247 return nullptr;
1248 }
1249
1250 /// Given operands for an SDiv, see if we can fold the result.
1251 /// If not, this returns null.
static Value *simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
1253 const SimplifyQuery &Q, unsigned MaxRecurse) {
1254 // If two operands are negated and no signed overflow, return -1.
1255 if (isKnownNegation(Op0, Op1, /*NeedNSW=*/true))
1256 return Constant::getAllOnesValue(Op0->getType());
1257
1258 return simplifyDiv(Instruction::SDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1259 }
1260
Value *llvm::simplifySDivInst(Value *Op0, Value *Op1, bool IsExact,
1262 const SimplifyQuery &Q) {
1263 return ::simplifySDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
1264 }
1265
1266 /// Given operands for a UDiv, see if we can fold the result.
1267 /// If not, this returns null.
static Value *simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
1269 const SimplifyQuery &Q, unsigned MaxRecurse) {
1270 return simplifyDiv(Instruction::UDiv, Op0, Op1, IsExact, Q, MaxRecurse);
1271 }
1272
Value *llvm::simplifyUDivInst(Value *Op0, Value *Op1, bool IsExact,
1274 const SimplifyQuery &Q) {
1275 return ::simplifyUDivInst(Op0, Op1, IsExact, Q, RecursionLimit);
1276 }
1277
1278 /// Given operands for an SRem, see if we can fold the result.
1279 /// If not, this returns null.
static Value *simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1281 unsigned MaxRecurse) {
1282 // If the divisor is 0, the result is undefined, so assume the divisor is -1.
1283 // srem Op0, (sext i1 X) --> srem Op0, -1 --> 0
1284 Value *X;
1285 if (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1))
1286 return ConstantInt::getNullValue(Op0->getType());
1287
1288 // If the two operands are negated, return 0.
1289 if (isKnownNegation(Op0, Op1))
1290 return ConstantInt::getNullValue(Op0->getType());
1291
1292 return simplifyRem(Instruction::SRem, Op0, Op1, Q, MaxRecurse);
1293 }
1294
Value *llvm::simplifySRemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1296 return ::simplifySRemInst(Op0, Op1, Q, RecursionLimit);
1297 }
1298
1299 /// Given operands for a URem, see if we can fold the result.
1300 /// If not, this returns null.
static Value *simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
1302 unsigned MaxRecurse) {
1303 return simplifyRem(Instruction::URem, Op0, Op1, Q, MaxRecurse);
1304 }
1305
Value *llvm::simplifyURemInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
1307 return ::simplifyURemInst(Op0, Op1, Q, RecursionLimit);
1308 }
1309
1310 /// Returns true if a shift by \c Amount always yields poison.
static bool isPoisonShift(Value *Amount, const SimplifyQuery &Q) {
1312 Constant *C = dyn_cast<Constant>(Amount);
1313 if (!C)
1314 return false;
1315
1316 // X shift by undef -> poison because it may shift by the bitwidth.
1317 if (Q.isUndefValue(C))
1318 return true;
1319
1320 // Shifting by the bitwidth or more is poison. This covers scalars and
1321 // fixed/scalable vectors with splat constants.
1322 const APInt *AmountC;
1323 if (match(C, m_APInt(AmountC)) && AmountC->uge(AmountC->getBitWidth()))
1324 return true;
1325
1326 // Try harder for fixed-length vectors:
1327 // If all lanes of a vector shift are poison, the whole shift is poison.
1328 if (isa<ConstantVector>(C) || isa<ConstantDataVector>(C)) {
1329 for (unsigned I = 0,
1330 E = cast<FixedVectorType>(C->getType())->getNumElements();
1331 I != E; ++I)
1332 if (!isPoisonShift(C->getAggregateElement(I), Q))
1333 return false;
1334 return true;
1335 }
1336
1337 return false;
1338 }
1339
1340 /// Given operands for an Shl, LShr or AShr, see if we can fold the result.
1341 /// If not, this returns null.
static Value *simplifyShift(Instruction::BinaryOps Opcode, Value *Op0,
1343 Value *Op1, bool IsNSW, const SimplifyQuery &Q,
1344 unsigned MaxRecurse) {
1345 if (Constant *C = foldOrCommuteConstant(Opcode, Op0, Op1, Q))
1346 return C;
1347
1348 // poison shift by X -> poison
1349 if (isa<PoisonValue>(Op0))
1350 return Op0;
1351
1352 // 0 shift by X -> 0
1353 if (match(Op0, m_Zero()))
1354 return Constant::getNullValue(Op0->getType());
1355
1356 // X shift by 0 -> X
1357 // Shift-by-sign-extended bool must be shift-by-0 because shift-by-all-ones
1358 // would be poison.
1359 Value *X;
1360 if (match(Op1, m_Zero()) ||
1361 (match(Op1, m_SExt(m_Value(X))) && X->getType()->isIntOrIntVectorTy(1)))
1362 return Op0;
1363
1364 // Fold undefined shifts.
1365 if (isPoisonShift(Op1, Q))
1366 return PoisonValue::get(Op0->getType());
1367
1368 // If the operation is with the result of a select instruction, check whether
1369 // operating on either branch of the select always yields the same value.
1370 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1))
1371 if (Value *V = threadBinOpOverSelect(Opcode, Op0, Op1, Q, MaxRecurse))
1372 return V;
1373
1374 // If the operation is with the result of a phi instruction, check whether
1375 // operating on all incoming values of the phi always yields the same value.
1376 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
1377 if (Value *V = threadBinOpOverPHI(Opcode, Op0, Op1, Q, MaxRecurse))
1378 return V;
1379
1380 // If any bits in the shift amount make that value greater than or equal to
1381 // the number of bits in the type, the shift is undefined.
1382 KnownBits KnownAmt = computeKnownBits(Op1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1383 if (KnownAmt.getMinValue().uge(KnownAmt.getBitWidth()))
1384 return PoisonValue::get(Op0->getType());
1385
1386 // If all valid bits in the shift amount are known zero, the first operand is
1387 // unchanged.
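  // e.g., for an i32 shift only the low 5 bits of the amount can describe an
  // in-range shift; if those are known zero, the amount is either 0 (no-op) or
  // >= 32 (poison), so returning Op0 is safe either way.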
1388 unsigned NumValidShiftBits = Log2_32_Ceil(KnownAmt.getBitWidth());
1389 if (KnownAmt.countMinTrailingZeros() >= NumValidShiftBits)
1390 return Op0;
1391
1392 // Check for nsw shl leading to a poison value.
1393 if (IsNSW) {
1394 assert(Opcode == Instruction::Shl && "Expected shl for nsw instruction");
1395 KnownBits KnownVal = computeKnownBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1396 KnownBits KnownShl = KnownBits::shl(KnownVal, KnownAmt);
1397
1398 if (KnownVal.Zero.isSignBitSet())
1399 KnownShl.Zero.setSignBit();
1400 if (KnownVal.One.isSignBitSet())
1401 KnownShl.One.setSignBit();
1402
1403 if (KnownShl.hasConflict())
1404 return PoisonValue::get(Op0->getType());
1405 }
1406
1407 return nullptr;
1408 }
1409
1410 /// Given operands for an LShr or AShr, see if we can
1411 /// fold the result. If not, this returns null.
1412 static Value *simplifyRightShift(Instruction::BinaryOps Opcode, Value *Op0,
1413 Value *Op1, bool IsExact,
1414 const SimplifyQuery &Q, unsigned MaxRecurse) {
1415 if (Value *V =
1416 simplifyShift(Opcode, Op0, Op1, /*IsNSW*/ false, Q, MaxRecurse))
1417 return V;
1418
1419 // X >> X -> 0
1420 if (Op0 == Op1)
1421 return Constant::getNullValue(Op0->getType());
1422
1423 // undef >> X -> 0
1424 // undef >> X -> undef (if it's exact)
1425 if (Q.isUndefValue(Op0))
1426 return IsExact ? Op0 : Constant::getNullValue(Op0->getType());
1427
1428 // The low bit cannot be shifted out of an exact shift if it is set.
1429 // TODO: Generalize by counting trailing zeros (see fold for exact division).
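  // e.g., 'lshr exact i8 %x, %y' with the low bit of %x known set: any nonzero
  // %y would shift that set bit out and yield poison, so returning %x is
  // correct whether %y is 0 or not.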
1430 if (IsExact) {
1431 KnownBits Op0Known =
1432 computeKnownBits(Op0, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT);
1433 if (Op0Known.One[0])
1434 return Op0;
1435 }
1436
1437 return nullptr;
1438 }
1439
1440 /// Given operands for an Shl, see if we can fold the result.
1441 /// If not, this returns null.
1442 static Value *simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1443 const SimplifyQuery &Q, unsigned MaxRecurse) {
1444 if (Value *V =
1445 simplifyShift(Instruction::Shl, Op0, Op1, IsNSW, Q, MaxRecurse))
1446 return V;
1447
1448 // undef << X -> 0
1449 // undef << X -> undef (if it's NSW/NUW)
1450 if (Q.isUndefValue(Op0))
1451 return IsNSW || IsNUW ? Op0 : Constant::getNullValue(Op0->getType());
1452
1453 // (X >> A) << A -> X
1454 Value *X;
1455 if (Q.IIQ.UseInstrInfo &&
1456 match(Op0, m_Exact(m_Shr(m_Value(X), m_Specific(Op1)))))
1457 return X;
1458
1459 // shl nuw i8 C, %x -> C iff C has sign bit set.
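  // e.g., 'shl nuw i8 -128, %x': a nonzero %x would shift the set sign bit out
  // and violate 'nuw' (poison), so the result is -128 whether %x is 0 or not.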
1460 if (IsNUW && match(Op0, m_Negative()))
1461 return Op0;
1462 // NOTE: could use computeKnownBits() / LazyValueInfo,
1463 // but the cost-benefit analysis suggests it isn't worth it.
1464
1465 return nullptr;
1466 }
1467
1468 Value *llvm::simplifyShlInst(Value *Op0, Value *Op1, bool IsNSW, bool IsNUW,
1469 const SimplifyQuery &Q) {
1470 return ::simplifyShlInst(Op0, Op1, IsNSW, IsNUW, Q, RecursionLimit);
1471 }
1472
1473 /// Given operands for an LShr, see if we can fold the result.
1474 /// If not, this returns null.
1475 static Value *simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1476 const SimplifyQuery &Q, unsigned MaxRecurse) {
1477 if (Value *V = simplifyRightShift(Instruction::LShr, Op0, Op1, IsExact, Q,
1478 MaxRecurse))
1479 return V;
1480
1481 // (X << A) >> A -> X
1482 Value *X;
1483 if (match(Op0, m_NUWShl(m_Value(X), m_Specific(Op1))))
1484 return X;
1485
1486 // ((X << A) | Y) >> A -> X if effective width of Y is not larger than A.
1487 // We can return X as we do in the above case since OR alters no bits in X.
1488 // SimplifyDemandedBits in InstCombine can do more general optimization for
1489 // bit manipulation. This pattern aims to provide opportunities for other
1490 // optimizers by supporting a simple but common case in InstSimplify.
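  // e.g., ((X nuw<< 8) | Y) >> 8 --> X when Y is known to need at most 8 bits:
  // Y's bits are all shifted out, and 'nuw' guarantees the shl lost no bits of X.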
1491 Value *Y;
1492 const APInt *ShRAmt, *ShLAmt;
1493 if (match(Op1, m_APInt(ShRAmt)) &&
1494 match(Op0, m_c_Or(m_NUWShl(m_Value(X), m_APInt(ShLAmt)), m_Value(Y))) &&
1495 *ShRAmt == *ShLAmt) {
1496 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1497 const unsigned EffWidthY = YKnown.countMaxActiveBits();
1498 if (ShRAmt->uge(EffWidthY))
1499 return X;
1500 }
1501
1502 return nullptr;
1503 }
1504
1505 Value *llvm::simplifyLShrInst(Value *Op0, Value *Op1, bool IsExact,
1506 const SimplifyQuery &Q) {
1507 return ::simplifyLShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1508 }
1509
1510 /// Given operands for an AShr, see if we can fold the result.
1511 /// If not, this returns null.
1512 static Value *simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1513 const SimplifyQuery &Q, unsigned MaxRecurse) {
1514 if (Value *V = simplifyRightShift(Instruction::AShr, Op0, Op1, IsExact, Q,
1515 MaxRecurse))
1516 return V;
1517
1518 // -1 >>a X --> -1
1519 // (-1 << X) a>> X --> -1
1520 // Do not return Op0 because it may contain undef elements if it's a vector.
1521 if (match(Op0, m_AllOnes()) ||
1522 match(Op0, m_Shl(m_AllOnes(), m_Specific(Op1))))
1523 return Constant::getAllOnesValue(Op0->getType());
1524
1525 // (X << A) >> A -> X
1526 Value *X;
1527 if (Q.IIQ.UseInstrInfo && match(Op0, m_NSWShl(m_Value(X), m_Specific(Op1))))
1528 return X;
1529
1530 // Arithmetic shifting an all-sign-bit value is a no-op.
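  // e.g., if Op0 is known to be 0 or -1 (every bit is a copy of the sign bit),
  // ashr by any in-range amount gives Op0 back; an out-of-range amount is
  // poison, so returning Op0 is a valid refinement there as well.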
1531 unsigned NumSignBits = ComputeNumSignBits(Op0, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
1532 if (NumSignBits == Op0->getType()->getScalarSizeInBits())
1533 return Op0;
1534
1535 return nullptr;
1536 }
1537
1538 Value *llvm::simplifyAShrInst(Value *Op0, Value *Op1, bool IsExact,
1539 const SimplifyQuery &Q) {
1540 return ::simplifyAShrInst(Op0, Op1, IsExact, Q, RecursionLimit);
1541 }
1542
1543 /// Commuted variants are assumed to be handled by calling this function again
1544 /// with the parameters swapped.
1545 static Value *simplifyUnsignedRangeCheck(ICmpInst *ZeroICmp,
1546 ICmpInst *UnsignedICmp, bool IsAnd,
1547 const SimplifyQuery &Q) {
1548 Value *X, *Y;
1549
1550 ICmpInst::Predicate EqPred;
1551 if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(Y), m_Zero())) ||
1552 !ICmpInst::isEquality(EqPred))
1553 return nullptr;
1554
1555 ICmpInst::Predicate UnsignedPred;
1556
1557 Value *A, *B;
1558 // Y = (A - B);
1559 if (match(Y, m_Sub(m_Value(A), m_Value(B)))) {
1560 if (match(UnsignedICmp,
1561 m_c_ICmp(UnsignedPred, m_Specific(A), m_Specific(B))) &&
1562 ICmpInst::isUnsigned(UnsignedPred)) {
1563 // A >=/<= B || (A - B) != 0 <--> true
1564 if ((UnsignedPred == ICmpInst::ICMP_UGE ||
1565 UnsignedPred == ICmpInst::ICMP_ULE) &&
1566 EqPred == ICmpInst::ICMP_NE && !IsAnd)
1567 return ConstantInt::getTrue(UnsignedICmp->getType());
1568 // A </> B && (A - B) == 0 <--> false
1569 if ((UnsignedPred == ICmpInst::ICMP_ULT ||
1570 UnsignedPred == ICmpInst::ICMP_UGT) &&
1571 EqPred == ICmpInst::ICMP_EQ && IsAnd)
1572 return ConstantInt::getFalse(UnsignedICmp->getType());
1573
1574 // A </> B && (A - B) != 0 <--> A </> B
1575 // A </> B || (A - B) != 0 <--> (A - B) != 0
1576 if (EqPred == ICmpInst::ICMP_NE && (UnsignedPred == ICmpInst::ICMP_ULT ||
1577 UnsignedPred == ICmpInst::ICMP_UGT))
1578 return IsAnd ? UnsignedICmp : ZeroICmp;
1579
1580 // A <=/>= B && (A - B) == 0 <--> (A - B) == 0
1581 // A <=/>= B || (A - B) == 0 <--> A <=/>= B
1582 if (EqPred == ICmpInst::ICMP_EQ && (UnsignedPred == ICmpInst::ICMP_ULE ||
1583 UnsignedPred == ICmpInst::ICMP_UGE))
1584 return IsAnd ? ZeroICmp : UnsignedICmp;
1585 }
1586
1587 // Given Y = (A - B)
1588 // Y >= A && Y != 0 --> Y >= A iff B != 0
1589 // Y < A || Y == 0 --> Y < A iff B != 0
1590 if (match(UnsignedICmp,
1591 m_c_ICmp(UnsignedPred, m_Specific(Y), m_Specific(A)))) {
1592 if (UnsignedPred == ICmpInst::ICMP_UGE && IsAnd &&
1593 EqPred == ICmpInst::ICMP_NE &&
1594 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1595 return UnsignedICmp;
1596 if (UnsignedPred == ICmpInst::ICMP_ULT && !IsAnd &&
1597 EqPred == ICmpInst::ICMP_EQ &&
1598 isKnownNonZero(B, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1599 return UnsignedICmp;
1600 }
1601 }
1602
1603 if (match(UnsignedICmp, m_ICmp(UnsignedPred, m_Value(X), m_Specific(Y))) &&
1604 ICmpInst::isUnsigned(UnsignedPred))
1605 ;
1606 else if (match(UnsignedICmp,
1607 m_ICmp(UnsignedPred, m_Specific(Y), m_Value(X))) &&
1608 ICmpInst::isUnsigned(UnsignedPred))
1609 UnsignedPred = ICmpInst::getSwappedPredicate(UnsignedPred);
1610 else
1611 return nullptr;
1612
1613 // X > Y && Y == 0 --> Y == 0 iff X != 0
1614 // X > Y || Y == 0 --> X > Y iff X != 0
1615 if (UnsignedPred == ICmpInst::ICMP_UGT && EqPred == ICmpInst::ICMP_EQ &&
1616 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1617 return IsAnd ? ZeroICmp : UnsignedICmp;
1618
1619 // X <= Y && Y != 0 --> X <= Y iff X != 0
1620 // X <= Y || Y != 0 --> Y != 0 iff X != 0
1621 if (UnsignedPred == ICmpInst::ICMP_ULE && EqPred == ICmpInst::ICMP_NE &&
1622 isKnownNonZero(X, Q.DL, /*Depth=*/0, Q.AC, Q.CxtI, Q.DT))
1623 return IsAnd ? UnsignedICmp : ZeroICmp;
1624
1625 // The transforms below here are expected to be handled more generally with
1626 // simplifyAndOrOfICmpsWithLimitConst() or in InstCombine's
1627 // foldAndOrOfICmpsWithConstEq(). If we are looking to trim optimizer overlap,
1628 // these are candidates for removal.
1629
1630 // X < Y && Y != 0 --> X < Y
1631 // X < Y || Y != 0 --> Y != 0
1632 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE)
1633 return IsAnd ? UnsignedICmp : ZeroICmp;
1634
1635 // X >= Y && Y == 0 --> Y == 0
1636 // X >= Y || Y == 0 --> X >= Y
1637 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ)
1638 return IsAnd ? ZeroICmp : UnsignedICmp;
1639
1640 // X < Y && Y == 0 --> false
1641 if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_EQ &&
1642 IsAnd)
1643 return getFalse(UnsignedICmp->getType());
1644
1645 // X >= Y || Y != 0 --> true
1646 if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_NE &&
1647 !IsAnd)
1648 return getTrue(UnsignedICmp->getType());
1649
1650 return nullptr;
1651 }
1652
1653 /// Test if a pair of compares with a shared operand and 2 constants has an
1654 /// empty set intersection, full set union, or if one compare is a superset of
1655 /// the other.
1656 static Value *simplifyAndOrOfICmpsWithConstants(ICmpInst *Cmp0, ICmpInst *Cmp1,
1657 bool IsAnd) {
1658 // Look for this pattern: {and/or} (icmp X, C0), (icmp X, C1)).
1659 if (Cmp0->getOperand(0) != Cmp1->getOperand(0))
1660 return nullptr;
1661
1662 const APInt *C0, *C1;
1663 if (!match(Cmp0->getOperand(1), m_APInt(C0)) ||
1664 !match(Cmp1->getOperand(1), m_APInt(C1)))
1665 return nullptr;
1666
1667 auto Range0 = ConstantRange::makeExactICmpRegion(Cmp0->getPredicate(), *C0);
1668 auto Range1 = ConstantRange::makeExactICmpRegion(Cmp1->getPredicate(), *C1);
1669
1670 // For and-of-compares, check if the intersection is empty:
1671 // (icmp X, C0) && (icmp X, C1) --> empty set --> false
1672 if (IsAnd && Range0.intersectWith(Range1).isEmptySet())
1673 return getFalse(Cmp0->getType());
1674
1675 // For or-of-compares, check if the union is full:
1676 // (icmp X, C0) || (icmp X, C1) --> full set --> true
1677 if (!IsAnd && Range0.unionWith(Range1).isFullSet())
1678 return getTrue(Cmp0->getType());
1679
1680 // Is one range a superset of the other?
1681 // If this is and-of-compares, take the smaller set:
1682 // (icmp sgt X, 4) && (icmp sgt X, 42) --> icmp sgt X, 42
1683 // If this is or-of-compares, take the larger set:
1684 // (icmp sgt X, 4) || (icmp sgt X, 42) --> icmp sgt X, 4
1685 if (Range0.contains(Range1))
1686 return IsAnd ? Cmp1 : Cmp0;
1687 if (Range1.contains(Range0))
1688 return IsAnd ? Cmp0 : Cmp1;
1689
1690 return nullptr;
1691 }
1692
1693 static Value *simplifyAndOrOfICmpsWithZero(ICmpInst *Cmp0, ICmpInst *Cmp1,
1694 bool IsAnd) {
1695 ICmpInst::Predicate P0 = Cmp0->getPredicate(), P1 = Cmp1->getPredicate();
1696 if (!match(Cmp0->getOperand(1), m_Zero()) ||
1697 !match(Cmp1->getOperand(1), m_Zero()) || P0 != P1)
1698 return nullptr;
1699
1700 if ((IsAnd && P0 != ICmpInst::ICMP_NE) || (!IsAnd && P1 != ICmpInst::ICMP_EQ))
1701 return nullptr;
1702
1703 // We have either "(X == 0 || Y == 0)" or "(X != 0 && Y != 0)".
1704 Value *X = Cmp0->getOperand(0);
1705 Value *Y = Cmp1->getOperand(0);
1706
1707 // If one of the compares is a masked version of a (not) null check, then
1708 // that compare implies the other, so we eliminate the other. Optionally, look
1709 // through a pointer-to-int cast to match a null check of a pointer type.
1710
1711 // (X == 0) || (([ptrtoint] X & ?) == 0) --> ([ptrtoint] X & ?) == 0
1712 // (X == 0) || ((? & [ptrtoint] X) == 0) --> (? & [ptrtoint] X) == 0
1713 // (X != 0) && (([ptrtoint] X & ?) != 0) --> ([ptrtoint] X & ?) != 0
1714 // (X != 0) && ((? & [ptrtoint] X) != 0) --> (? & [ptrtoint] X) != 0
1715 if (match(Y, m_c_And(m_Specific(X), m_Value())) ||
1716 match(Y, m_c_And(m_PtrToInt(m_Specific(X)), m_Value())))
1717 return Cmp1;
1718
1719 // (([ptrtoint] Y & ?) == 0) || (Y == 0) --> ([ptrtoint] Y & ?) == 0
1720 // ((? & [ptrtoint] Y) == 0) || (Y == 0) --> (? & [ptrtoint] Y) == 0
1721 // (([ptrtoint] Y & ?) != 0) && (Y != 0) --> ([ptrtoint] Y & ?) != 0
1722 // ((? & [ptrtoint] Y) != 0) && (Y != 0) --> (? & [ptrtoint] Y) != 0
1723 if (match(X, m_c_And(m_Specific(Y), m_Value())) ||
1724 match(X, m_c_And(m_PtrToInt(m_Specific(Y)), m_Value())))
1725 return Cmp0;
1726
1727 return nullptr;
1728 }
1729
1730 static Value *simplifyAndOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1731 const InstrInfoQuery &IIQ) {
1732 // (icmp (add V, C0), C1) & (icmp V, C0)
1733 ICmpInst::Predicate Pred0, Pred1;
1734 const APInt *C0, *C1;
1735 Value *V;
1736 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1737 return nullptr;
1738
1739 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1740 return nullptr;
1741
1742 auto *AddInst = cast<OverflowingBinaryOperator>(Op0->getOperand(0));
1743 if (AddInst->getOperand(1) != Op1->getOperand(1))
1744 return nullptr;
1745
1746 Type *ITy = Op0->getType();
1747 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1748 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1749
1750 const APInt Delta = *C1 - *C0;
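  // Worked example for the Delta == 2 unsigned/signed case below: with C0 == 1
  // and C1 == 3, (V + 1) u< 3 restricts V to {-1, 0, 1}, and no such V
  // satisfies V s> 1, so the 'and' of the two compares is always false.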
1751 if (C0->isStrictlyPositive()) {
1752 if (Delta == 2) {
1753 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_SGT)
1754 return getFalse(ITy);
1755 if (Pred0 == ICmpInst::ICMP_SLT && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1756 return getFalse(ITy);
1757 }
1758 if (Delta == 1) {
1759 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_SGT)
1760 return getFalse(ITy);
1761 if (Pred0 == ICmpInst::ICMP_SLE && Pred1 == ICmpInst::ICMP_SGT && IsNSW)
1762 return getFalse(ITy);
1763 }
1764 }
1765 if (C0->getBoolValue() && IsNUW) {
1766 if (Delta == 2)
1767 if (Pred0 == ICmpInst::ICMP_ULT && Pred1 == ICmpInst::ICMP_UGT)
1768 return getFalse(ITy);
1769 if (Delta == 1)
1770 if (Pred0 == ICmpInst::ICMP_ULE && Pred1 == ICmpInst::ICMP_UGT)
1771 return getFalse(ITy);
1772 }
1773
1774 return nullptr;
1775 }
1776
1777 /// Try to eliminate compares with signed or unsigned min/max constants.
1778 static Value *simplifyAndOrOfICmpsWithLimitConst(ICmpInst *Cmp0, ICmpInst *Cmp1,
1779 bool IsAnd) {
1780 // Canonicalize an equality compare as Cmp0.
1781 if (Cmp1->isEquality())
1782 std::swap(Cmp0, Cmp1);
1783 if (!Cmp0->isEquality())
1784 return nullptr;
1785
1786 // The non-equality compare must include a common operand (X). Canonicalize
1787 // the common operand as operand 0 (the predicate is swapped if the common
1788 // operand was operand 1).
1789 ICmpInst::Predicate Pred0 = Cmp0->getPredicate();
1790 Value *X = Cmp0->getOperand(0);
1791 ICmpInst::Predicate Pred1;
1792 bool HasNotOp = match(Cmp1, m_c_ICmp(Pred1, m_Not(m_Specific(X)), m_Value()));
1793 if (!HasNotOp && !match(Cmp1, m_c_ICmp(Pred1, m_Specific(X), m_Value())))
1794 return nullptr;
1795 if (ICmpInst::isEquality(Pred1))
1796 return nullptr;
1797
1798 // The equality compare must be against a constant. Flip bits if we matched
1799 // a bitwise not. Convert a null pointer constant to an integer zero value.
1800 APInt MinMaxC;
1801 const APInt *C;
1802 if (match(Cmp0->getOperand(1), m_APInt(C)))
1803 MinMaxC = HasNotOp ? ~*C : *C;
1804 else if (isa<ConstantPointerNull>(Cmp0->getOperand(1)))
1805 MinMaxC = APInt::getZero(8);
1806 else
1807 return nullptr;
1808
1809 // DeMorganize if this is 'or': P0 || P1 --> !P0 && !P1.
1810 if (!IsAnd) {
1811 Pred0 = ICmpInst::getInversePredicate(Pred0);
1812 Pred1 = ICmpInst::getInversePredicate(Pred1);
1813 }
1814
1815 // Normalize to unsigned compare and unsigned min/max value.
1816 // Example for 8-bit: -128 + 128 -> 0; 127 + 128 -> 255
1817 if (ICmpInst::isSigned(Pred1)) {
1818 Pred1 = ICmpInst::getUnsignedPredicate(Pred1);
1819 MinMaxC += APInt::getSignedMinValue(MinMaxC.getBitWidth());
1820 }
1821
1822 // (X != MAX) && (X < Y) --> X < Y
1823 // (X == MAX) || (X >= Y) --> X >= Y
1824 if (MinMaxC.isMaxValue())
1825 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT)
1826 return Cmp1;
1827
1828 // (X != MIN) && (X > Y) --> X > Y
1829 // (X == MIN) || (X <= Y) --> X <= Y
1830 if (MinMaxC.isMinValue())
1831 if (Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_UGT)
1832 return Cmp1;
1833
1834 return nullptr;
1835 }
1836
1837 /// Try to simplify and/or of icmp with ctpop intrinsic.
1838 static Value *simplifyAndOrOfICmpsWithCtpop(ICmpInst *Cmp0, ICmpInst *Cmp1,
1839 bool IsAnd) {
1840 ICmpInst::Predicate Pred0, Pred1;
1841 Value *X;
1842 const APInt *C;
1843 if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
1844 m_APInt(C))) ||
1845 !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())) || C->isZero())
1846 return nullptr;
1847
1848 // (ctpop(X) == C) || (X != 0) --> X != 0 where C > 0
1849 if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_NE)
1850 return Cmp1;
1851 // (ctpop(X) != C) && (X == 0) --> X == 0 where C > 0
1852 if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_EQ)
1853 return Cmp1;
1854
1855 return nullptr;
1856 }
1857
1858 static Value *simplifyAndOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1859 const SimplifyQuery &Q) {
1860 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/true, Q))
1861 return X;
1862 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/true, Q))
1863 return X;
1864
1865 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, true))
1866 return X;
1867
1868 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, true))
1869 return X;
1870
1871 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, true))
1872 return X;
1873
1874 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, true))
1875 return X;
1876 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, true))
1877 return X;
1878
1879 if (Value *X = simplifyAndOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1880 return X;
1881 if (Value *X = simplifyAndOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1882 return X;
1883
1884 return nullptr;
1885 }
1886
1887 static Value *simplifyOrOfICmpsWithAdd(ICmpInst *Op0, ICmpInst *Op1,
1888 const InstrInfoQuery &IIQ) {
1889 // (icmp (add V, C0), C1) | (icmp V, C0)
1890 ICmpInst::Predicate Pred0, Pred1;
1891 const APInt *C0, *C1;
1892 Value *V;
1893 if (!match(Op0, m_ICmp(Pred0, m_Add(m_Value(V), m_APInt(C0)), m_APInt(C1))))
1894 return nullptr;
1895
1896 if (!match(Op1, m_ICmp(Pred1, m_Specific(V), m_Value())))
1897 return nullptr;
1898
1899 auto *AddInst = cast<BinaryOperator>(Op0->getOperand(0));
1900 if (AddInst->getOperand(1) != Op1->getOperand(1))
1901 return nullptr;
1902
1903 Type *ITy = Op0->getType();
1904 bool IsNSW = IIQ.hasNoSignedWrap(AddInst);
1905 bool IsNUW = IIQ.hasNoUnsignedWrap(AddInst);
1906
1907 const APInt Delta = *C1 - *C0;
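  // Worked example for the Delta == 2 unsigned/signed case below: with C0 == 1
  // and C1 == 3, if (V + 1) u>= 3 is false then V is in {-1, 0, 1}, and every
  // such V satisfies V s<= 1, so the 'or' of the two compares is always true.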
1908 if (C0->isStrictlyPositive()) {
1909 if (Delta == 2) {
1910 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_SLE)
1911 return getTrue(ITy);
1912 if (Pred0 == ICmpInst::ICMP_SGE && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1913 return getTrue(ITy);
1914 }
1915 if (Delta == 1) {
1916 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_SLE)
1917 return getTrue(ITy);
1918 if (Pred0 == ICmpInst::ICMP_SGT && Pred1 == ICmpInst::ICMP_SLE && IsNSW)
1919 return getTrue(ITy);
1920 }
1921 }
1922 if (C0->getBoolValue() && IsNUW) {
1923 if (Delta == 2)
1924 if (Pred0 == ICmpInst::ICMP_UGE && Pred1 == ICmpInst::ICMP_ULE)
1925 return getTrue(ITy);
1926 if (Delta == 1)
1927 if (Pred0 == ICmpInst::ICMP_UGT && Pred1 == ICmpInst::ICMP_ULE)
1928 return getTrue(ITy);
1929 }
1930
1931 return nullptr;
1932 }
1933
1934 static Value *simplifyOrOfICmps(ICmpInst *Op0, ICmpInst *Op1,
1935 const SimplifyQuery &Q) {
1936 if (Value *X = simplifyUnsignedRangeCheck(Op0, Op1, /*IsAnd=*/false, Q))
1937 return X;
1938 if (Value *X = simplifyUnsignedRangeCheck(Op1, Op0, /*IsAnd=*/false, Q))
1939 return X;
1940
1941 if (Value *X = simplifyAndOrOfICmpsWithConstants(Op0, Op1, false))
1942 return X;
1943
1944 if (Value *X = simplifyAndOrOfICmpsWithLimitConst(Op0, Op1, false))
1945 return X;
1946
1947 if (Value *X = simplifyAndOrOfICmpsWithZero(Op0, Op1, false))
1948 return X;
1949
1950 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op0, Op1, false))
1951 return X;
1952 if (Value *X = simplifyAndOrOfICmpsWithCtpop(Op1, Op0, false))
1953 return X;
1954
1955 if (Value *X = simplifyOrOfICmpsWithAdd(Op0, Op1, Q.IIQ))
1956 return X;
1957 if (Value *X = simplifyOrOfICmpsWithAdd(Op1, Op0, Q.IIQ))
1958 return X;
1959
1960 return nullptr;
1961 }
1962
1963 static Value *simplifyAndOrOfFCmps(const TargetLibraryInfo *TLI, FCmpInst *LHS,
1964 FCmpInst *RHS, bool IsAnd) {
1965 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1966 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1967 if (LHS0->getType() != RHS0->getType())
1968 return nullptr;
1969
1970 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1971 if ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1972 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO && !IsAnd)) {
1973 // (fcmp ord NNAN, X) & (fcmp ord X, Y) --> fcmp ord X, Y
1974 // (fcmp ord NNAN, X) & (fcmp ord Y, X) --> fcmp ord Y, X
1975 // (fcmp ord X, NNAN) & (fcmp ord X, Y) --> fcmp ord X, Y
1976 // (fcmp ord X, NNAN) & (fcmp ord Y, X) --> fcmp ord Y, X
1977 // (fcmp uno NNAN, X) | (fcmp uno X, Y) --> fcmp uno X, Y
1978 // (fcmp uno NNAN, X) | (fcmp uno Y, X) --> fcmp uno Y, X
1979 // (fcmp uno X, NNAN) | (fcmp uno X, Y) --> fcmp uno X, Y
1980 // (fcmp uno X, NNAN) | (fcmp uno Y, X) --> fcmp uno Y, X
1981 if ((isKnownNeverNaN(LHS0, TLI) && (LHS1 == RHS0 || LHS1 == RHS1)) ||
1982 (isKnownNeverNaN(LHS1, TLI) && (LHS0 == RHS0 || LHS0 == RHS1)))
1983 return RHS;
1984
1985 // (fcmp ord X, Y) & (fcmp ord NNAN, X) --> fcmp ord X, Y
1986 // (fcmp ord Y, X) & (fcmp ord NNAN, X) --> fcmp ord Y, X
1987 // (fcmp ord X, Y) & (fcmp ord X, NNAN) --> fcmp ord X, Y
1988 // (fcmp ord Y, X) & (fcmp ord X, NNAN) --> fcmp ord Y, X
1989 // (fcmp uno X, Y) | (fcmp uno NNAN, X) --> fcmp uno X, Y
1990 // (fcmp uno Y, X) | (fcmp uno NNAN, X) --> fcmp uno Y, X
1991 // (fcmp uno X, Y) | (fcmp uno X, NNAN) --> fcmp uno X, Y
1992 // (fcmp uno Y, X) | (fcmp uno X, NNAN) --> fcmp uno Y, X
1993 if ((isKnownNeverNaN(RHS0, TLI) && (RHS1 == LHS0 || RHS1 == LHS1)) ||
1994 (isKnownNeverNaN(RHS1, TLI) && (RHS0 == LHS0 || RHS0 == LHS1)))
1995 return LHS;
1996 }
1997
1998 return nullptr;
1999 }
2000
2001 static Value *simplifyAndOrOfCmps(const SimplifyQuery &Q, Value *Op0,
2002 Value *Op1, bool IsAnd) {
2003 // Look through casts of the 'and' operands to find compares.
2004 auto *Cast0 = dyn_cast<CastInst>(Op0);
2005 auto *Cast1 = dyn_cast<CastInst>(Op1);
2006 if (Cast0 && Cast1 && Cast0->getOpcode() == Cast1->getOpcode() &&
2007 Cast0->getSrcTy() == Cast1->getSrcTy()) {
2008 Op0 = Cast0->getOperand(0);
2009 Op1 = Cast1->getOperand(0);
2010 }
2011
2012 Value *V = nullptr;
2013 auto *ICmp0 = dyn_cast<ICmpInst>(Op0);
2014 auto *ICmp1 = dyn_cast<ICmpInst>(Op1);
2015 if (ICmp0 && ICmp1)
2016 V = IsAnd ? simplifyAndOfICmps(ICmp0, ICmp1, Q)
2017 : simplifyOrOfICmps(ICmp0, ICmp1, Q);
2018
2019 auto *FCmp0 = dyn_cast<FCmpInst>(Op0);
2020 auto *FCmp1 = dyn_cast<FCmpInst>(Op1);
2021 if (FCmp0 && FCmp1)
2022 V = simplifyAndOrOfFCmps(Q.TLI, FCmp0, FCmp1, IsAnd);
2023
2024 if (!V)
2025 return nullptr;
2026 if (!Cast0)
2027 return V;
2028
2029 // If we looked through casts, we can only handle a constant simplification
2030 // because we are not allowed to create a cast instruction here.
2031 if (auto *C = dyn_cast<Constant>(V))
2032 return ConstantExpr::getCast(Cast0->getOpcode(), C, Cast0->getType());
2033
2034 return nullptr;
2035 }
2036
2037 /// Given a bitwise logic op, check if the operands are add/sub with a common
2038 /// source value and inverted constant (identity: C - X -> ~(X + ~C)).
2039 static Value *simplifyLogicOfAddSub(Value *Op0, Value *Op1,
2040 Instruction::BinaryOps Opcode) {
2041 assert(Op0->getType() == Op1->getType() && "Mismatched binop types");
2042 assert(BinaryOperator::isBitwiseLogicOp(Opcode) && "Expected logic op");
2043 Value *X;
2044 Constant *C1, *C2;
2045 if ((match(Op0, m_Add(m_Value(X), m_Constant(C1))) &&
2046 match(Op1, m_Sub(m_Constant(C2), m_Specific(X)))) ||
2047 (match(Op1, m_Add(m_Value(X), m_Constant(C1))) &&
2048 match(Op0, m_Sub(m_Constant(C2), m_Specific(X))))) {
2049 if (ConstantExpr::getNot(C1) == C2) {
2050 // (X + C) & (~C - X) --> (X + C) & ~(X + C) --> 0
2051 // (X + C) | (~C - X) --> (X + C) | ~(X + C) --> -1
2052 // (X + C) ^ (~C - X) --> (X + C) ^ ~(X + C) --> -1
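      // e.g., with C == 5: ~5 == -6, so (X + 5) & (-6 - X) == (X + 5) & ~(X + 5)
      // == 0, and the 'or'/'xor' variants give -1.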
2053 Type *Ty = Op0->getType();
2054 return Opcode == Instruction::And ? ConstantInt::getNullValue(Ty)
2055 : ConstantInt::getAllOnesValue(Ty);
2056 }
2057 }
2058 return nullptr;
2059 }
2060
2061 /// Given operands for an And, see if we can fold the result.
2062 /// If not, this returns null.
2063 static Value *simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2064 unsigned MaxRecurse) {
2065 if (Constant *C = foldOrCommuteConstant(Instruction::And, Op0, Op1, Q))
2066 return C;
2067
2068 // X & poison -> poison
2069 if (isa<PoisonValue>(Op1))
2070 return Op1;
2071
2072 // X & undef -> 0
2073 if (Q.isUndefValue(Op1))
2074 return Constant::getNullValue(Op0->getType());
2075
2076 // X & X = X
2077 if (Op0 == Op1)
2078 return Op0;
2079
2080 // X & 0 = 0
2081 if (match(Op1, m_Zero()))
2082 return Constant::getNullValue(Op0->getType());
2083
2084 // X & -1 = X
2085 if (match(Op1, m_AllOnes()))
2086 return Op0;
2087
2088 // A & ~A = ~A & A = 0
2089 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2090 return Constant::getNullValue(Op0->getType());
2091
2092 // (A | ?) & A = A
2093 if (match(Op0, m_c_Or(m_Specific(Op1), m_Value())))
2094 return Op1;
2095
2096 // A & (A | ?) = A
2097 if (match(Op1, m_c_Or(m_Specific(Op0), m_Value())))
2098 return Op0;
2099
2100 // (X | Y) & (X | ~Y) --> X (commuted 8 ways)
2101 Value *X, *Y;
2102 if (match(Op0, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2103 match(Op1, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2104 return X;
2105 if (match(Op1, m_c_Or(m_Value(X), m_Not(m_Value(Y)))) &&
2106 match(Op0, m_c_Or(m_Deferred(X), m_Deferred(Y))))
2107 return X;
2108
2109 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::And))
2110 return V;
2111
2112 // A mask that only clears known zeros of a shifted value is a no-op.
2113 const APInt *Mask;
2114 const APInt *ShAmt;
2115 if (match(Op1, m_APInt(Mask))) {
2116 // If all bits in the inverted and shifted mask are clear:
2117 // and (shl X, ShAmt), Mask --> shl X, ShAmt
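    // e.g., i16: (X << 4) & 0xFFF0 --> X << 4, because the shift already
    // cleared the low 4 bits that the mask would clear.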
2118 if (match(Op0, m_Shl(m_Value(X), m_APInt(ShAmt))) &&
2119 (~(*Mask)).lshr(*ShAmt).isZero())
2120 return Op0;
2121
2122 // If all bits in the inverted and shifted mask are clear:
2123 // and (lshr X, ShAmt), Mask --> lshr X, ShAmt
2124 if (match(Op0, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
2125 (~(*Mask)).shl(*ShAmt).isZero())
2126 return Op0;
2127 }
2128
2129 // If we have a multiplication overflow check that is being 'and'ed with a
2130 // check that one of the multipliers is not zero, we can omit the 'and', and
2131 // only keep the overflow check.
2132 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, true))
2133 return Op1;
2134 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, true))
2135 return Op0;
2136
2137 // A & (-A) = A if A is a power of two or zero.
2138 if (match(Op0, m_Neg(m_Specific(Op1))) ||
2139 match(Op1, m_Neg(m_Specific(Op0)))) {
2140 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2141 Q.DT))
2142 return Op0;
2143 if (isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI,
2144 Q.DT))
2145 return Op1;
2146 }
2147
2148 // This is a similar pattern used for checking if a value is a power-of-2:
2149 // (A - 1) & A --> 0 (if A is a power-of-2 or 0)
2150 // A & (A - 1) --> 0 (if A is a power-of-2 or 0)
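  // e.g., A == 8: 7 & 8 == 0; A == 0: -1 & 0 == 0.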
2151 if (match(Op0, m_Add(m_Specific(Op1), m_AllOnes())) &&
2152 isKnownToBeAPowerOfTwo(Op1, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2153 return Constant::getNullValue(Op1->getType());
2154 if (match(Op1, m_Add(m_Specific(Op0), m_AllOnes())) &&
2155 isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ true, 0, Q.AC, Q.CxtI, Q.DT))
2156 return Constant::getNullValue(Op0->getType());
2157
2158 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, true))
2159 return V;
2160
2161 // Try some generic simplifications for associative operations.
2162 if (Value *V =
2163 simplifyAssociativeBinOp(Instruction::And, Op0, Op1, Q, MaxRecurse))
2164 return V;
2165
2166 // And distributes over Or. Try some generic simplifications based on this.
2167 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2168 Instruction::Or, Q, MaxRecurse))
2169 return V;
2170
2171 // And distributes over Xor. Try some generic simplifications based on this.
2172 if (Value *V = expandCommutativeBinOp(Instruction::And, Op0, Op1,
2173 Instruction::Xor, Q, MaxRecurse))
2174 return V;
2175
2176 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2177 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2178 // A & (A && B) -> A && B
2179 if (match(Op1, m_Select(m_Specific(Op0), m_Value(), m_Zero())))
2180 return Op1;
2181 else if (match(Op0, m_Select(m_Specific(Op1), m_Value(), m_Zero())))
2182 return Op0;
2183 }
2184 // If the operation is with the result of a select instruction, check
2185 // whether operating on either branch of the select always yields the same
2186 // value.
2187 if (Value *V =
2188 threadBinOpOverSelect(Instruction::And, Op0, Op1, Q, MaxRecurse))
2189 return V;
2190 }
2191
2192 // If the operation is with the result of a phi instruction, check whether
2193 // operating on all incoming values of the phi always yields the same value.
2194 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2195 if (Value *V =
2196 threadBinOpOverPHI(Instruction::And, Op0, Op1, Q, MaxRecurse))
2197 return V;
2198
2199 // Assuming the effective width of Y is not larger than A, i.e. all bits
2200 // from X and Y are disjoint in (X << A) | Y,
2201 // if the mask of this AND op covers all bits of X or Y, while it covers
2202 // no bits from the other, we can bypass this AND op. E.g.,
2203 // ((X << A) | Y) & Mask -> Y,
2204 // if Mask = ((1 << effective_width_of(Y)) - 1)
2205 // ((X << A) | Y) & Mask -> X << A,
2206 // if Mask = ((1 << effective_width_of(X)) - 1) << A
2207 // SimplifyDemandedBits in InstCombine can optimize the general case.
2208 // This pattern aims to help other passes for a common case.
2209 Value *XShifted;
2210 if (match(Op1, m_APInt(Mask)) &&
2211 match(Op0, m_c_Or(m_CombineAnd(m_NUWShl(m_Value(X), m_APInt(ShAmt)),
2212 m_Value(XShifted)),
2213 m_Value(Y)))) {
2214 const unsigned Width = Op0->getType()->getScalarSizeInBits();
2215 const unsigned ShftCnt = ShAmt->getLimitedValue(Width);
2216 const KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2217 const unsigned EffWidthY = YKnown.countMaxActiveBits();
2218 if (EffWidthY <= ShftCnt) {
2219 const KnownBits XKnown = computeKnownBits(X, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2220 const unsigned EffWidthX = XKnown.countMaxActiveBits();
2221 const APInt EffBitsY = APInt::getLowBitsSet(Width, EffWidthY);
2222 const APInt EffBitsX = APInt::getLowBitsSet(Width, EffWidthX) << ShftCnt;
2223 // If the mask is extracting all bits from X or Y as is, we can skip
2224 // this AND op.
2225 if (EffBitsY.isSubsetOf(*Mask) && !EffBitsX.intersects(*Mask))
2226 return Y;
2227 if (EffBitsX.isSubsetOf(*Mask) && !EffBitsY.intersects(*Mask))
2228 return XShifted;
2229 }
2230 }
2231
2232 // ((X | Y) ^ X ) & ((X | Y) ^ Y) --> 0
2233 // ((X | Y) ^ Y ) & ((X | Y) ^ X) --> 0
2234 BinaryOperator *Or;
2235 if (match(Op0, m_c_Xor(m_Value(X),
2236 m_CombineAnd(m_BinOp(Or),
2237 m_c_Or(m_Deferred(X), m_Value(Y))))) &&
2238 match(Op1, m_c_Xor(m_Specific(Or), m_Specific(Y))))
2239 return Constant::getNullValue(Op0->getType());
2240
2241 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2242 if (std::optional<bool> Implied = isImpliedCondition(Op0, Op1, Q.DL)) {
2243 // If Op0 is true implies Op1 is true, then Op0 is a subset of Op1.
2244 if (*Implied == true)
2245 return Op0;
2246 // If Op0 is true implies Op1 is false, then they are not true together.
2247 if (*Implied == false)
2248 return ConstantInt::getFalse(Op0->getType());
2249 }
2250 if (std::optional<bool> Implied = isImpliedCondition(Op1, Op0, Q.DL)) {
2251 // If Op1 is true implies Op0 is true, then Op1 is a subset of Op0.
2252 if (*Implied)
2253 return Op1;
2254 // If Op1 is true implies Op0 is false, then they are not true together.
2255 if (!*Implied)
2256 return ConstantInt::getFalse(Op1->getType());
2257 }
2258 }
2259
2260 if (Value *V = simplifyByDomEq(Instruction::And, Op0, Op1, Q, MaxRecurse))
2261 return V;
2262
2263 return nullptr;
2264 }
2265
2266 Value *llvm::simplifyAndInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2267 return ::simplifyAndInst(Op0, Op1, Q, RecursionLimit);
2268 }
2269
2270 // TODO: Many of these folds could use LogicalAnd/LogicalOr.
2271 static Value *simplifyOrLogic(Value *X, Value *Y) {
2272 assert(X->getType() == Y->getType() && "Expected same type for 'or' ops");
2273 Type *Ty = X->getType();
2274
2275 // X | ~X --> -1
2276 if (match(Y, m_Not(m_Specific(X))))
2277 return ConstantInt::getAllOnesValue(Ty);
2278
2279 // X | ~(X & ?) = -1
2280 if (match(Y, m_Not(m_c_And(m_Specific(X), m_Value()))))
2281 return ConstantInt::getAllOnesValue(Ty);
2282
2283 // X | (X & ?) --> X
2284 if (match(Y, m_c_And(m_Specific(X), m_Value())))
2285 return X;
2286
2287 Value *A, *B;
2288
2289 // (A ^ B) | (A | B) --> A | B
2290 // (A ^ B) | (B | A) --> B | A
2291 if (match(X, m_Xor(m_Value(A), m_Value(B))) &&
2292 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2293 return Y;
2294
2295 // ~(A ^ B) | (A | B) --> -1
2296 // ~(A ^ B) | (B | A) --> -1
2297 if (match(X, m_Not(m_Xor(m_Value(A), m_Value(B)))) &&
2298 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2299 return ConstantInt::getAllOnesValue(Ty);
2300
2301 // (A & ~B) | (A ^ B) --> A ^ B
2302 // (~B & A) | (A ^ B) --> A ^ B
2303 // (A & ~B) | (B ^ A) --> B ^ A
2304 // (~B & A) | (B ^ A) --> B ^ A
2305 if (match(X, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
2306 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2307 return Y;
2308
2309 // (~A ^ B) | (A & B) --> ~A ^ B
2310 // (B ^ ~A) | (A & B) --> B ^ ~A
2311 // (~A ^ B) | (B & A) --> ~A ^ B
2312 // (B ^ ~A) | (B & A) --> B ^ ~A
2313 if (match(X, m_c_Xor(m_NotForbidUndef(m_Value(A)), m_Value(B))) &&
2314 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2315 return X;
2316
2317 // (~A | B) | (A ^ B) --> -1
2318 // (~A | B) | (B ^ A) --> -1
2319 // (B | ~A) | (A ^ B) --> -1
2320 // (B | ~A) | (B ^ A) --> -1
2321 if (match(X, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2322 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2323 return ConstantInt::getAllOnesValue(Ty);
2324
2325 // (~A & B) | ~(A | B) --> ~A
2326 // (~A & B) | ~(B | A) --> ~A
2327 // (B & ~A) | ~(A | B) --> ~A
2328 // (B & ~A) | ~(B | A) --> ~A
2329 Value *NotA;
2330 if (match(X,
2331 m_c_And(m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2332 m_Value(B))) &&
2333 match(Y, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
2334 return NotA;
2335 // The same is true of Logical And
2336 // TODO: This could share the logic of the version above if there was a
2337 // version of LogicalAnd that allowed more than just i1 types.
2338 if (match(X, m_c_LogicalAnd(
2339 m_CombineAnd(m_Value(NotA), m_NotForbidUndef(m_Value(A))),
2340 m_Value(B))) &&
2341 match(Y, m_Not(m_c_LogicalOr(m_Specific(A), m_Specific(B)))))
2342 return NotA;
2343
2344 // ~(A ^ B) | (A & B) --> ~(A ^ B)
2345 // ~(A ^ B) | (B & A) --> ~(A ^ B)
2346 Value *NotAB;
2347 if (match(X, m_CombineAnd(m_NotForbidUndef(m_Xor(m_Value(A), m_Value(B))),
2348 m_Value(NotAB))) &&
2349 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2350 return NotAB;
2351
2352 // ~(A & B) | (A ^ B) --> ~(A & B)
2353 // ~(A & B) | (B ^ A) --> ~(A & B)
2354 if (match(X, m_CombineAnd(m_NotForbidUndef(m_And(m_Value(A), m_Value(B))),
2355 m_Value(NotAB))) &&
2356 match(Y, m_c_Xor(m_Specific(A), m_Specific(B))))
2357 return NotAB;
2358
2359 return nullptr;
2360 }
2361
2362 /// Given operands for an Or, see if we can fold the result.
2363 /// If not, this returns null.
2364 static Value *simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2365 unsigned MaxRecurse) {
2366 if (Constant *C = foldOrCommuteConstant(Instruction::Or, Op0, Op1, Q))
2367 return C;
2368
2369 // X | poison -> poison
2370 if (isa<PoisonValue>(Op1))
2371 return Op1;
2372
2373 // X | undef -> -1
2374 // X | -1 = -1
2375 // Do not return Op1 because it may contain undef elements if it's a vector.
2376 if (Q.isUndefValue(Op1) || match(Op1, m_AllOnes()))
2377 return Constant::getAllOnesValue(Op0->getType());
2378
2379 // X | X = X
2380 // X | 0 = X
2381 if (Op0 == Op1 || match(Op1, m_Zero()))
2382 return Op0;
2383
2384 if (Value *R = simplifyOrLogic(Op0, Op1))
2385 return R;
2386 if (Value *R = simplifyOrLogic(Op1, Op0))
2387 return R;
2388
2389 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Or))
2390 return V;
2391
2392 // Rotated -1 is still -1:
2393 // (-1 << X) | (-1 >> (C - X)) --> -1
2394 // (-1 >> X) | (-1 << (C - X)) --> -1
2395 // ...with C <= bitwidth (and commuted variants).
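  // e.g., i8: (-1 << 3) | (-1 >> 5) == 0xF8 | 0x07 == 0xFF == -1, with
  // C == 8 == bitwidth.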
2396 Value *X, *Y;
2397 if ((match(Op0, m_Shl(m_AllOnes(), m_Value(X))) &&
2398 match(Op1, m_LShr(m_AllOnes(), m_Value(Y)))) ||
2399 (match(Op1, m_Shl(m_AllOnes(), m_Value(X))) &&
2400 match(Op0, m_LShr(m_AllOnes(), m_Value(Y))))) {
2401 const APInt *C;
2402 if ((match(X, m_Sub(m_APInt(C), m_Specific(Y))) ||
2403 match(Y, m_Sub(m_APInt(C), m_Specific(X)))) &&
2404 C->ule(X->getType()->getScalarSizeInBits())) {
2405 return ConstantInt::getAllOnesValue(X->getType());
2406 }
2407 }
2408
2409 // A funnel shift (rotate) can be decomposed into simpler shifts. See if we
2410 // are mixing in another shift that is redundant with the funnel shift.
2411
2412 // (fshl X, ?, Y) | (shl X, Y) --> fshl X, ?, Y
2413 // (shl X, Y) | (fshl X, ?, Y) --> fshl X, ?, Y
2414 if (match(Op0,
2415 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2416 match(Op1, m_Shl(m_Specific(X), m_Specific(Y))))
2417 return Op0;
2418 if (match(Op1,
2419 m_Intrinsic<Intrinsic::fshl>(m_Value(X), m_Value(), m_Value(Y))) &&
2420 match(Op0, m_Shl(m_Specific(X), m_Specific(Y))))
2421 return Op1;
2422
2423 // (fshr ?, X, Y) | (lshr X, Y) --> fshr ?, X, Y
2424 // (lshr X, Y) | (fshr ?, X, Y) --> fshr ?, X, Y
2425 if (match(Op0,
2426 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2427 match(Op1, m_LShr(m_Specific(X), m_Specific(Y))))
2428 return Op0;
2429 if (match(Op1,
2430 m_Intrinsic<Intrinsic::fshr>(m_Value(), m_Value(X), m_Value(Y))) &&
2431 match(Op0, m_LShr(m_Specific(X), m_Specific(Y))))
2432 return Op1;
2433
2434 if (Value *V = simplifyAndOrOfCmps(Q, Op0, Op1, false))
2435 return V;
2436
2437 // If we have a multiplication overflow check that is being 'and'ed with a
2438 // check that one of the multipliers is not zero, we can omit the 'and', and
2439 // only keep the overflow check.
2440 if (isCheckForZeroAndMulWithOverflow(Op0, Op1, false))
2441 return Op1;
2442 if (isCheckForZeroAndMulWithOverflow(Op1, Op0, false))
2443 return Op0;
2444
2445 // Try some generic simplifications for associative operations.
2446 if (Value *V =
2447 simplifyAssociativeBinOp(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2448 return V;
2449
2450 // Or distributes over And. Try some generic simplifications based on this.
2451 if (Value *V = expandCommutativeBinOp(Instruction::Or, Op0, Op1,
2452 Instruction::And, Q, MaxRecurse))
2453 return V;
2454
2455 if (isa<SelectInst>(Op0) || isa<SelectInst>(Op1)) {
2456 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2457 // A | (A || B) -> A || B
2458 if (match(Op1, m_Select(m_Specific(Op0), m_One(), m_Value())))
2459 return Op1;
2460 else if (match(Op0, m_Select(m_Specific(Op1), m_One(), m_Value())))
2461 return Op0;
2462 }
2463 // If the operation is with the result of a select instruction, check
2464 // whether operating on either branch of the select always yields the same
2465 // value.
2466 if (Value *V =
2467 threadBinOpOverSelect(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2468 return V;
2469 }
2470
2471 // (A & C1)|(B & C2)
2472 Value *A, *B;
2473 const APInt *C1, *C2;
2474 if (match(Op0, m_And(m_Value(A), m_APInt(C1))) &&
2475 match(Op1, m_And(m_Value(B), m_APInt(C2)))) {
2476 if (*C1 == ~*C2) {
2477 // (A & C1)|(B & C2)
2478 // If we have: ((V + N) & C1) | (V & C2)
2479 // .. and C2 = ~C1 and C2 is 0+1+ and (N & C2) == 0
2480 // replace with V+N.
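      // e.g., C1 == 0xF0, C2 == 0x0F, N == 0x10 (i8): adding 16 cannot change
      // the low nibble of V, so ((V + 16) & 0xF0) | (V & 0x0F) == V + 16.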
2481 Value *N;
2482 if (C2->isMask() && // C2 == 0+1+
2483 match(A, m_c_Add(m_Specific(B), m_Value(N)))) {
2484 // Add commutes, try both ways.
2485 if (MaskedValueIsZero(N, *C2, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2486 return A;
2487 }
2488 // Or commutes, try both ways.
2489 if (C1->isMask() && match(B, m_c_Add(m_Specific(A), m_Value(N)))) {
2490 // Add commutes, try both ways.
2491 if (MaskedValueIsZero(N, *C1, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
2492 return B;
2493 }
2494 }
2495 }
2496
2497 // If the operation is with the result of a phi instruction, check whether
2498 // operating on all incoming values of the phi always yields the same value.
2499 if (isa<PHINode>(Op0) || isa<PHINode>(Op1))
2500 if (Value *V = threadBinOpOverPHI(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2501 return V;
2502
2503 if (Op0->getType()->isIntOrIntVectorTy(1)) {
2504 if (std::optional<bool> Implied =
2505 isImpliedCondition(Op0, Op1, Q.DL, false)) {
2506 // If Op0 is false implies Op1 is false, then Op1 is a subset of Op0.
2507 if (*Implied == false)
2508 return Op0;
2509 // If Op0 is false implies Op1 is true, then at least one is always true.
2510 if (*Implied == true)
2511 return ConstantInt::getTrue(Op0->getType());
2512 }
2513 if (std::optional<bool> Implied =
2514 isImpliedCondition(Op1, Op0, Q.DL, false)) {
2515 // If Op1 is false implies Op0 is false, then Op0 is a subset of Op1.
2516 if (*Implied == false)
2517 return Op1;
2518 // If Op1 is false implies Op0 is true, then at least one is always true.
2519 if (*Implied == true)
2520 return ConstantInt::getTrue(Op1->getType());
2521 }
2522 }
2523
2524 if (Value *V = simplifyByDomEq(Instruction::Or, Op0, Op1, Q, MaxRecurse))
2525 return V;
2526
2527 return nullptr;
2528 }
2529
2530 Value *llvm::simplifyOrInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2531 return ::simplifyOrInst(Op0, Op1, Q, RecursionLimit);
2532 }
2533
2534 /// Given operands for a Xor, see if we can fold the result.
2535 /// If not, this returns null.
2536 static Value *simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q,
2537 unsigned MaxRecurse) {
2538 if (Constant *C = foldOrCommuteConstant(Instruction::Xor, Op0, Op1, Q))
2539 return C;
2540
2541 // X ^ poison -> poison
2542 if (isa<PoisonValue>(Op1))
2543 return Op1;
2544
2545 // A ^ undef -> undef
2546 if (Q.isUndefValue(Op1))
2547 return Op1;
2548
2549 // A ^ 0 = A
2550 if (match(Op1, m_Zero()))
2551 return Op0;
2552
2553 // A ^ A = 0
2554 if (Op0 == Op1)
2555 return Constant::getNullValue(Op0->getType());
2556
2557 // A ^ ~A = ~A ^ A = -1
2558 if (match(Op0, m_Not(m_Specific(Op1))) || match(Op1, m_Not(m_Specific(Op0))))
2559 return Constant::getAllOnesValue(Op0->getType());
2560
2561 auto foldAndOrNot = [](Value *X, Value *Y) -> Value * {
2562 Value *A, *B;
2563 // (~A & B) ^ (A | B) --> A -- There are 8 commuted variants.
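    // Per-bit sanity check: if a == 0 the xor is b ^ b == 0 == a; if a == 1 it
    // is 0 ^ 1 == 1 == a.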
2564 if (match(X, m_c_And(m_Not(m_Value(A)), m_Value(B))) &&
2565 match(Y, m_c_Or(m_Specific(A), m_Specific(B))))
2566 return A;
2567
2568 // (~A | B) ^ (A & B) --> ~A -- There are 8 commuted variants.
2569 // The 'not' op must contain a complete -1 operand (no undef elements for
2570 // vector) for the transform to be safe.
2571 Value *NotA;
2572 if (match(X,
2573 m_c_Or(m_CombineAnd(m_NotForbidUndef(m_Value(A)), m_Value(NotA)),
2574 m_Value(B))) &&
2575 match(Y, m_c_And(m_Specific(A), m_Specific(B))))
2576 return NotA;
2577
2578 return nullptr;
2579 };
2580 if (Value *R = foldAndOrNot(Op0, Op1))
2581 return R;
2582 if (Value *R = foldAndOrNot(Op1, Op0))
2583 return R;
2584
2585 if (Value *V = simplifyLogicOfAddSub(Op0, Op1, Instruction::Xor))
2586 return V;
2587
2588 // Try some generic simplifications for associative operations.
2589 if (Value *V =
2590 simplifyAssociativeBinOp(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2591 return V;
2592
2593 // Threading Xor over selects and phi nodes is pointless, so don't bother.
2594 // Threading over the select in "A ^ select(cond, B, C)" means evaluating
2595 // "A^B" and "A^C" and seeing if they are equal; but they are equal if and
2596 // only if B and C are equal. If B and C are equal then (since we assume
2597 // that operands have already been simplified) "select(cond, B, C)" should
2598 // have been simplified to the common value of B and C already. Analysing
2599 // "A^B" and "A^C" thus gains nothing, but costs compile time. Similarly
2600 // for threading over phi nodes.
2601
2602 if (Value *V = simplifyByDomEq(Instruction::Xor, Op0, Op1, Q, MaxRecurse))
2603 return V;
2604
2605 return nullptr;
2606 }
2607
2608 Value *llvm::simplifyXorInst(Value *Op0, Value *Op1, const SimplifyQuery &Q) {
2609 return ::simplifyXorInst(Op0, Op1, Q, RecursionLimit);
2610 }
2611
2612 static Type *getCompareTy(Value *Op) {
2613 return CmpInst::makeCmpResultType(Op->getType());
2614 }
2615
2616 /// Rummage around inside V looking for something equivalent to the comparison
2617 /// "LHS Pred RHS". Return such a value if found, otherwise return null.
2618 /// Helper function for analyzing max/min idioms.
2619 static Value *extractEquivalentCondition(Value *V, CmpInst::Predicate Pred,
2620 Value *LHS, Value *RHS) {
2621 SelectInst *SI = dyn_cast<SelectInst>(V);
2622 if (!SI)
2623 return nullptr;
2624 CmpInst *Cmp = dyn_cast<CmpInst>(SI->getCondition());
2625 if (!Cmp)
2626 return nullptr;
2627 Value *CmpLHS = Cmp->getOperand(0), *CmpRHS = Cmp->getOperand(1);
2628 if (Pred == Cmp->getPredicate() && LHS == CmpLHS && RHS == CmpRHS)
2629 return Cmp;
2630 if (Pred == CmpInst::getSwappedPredicate(Cmp->getPredicate()) &&
2631 LHS == CmpRHS && RHS == CmpLHS)
2632 return Cmp;
2633 return nullptr;
2634 }
2635
2636 /// Return true if the underlying object (storage) must be disjoint from
2637 /// storage returned by any noalias return call.
2638 static bool isAllocDisjoint(const Value *V) {
2639 // For allocas, we consider only static ones (dynamic
2640 // allocas might be transformed into calls to malloc not simultaneously
2641 // live with the compared-to allocation). For globals, we exclude symbols
2642 // that might be resolved lazily to symbols in another dynamically-loaded
2643 // library (and, thus, could be malloc'ed by the implementation).
2644 if (const AllocaInst *AI = dyn_cast<AllocaInst>(V))
2645 return AI->getParent() && AI->getFunction() && AI->isStaticAlloca();
2646 if (const GlobalValue *GV = dyn_cast<GlobalValue>(V))
2647 return (GV->hasLocalLinkage() || GV->hasHiddenVisibility() ||
2648 GV->hasProtectedVisibility() || GV->hasGlobalUnnamedAddr()) &&
2649 !GV->isThreadLocal();
2650 if (const Argument *A = dyn_cast<Argument>(V))
2651 return A->hasByValAttr();
2652 return false;
2653 }
2654
2655 /// Return true if V1 and V2 are each the base of some distinct storage region
2656 /// [V, object_size(V)] which do not overlap. Note that zero sized regions
2657 /// *are* possible, and that zero sized regions do not overlap with any other.
2658 static bool haveNonOverlappingStorage(const Value *V1, const Value *V2) {
2659 // Global variables always exist, so they always exist during the lifetime
2660 // of each other and all allocas. Global variables themselves usually have
2661 // non-overlapping storage, but since their addresses are constants, the
2662 // case involving two globals does not reach here and is instead handled in
2663 // constant folding.
2664 //
2665 // Two different allocas usually have different addresses...
2666 //
2667 // However, if there's an @llvm.stackrestore dynamically in between two
2668 // allocas, they may have the same address. It's tempting to reduce the
2669 // scope of the problem by only looking at *static* allocas here. That would
2670 // cover the majority of allocas while significantly reducing the likelihood
2671 // of having an @llvm.stackrestore pop up in the middle. However, it's not
2672 // actually impossible for an @llvm.stackrestore to pop up in the middle of
2673 // an entry block. Also, if we have a block that's not attached to a
2674 // function, we can't tell if it's "static" under the current definition.
2675 // Theoretically, this problem could be fixed by creating a new kind of
2676 // instruction kind specifically for static allocas. Such a new instruction
2677 // could be required to be at the top of the entry block, thus preventing it
2678 // from being subject to a @llvm.stackrestore. Instcombine could even
2679 // convert regular allocas into these special allocas. It'd be nifty.
2680 // However, until then, this problem remains open.
2681 //
2682 // So, we'll assume that two non-empty allocas have different addresses
2683 // for now.
2684 auto isByValArg = [](const Value *V) {
2685 const Argument *A = dyn_cast<Argument>(V);
2686 return A && A->hasByValAttr();
2687 };
2688
2689 // Byval args are backed by storage that does not overlap with other byval
2690 // args, allocas, or globals.
2691 if (isByValArg(V1))
2692 return isa<AllocaInst>(V2) || isa<GlobalVariable>(V2) || isByValArg(V2);
2693 if (isByValArg(V2))
2694 return isa<AllocaInst>(V1) || isa<GlobalVariable>(V1) || isByValArg(V1);
2695
2696 return isa<AllocaInst>(V1) &&
2697 (isa<AllocaInst>(V2) || isa<GlobalVariable>(V2));
2698 }
2699
2700 // A significant optimization not implemented here is assuming that alloca
2701 // addresses are not equal to incoming argument values. They don't *alias*,
2702 // as we say, but that doesn't mean they aren't equal, so we take a
2703 // conservative approach.
2704 //
2705 // This is inspired in part by C++11 5.10p1:
2706 // "Two pointers of the same type compare equal if and only if they are both
2707 // null, both point to the same function, or both represent the same
2708 // address."
2709 //
2710 // This is pretty permissive.
2711 //
2712 // It's also partly due to C11 6.5.9p6:
2713 // "Two pointers compare equal if and only if both are null pointers, both are
2714 // pointers to the same object (including a pointer to an object and a
2715 // subobject at its beginning) or function, both are pointers to one past the
2716 // last element of the same array object, or one is a pointer to one past the
2717 // end of one array object and the other is a pointer to the start of a
2718 // different array object that happens to immediately follow the first array
2719 // object in the address space."
2720 //
2721 // C11's version is more restrictive, however there's no reason why an argument
2722 // couldn't be a one-past-the-end value for a stack object in the caller and be
2723 // equal to the beginning of a stack object in the callee.
2724 //
2725 // If the C and C++ standards are ever made sufficiently restrictive in this
2726 // area, it may be possible to update LLVM's semantics accordingly and reinstate
2727 // this optimization.
2728 static Constant *computePointerICmp(CmpInst::Predicate Pred, Value *LHS,
2729 Value *RHS, const SimplifyQuery &Q) {
2730 const DataLayout &DL = Q.DL;
2731 const TargetLibraryInfo *TLI = Q.TLI;
2732 const DominatorTree *DT = Q.DT;
2733 const Instruction *CxtI = Q.CxtI;
2734 const InstrInfoQuery &IIQ = Q.IIQ;
2735
2736 // First, skip past any trivial no-ops.
2737 LHS = LHS->stripPointerCasts();
2738 RHS = RHS->stripPointerCasts();
2739
2740 // A non-null pointer is not equal to a null pointer.
2741 if (isa<ConstantPointerNull>(RHS) && ICmpInst::isEquality(Pred) &&
2742 llvm::isKnownNonZero(LHS, DL, 0, nullptr, nullptr, nullptr,
2743 IIQ.UseInstrInfo))
2744 return ConstantInt::get(getCompareTy(LHS), !CmpInst::isTrueWhenEqual(Pred));
2745
2746 // We can only fold certain predicates on pointer comparisons.
2747 switch (Pred) {
2748 default:
2749 return nullptr;
2750
2751 // Equality comparisons are easy to fold.
2752 case CmpInst::ICMP_EQ:
2753 case CmpInst::ICMP_NE:
2754 break;
2755
2756 // We can only handle unsigned relational comparisons because 'inbounds' on
2757 // a GEP only protects against unsigned wrapping.
2758 case CmpInst::ICMP_UGT:
2759 case CmpInst::ICMP_UGE:
2760 case CmpInst::ICMP_ULT:
2761 case CmpInst::ICMP_ULE:
2762 // However, we have to switch them to their signed variants to handle
2763 // negative indices from the base pointer.
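// For example (an illustrative case): for "icmp ult (gep inbounds i8, ptr %p,
// i64 -4), %p" the stripped constant offsets are -4 and 0. Comparing the
// offsets as signed values (-4 <s 0) gives the right answer, whereas an
// unsigned compare of the offsets would not.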
2764 Pred = ICmpInst::getSignedPredicate(Pred);
2765 break;
2766 }
2767
2768 // Strip off any constant offsets so that we can reason about them.
2769 // It's tempting to use getUnderlyingObject or even just stripInBoundsOffsets
2770 // here and compare base addresses like AliasAnalysis does, however there are
2771 // numerous hazards. AliasAnalysis and its utilities rely on special rules
2772 // governing loads and stores which don't apply to icmps. Also, AliasAnalysis
2773 // doesn't need to guarantee pointer inequality when it says NoAlias.
2774
2775 // Even if a non-inbounds GEP occurs along the path, we can still optimize
2776 // equality comparisons concerning the result.
2777 bool AllowNonInbounds = ICmpInst::isEquality(Pred);
2778 APInt LHSOffset = stripAndComputeConstantOffsets(DL, LHS, AllowNonInbounds);
2779 APInt RHSOffset = stripAndComputeConstantOffsets(DL, RHS, AllowNonInbounds);
2780
2781 // If LHS and RHS are related via constant offsets to the same base
2782 // value, we can replace it with an icmp which just compares the offsets.
2783 if (LHS == RHS)
2784 return ConstantInt::get(getCompareTy(LHS),
2785 ICmpInst::compare(LHSOffset, RHSOffset, Pred));
2786
2787 // Various optimizations for (in)equality comparisons.
2788 if (Pred == CmpInst::ICMP_EQ || Pred == CmpInst::ICMP_NE) {
2789 // Different non-empty allocations that exist at the same time have
2790 // different addresses (if the program can tell). If the offsets are
2791 // within the bounds of their allocations (and not one-past-the-end!
2792 // so we can't use inbounds!), and their allocations aren't the same,
2793 // the pointers are not equal.
2794 if (haveNonOverlappingStorage(LHS, RHS)) {
2795 uint64_t LHSSize, RHSSize;
2796 ObjectSizeOpts Opts;
2797 Opts.EvalMode = ObjectSizeOpts::Mode::Min;
2798 auto *F = [](Value *V) -> Function * {
2799 if (auto *I = dyn_cast<Instruction>(V))
2800 return I->getFunction();
2801 if (auto *A = dyn_cast<Argument>(V))
2802 return A->getParent();
2803 return nullptr;
2804 }(LHS);
2805 Opts.NullIsUnknownSize = F ? NullPointerIsDefined(F) : true;
2806 if (getObjectSize(LHS, LHSSize, DL, TLI, Opts) &&
2807 getObjectSize(RHS, RHSSize, DL, TLI, Opts) &&
2808 !LHSOffset.isNegative() && !RHSOffset.isNegative() &&
2809 LHSOffset.ult(LHSSize) && RHSOffset.ult(RHSSize)) {
2810 return ConstantInt::get(getCompareTy(LHS),
2811 !CmpInst::isTrueWhenEqual(Pred));
2812 }
2813 }
2814
2815 // If one side of the equality comparison must come from a noalias call
2816 // (meaning a system memory allocation function), and the other side must
2817 // come from a pointer that cannot overlap with dynamically-allocated
2818 // memory within the lifetime of the current function (allocas, byval
2819 // arguments, globals), then determine the comparison result here.
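// For example (an illustrative case): if %m is the result of a call with a
// noalias return (e.g. malloc) and %a is an alloca in the same function, then
// "icmp eq ptr %m, %a" folds to false here and "icmp ne ptr %m, %a" to true.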
2820 SmallVector<const Value *, 8> LHSUObjs, RHSUObjs;
2821 getUnderlyingObjects(LHS, LHSUObjs);
2822 getUnderlyingObjects(RHS, RHSUObjs);
2823
2824 // Is the set of underlying objects all noalias calls?
2825 auto IsNAC = [](ArrayRef<const Value *> Objects) {
2826 return all_of(Objects, isNoAliasCall);
2827 };
2828
2829 // Is the set of underlying objects all things which must be disjoint from
2830 // noalias calls? We assume that indexing from such disjoint storage
2831 // into the heap is undefined, and thus offsets can be safely ignored.
2832 auto IsAllocDisjoint = [](ArrayRef<const Value *> Objects) {
2833 return all_of(Objects, ::isAllocDisjoint);
2834 };
2835
2836 if ((IsNAC(LHSUObjs) && IsAllocDisjoint(RHSUObjs)) ||
2837 (IsNAC(RHSUObjs) && IsAllocDisjoint(LHSUObjs)))
2838 return ConstantInt::get(getCompareTy(LHS),
2839 !CmpInst::isTrueWhenEqual(Pred));
2840
2841 // Fold comparisons for a non-escaping pointer even if the allocation call
2842 // cannot be elided. We cannot fold a malloc comparison to null. Also, the
2843 // dynamic allocation call could be either of the operands. Note that
2844 // the other operand cannot be based on the alloc - if it were, then
2845 // the cmp itself would be a capture.
2846 Value *MI = nullptr;
2847 if (isAllocLikeFn(LHS, TLI) &&
2848 llvm::isKnownNonZero(RHS, DL, 0, nullptr, CxtI, DT))
2849 MI = LHS;
2850 else if (isAllocLikeFn(RHS, TLI) &&
2851 llvm::isKnownNonZero(LHS, DL, 0, nullptr, CxtI, DT))
2852 MI = RHS;
2853 // FIXME: We should also fold the compare when the pointer escapes, but the
2854 // compare dominates the pointer escape
2855 if (MI && !PointerMayBeCaptured(MI, true, true))
2856 return ConstantInt::get(getCompareTy(LHS),
2857 CmpInst::isFalseWhenEqual(Pred));
2858 }
2859
2860 // Otherwise, fail.
2861 return nullptr;
2862 }
2863
2864 /// Fold an icmp when its operands have i1 scalar type.
2865 static Value *simplifyICmpOfBools(CmpInst::Predicate Pred, Value *LHS,
2866 Value *RHS, const SimplifyQuery &Q) {
2867 Type *ITy = getCompareTy(LHS); // The return type.
2868 Type *OpTy = LHS->getType(); // The operand type.
2869 if (!OpTy->isIntOrIntVectorTy(1))
2870 return nullptr;
2871
2872 // A boolean compared to true/false can be reduced in 14 out of the 20
2873 // (10 predicates * 2 constants) possible combinations. The other
2874 // 6 cases require a 'not' of the LHS.
2875
2876 auto ExtractNotLHS = [](Value *V) -> Value * {
2877 Value *X;
2878 if (match(V, m_Not(m_Value(X))))
2879 return X;
2880 return nullptr;
2881 };
2882
2883 if (match(RHS, m_Zero())) {
2884 switch (Pred) {
2885 case CmpInst::ICMP_NE: // X != 0 -> X
2886 case CmpInst::ICMP_UGT: // X >u 0 -> X
2887 case CmpInst::ICMP_SLT: // X <s 0 -> X
2888 return LHS;
2889
2890 case CmpInst::ICMP_EQ: // not(X) == 0 -> X != 0 -> X
2891 case CmpInst::ICMP_ULE: // not(X) <=u 0 -> X >u 0 -> X
2892 case CmpInst::ICMP_SGE: // not(X) >=s 0 -> X <s 0 -> X
2893 if (Value *X = ExtractNotLHS(LHS))
2894 return X;
2895 break;
2896
2897 case CmpInst::ICMP_ULT: // X <u 0 -> false
2898 case CmpInst::ICMP_SGT: // X >s 0 -> false
2899 return getFalse(ITy);
2900
2901 case CmpInst::ICMP_UGE: // X >=u 0 -> true
2902 case CmpInst::ICMP_SLE: // X <=s 0 -> true
2903 return getTrue(ITy);
2904
2905 default:
2906 break;
2907 }
2908 } else if (match(RHS, m_One())) {
2909 switch (Pred) {
2910 case CmpInst::ICMP_EQ: // X == 1 -> X
2911 case CmpInst::ICMP_UGE: // X >=u 1 -> X
2912 case CmpInst::ICMP_SLE: // X <=s -1 -> X
2913 return LHS;
2914
2915 case CmpInst::ICMP_NE: // not(X) != 1 -> X == 1 -> X
2916 case CmpInst::ICMP_ULT: // not(X) <u 1 -> X >=u 1 -> X
2917 case CmpInst::ICMP_SGT: // not(X) >s -1 -> X <=s -1 -> X
2918 if (Value *X = ExtractNotLHS(LHS))
2919 return X;
2920 break;
2921
2922 case CmpInst::ICMP_UGT: // X >u 1 -> false
2923 case CmpInst::ICMP_SLT: // X <s -1 -> false
2924 return getFalse(ITy);
2925
2926 case CmpInst::ICMP_ULE: // X <=u 1 -> true
2927 case CmpInst::ICMP_SGE: // X >=s -1 -> true
2928 return getTrue(ITy);
2929
2930 default:
2931 break;
2932 }
2933 }
2934
2935 switch (Pred) {
2936 default:
2937 break;
2938 case ICmpInst::ICMP_UGE:
2939 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2940 return getTrue(ITy);
2941 break;
2942 case ICmpInst::ICMP_SGE:
2943 /// For signed comparison, the values for an i1 are 0 and -1
2944 /// respectively. This maps into a truth table of:
2945 /// LHS | RHS | LHS >=s RHS | LHS implies RHS
2946 /// 0 | 0 | 1 (0 >= 0) | 1
2947 /// 0 | 1 | 1 (0 >= -1) | 1
2948 /// 1 | 0 | 0 (-1 >= 0) | 0
2949 /// 1 | 1 | 1 (-1 >= -1) | 1
2950 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2951 return getTrue(ITy);
2952 break;
2953 case ICmpInst::ICMP_ULE:
2954 if (isImpliedCondition(LHS, RHS, Q.DL).value_or(false))
2955 return getTrue(ITy);
2956 break;
2957 case ICmpInst::ICMP_SLE:
2958 /// SLE follows the same logic as SGE with the LHS and RHS swapped.
2959 if (isImpliedCondition(RHS, LHS, Q.DL).value_or(false))
2960 return getTrue(ITy);
2961 break;
2962 }
2963
2964 return nullptr;
2965 }
2966
2967 /// Try hard to fold icmp with zero RHS because this is a common case.
2968 static Value *simplifyICmpWithZero(CmpInst::Predicate Pred, Value *LHS,
2969 Value *RHS, const SimplifyQuery &Q) {
2970 if (!match(RHS, m_Zero()))
2971 return nullptr;
2972
2973 Type *ITy = getCompareTy(LHS); // The return type.
2974 switch (Pred) {
2975 default:
2976 llvm_unreachable("Unknown ICmp predicate!");
2977 case ICmpInst::ICMP_ULT:
2978 return getFalse(ITy);
2979 case ICmpInst::ICMP_UGE:
2980 return getTrue(ITy);
2981 case ICmpInst::ICMP_EQ:
2982 case ICmpInst::ICMP_ULE:
2983 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2984 return getFalse(ITy);
2985 break;
2986 case ICmpInst::ICMP_NE:
2987 case ICmpInst::ICMP_UGT:
2988 if (isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo))
2989 return getTrue(ITy);
2990 break;
2991 case ICmpInst::ICMP_SLT: {
2992 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
2993 if (LHSKnown.isNegative())
2994 return getTrue(ITy);
2995 if (LHSKnown.isNonNegative())
2996 return getFalse(ITy);
2997 break;
2998 }
2999 case ICmpInst::ICMP_SLE: {
3000 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3001 if (LHSKnown.isNegative())
3002 return getTrue(ITy);
3003 if (LHSKnown.isNonNegative() &&
3004 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3005 return getFalse(ITy);
3006 break;
3007 }
3008 case ICmpInst::ICMP_SGE: {
3009 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3010 if (LHSKnown.isNegative())
3011 return getFalse(ITy);
3012 if (LHSKnown.isNonNegative())
3013 return getTrue(ITy);
3014 break;
3015 }
3016 case ICmpInst::ICMP_SGT: {
3017 KnownBits LHSKnown = computeKnownBits(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3018 if (LHSKnown.isNegative())
3019 return getFalse(ITy);
3020 if (LHSKnown.isNonNegative() &&
3021 isKnownNonZero(LHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
3022 return getTrue(ITy);
3023 break;
3024 }
3025 }
3026
3027 return nullptr;
3028 }
3029
3030 static Value *simplifyICmpWithConstant(CmpInst::Predicate Pred, Value *LHS,
3031 Value *RHS, const InstrInfoQuery &IIQ) {
3032 Type *ITy = getCompareTy(RHS); // The return type.
3033
3034 Value *X;
3035 // Sign-bit checks can be optimized to true/false after unsigned
3036 // floating-point casts:
3037 // icmp slt (bitcast (uitofp X)), 0 --> false
3038 // icmp sgt (bitcast (uitofp X)), -1 --> true
3039 if (match(LHS, m_BitCast(m_UIToFP(m_Value(X))))) {
3040 if (Pred == ICmpInst::ICMP_SLT && match(RHS, m_Zero()))
3041 return ConstantInt::getFalse(ITy);
3042 if (Pred == ICmpInst::ICMP_SGT && match(RHS, m_AllOnes()))
3043 return ConstantInt::getTrue(ITy);
3044 }
3045
3046 const APInt *C;
3047 if (!match(RHS, m_APIntAllowUndef(C)))
3048 return nullptr;
3049
3050 // Rule out tautological comparisons (e.g., ult 0 or uge 0).
3051 ConstantRange RHS_CR = ConstantRange::makeExactICmpRegion(Pred, *C);
3052 if (RHS_CR.isEmptySet())
3053 return ConstantInt::getFalse(ITy);
3054 if (RHS_CR.isFullSet())
3055 return ConstantInt::getTrue(ITy);
3056
3057 ConstantRange LHS_CR =
3058 computeConstantRange(LHS, CmpInst::isSigned(Pred), IIQ.UseInstrInfo);
3059 if (!LHS_CR.isFullSet()) {
3060 if (RHS_CR.contains(LHS_CR))
3061 return ConstantInt::getTrue(ITy);
3062 if (RHS_CR.inverse().contains(LHS_CR))
3063 return ConstantInt::getFalse(ITy);
3064 }
3065
3066 // (mul nuw/nsw X, MulC) != C --> true (if C is not a multiple of MulC)
3067 // (mul nuw/nsw X, MulC) == C --> false (if C is not a multiple of MulC)
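// For example (an illustrative case): "(mul nuw i8 %x, 4) == 7" folds to false
// and "(mul nuw i8 %x, 4) != 7" folds to true, since 7 urem 4 != 0.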
3068 const APInt *MulC;
3069 if (ICmpInst::isEquality(Pred) &&
3070 ((match(LHS, m_NUWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3071 *MulC != 0 && C->urem(*MulC) != 0) ||
3072 (match(LHS, m_NSWMul(m_Value(), m_APIntAllowUndef(MulC))) &&
3073 *MulC != 0 && C->srem(*MulC) != 0)))
3074 return ConstantInt::get(ITy, Pred == ICmpInst::ICMP_NE);
3075
3076 return nullptr;
3077 }
3078
3079 static Value *simplifyICmpWithBinOpOnLHS(CmpInst::Predicate Pred,
3080 BinaryOperator *LBO, Value *RHS,
3081 const SimplifyQuery &Q,
3082 unsigned MaxRecurse) {
3083 Type *ITy = getCompareTy(RHS); // The return type.
3084
3085 Value *Y = nullptr;
3086 // icmp pred (or X, Y), X
3087 if (match(LBO, m_c_Or(m_Value(Y), m_Specific(RHS)))) {
3088 if (Pred == ICmpInst::ICMP_ULT)
3089 return getFalse(ITy);
3090 if (Pred == ICmpInst::ICMP_UGE)
3091 return getTrue(ITy);
3092
3093 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_SGE) {
3094 KnownBits RHSKnown = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3095 KnownBits YKnown = computeKnownBits(Y, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3096 if (RHSKnown.isNonNegative() && YKnown.isNegative())
3097 return Pred == ICmpInst::ICMP_SLT ? getTrue(ITy) : getFalse(ITy);
3098 if (RHSKnown.isNegative() || YKnown.isNonNegative())
3099 return Pred == ICmpInst::ICMP_SLT ? getFalse(ITy) : getTrue(ITy);
3100 }
3101 }
3102
3103 // icmp pred (and X, Y), X
3104 if (match(LBO, m_c_And(m_Value(), m_Specific(RHS)))) {
3105 if (Pred == ICmpInst::ICMP_UGT)
3106 return getFalse(ITy);
3107 if (Pred == ICmpInst::ICMP_ULE)
3108 return getTrue(ITy);
3109 }
3110
3111 // icmp pred (urem X, Y), Y
3112 if (match(LBO, m_URem(m_Value(), m_Specific(RHS)))) {
3113 switch (Pred) {
3114 default:
3115 break;
3116 case ICmpInst::ICMP_SGT:
3117 case ICmpInst::ICMP_SGE: {
3118 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3119 if (!Known.isNonNegative())
3120 break;
3121 [[fallthrough]];
3122 }
3123 case ICmpInst::ICMP_EQ:
3124 case ICmpInst::ICMP_UGT:
3125 case ICmpInst::ICMP_UGE:
3126 return getFalse(ITy);
3127 case ICmpInst::ICMP_SLT:
3128 case ICmpInst::ICMP_SLE: {
3129 KnownBits Known = computeKnownBits(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT);
3130 if (!Known.isNonNegative())
3131 break;
3132 [[fallthrough]];
3133 }
3134 case ICmpInst::ICMP_NE:
3135 case ICmpInst::ICMP_ULT:
3136 case ICmpInst::ICMP_ULE:
3137 return getTrue(ITy);
3138 }
3139 }
3140
3141 // icmp pred (urem X, Y), X
3142 if (match(LBO, m_URem(m_Specific(RHS), m_Value()))) {
3143 if (Pred == ICmpInst::ICMP_ULE)
3144 return getTrue(ITy);
3145 if (Pred == ICmpInst::ICMP_UGT)
3146 return getFalse(ITy);
3147 }
3148
3149 // x >>u y <=u x --> true.
3150 // x >>u y >u x --> false.
3151 // x udiv y <=u x --> true.
3152 // x udiv y >u x --> false.
3153 if (match(LBO, m_LShr(m_Specific(RHS), m_Value())) ||
3154 match(LBO, m_UDiv(m_Specific(RHS), m_Value()))) {
3155 // icmp pred (X op Y), X
3156 if (Pred == ICmpInst::ICMP_UGT)
3157 return getFalse(ITy);
3158 if (Pred == ICmpInst::ICMP_ULE)
3159 return getTrue(ITy);
3160 }
3161
3162 // If x is nonzero:
3163 // x >>u C <u x --> true for C != 0.
3164 // x >>u C != x --> true for C != 0.
3165 // x >>u C >=u x --> false for C != 0.
3166 // x >>u C == x --> false for C != 0.
3167 // x udiv C <u x --> true for C != 1.
3168 // x udiv C != x --> true for C != 1.
3169 // x udiv C >=u x --> false for C != 1.
3170 // x udiv C == x --> false for C != 1.
3171 // TODO: allow non-constant shift amount/divisor
3172 const APInt *C;
3173 if ((match(LBO, m_LShr(m_Specific(RHS), m_APInt(C))) && *C != 0) ||
3174 (match(LBO, m_UDiv(m_Specific(RHS), m_APInt(C))) && *C != 1)) {
3175 if (isKnownNonZero(RHS, Q.DL, 0, Q.AC, Q.CxtI, Q.DT)) {
3176 switch (Pred) {
3177 default:
3178 break;
3179 case ICmpInst::ICMP_EQ:
3180 case ICmpInst::ICMP_UGE:
3181 return getFalse(ITy);
3182 case ICmpInst::ICMP_NE:
3183 case ICmpInst::ICMP_ULT:
3184 return getTrue(ITy);
3185 case ICmpInst::ICMP_UGT:
3186 case ICmpInst::ICMP_ULE:
3187 // UGT/ULE are handled by the more general case just above
3188 llvm_unreachable("Unexpected UGT/ULE, should have been handled");
3189 }
3190 }
3191 }
3192
3193 // (x*C1)/C2 <= x for C1 <= C2.
3194 // This holds even if the multiplication overflows: Assume that x != 0 and
3195 // arithmetic is modulo M. For overflow to occur we must have C1 >= M/x and
3196 // thus C2 >= M/x. It follows that (x*C1)/C2 <= (M-1)/C2 <= ((M-1)*x)/M < x.
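// For example (an illustrative check, i8 arithmetic so M = 256): with x = 3,
// C1 = 100, C2 = 128, the product 3*100 = 300 wraps to 44 and 44/128 = 0 <= 3,
// consistent with the bound above.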
3197 //
3198 // Additionally, either the multiplication or the division might be
3199 // represented as a shift:
3200 // (x*C1)>>C2 <= x for C1 < 2**C2.
3201 // (x<<C1)/C2 <= x for 2**C1 < C2.
3202 const APInt *C1, *C2;
3203 if ((match(LBO, m_UDiv(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3204 C1->ule(*C2)) ||
3205 (match(LBO, m_LShr(m_Mul(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3206 C1->ule(APInt(C2->getBitWidth(), 1) << *C2)) ||
3207 (match(LBO, m_UDiv(m_Shl(m_Specific(RHS), m_APInt(C1)), m_APInt(C2))) &&
3208 (APInt(C1->getBitWidth(), 1) << *C1).ule(*C2))) {
3209 if (Pred == ICmpInst::ICMP_UGT)
3210 return getFalse(ITy);
3211 if (Pred == ICmpInst::ICMP_ULE)
3212 return getTrue(ITy);
3213 }
3214
3215 // (sub C, X) == X, C is odd --> false
3216 // (sub C, X) != X, C is odd --> true
3217 if (match(LBO, m_Sub(m_APIntAllowUndef(C), m_Specific(RHS))) &&
3218 (*C & 1) == 1 && ICmpInst::isEquality(Pred))
3219 return (Pred == ICmpInst::ICMP_EQ) ? getFalse(ITy) : getTrue(ITy);
3220
3221 return nullptr;
3222 }
3223
3224 // If only one of the icmp's operands has NSW flags, try to prove that:
3225 //
3226 // icmp slt (x + C1), (x +nsw C2)
3227 //
3228 // is equivalent to:
3229 //
3230 // icmp slt C1, C2
3231 //
3232 // which is true if x + C2 has the NSW flags set and:
3233 // *) C1 < C2 && C1 >= 0, or
3234 // *) C2 < C1 && C1 <= 0.
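// For example (an illustrative case): "icmp slt (add i32 %x, 1), (add nsw i32
// %x, 4)" meets the first condition (1 < 4 && 1 >= 0), so it simplifies like
// "icmp slt i32 1, 4", i.e. to true.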
3235 //
3236 static bool trySimplifyICmpWithAdds(CmpInst::Predicate Pred, Value *LHS,
3237 Value *RHS) {
3238 // TODO: only support icmp slt for now.
3239 if (Pred != CmpInst::ICMP_SLT)
3240 return false;
3241
3242 // Canonicalize nsw add as RHS.
3243 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3244 std::swap(LHS, RHS);
3245 if (!match(RHS, m_NSWAdd(m_Value(), m_Value())))
3246 return false;
3247
3248 Value *X;
3249 const APInt *C1, *C2;
3250 if (!match(LHS, m_c_Add(m_Value(X), m_APInt(C1))) ||
3251 !match(RHS, m_c_Add(m_Specific(X), m_APInt(C2))))
3252 return false;
3253
3254 return (C1->slt(*C2) && C1->isNonNegative()) ||
3255 (C2->slt(*C1) && C1->isNonPositive());
3256 }
3257
3258 /// TODO: A large part of this logic is duplicated in InstCombine's
3259 /// foldICmpBinOp(). We should be able to share that and avoid the code
3260 /// duplication.
3261 static Value *simplifyICmpWithBinOp(CmpInst::Predicate Pred, Value *LHS,
3262 Value *RHS, const SimplifyQuery &Q,
3263 unsigned MaxRecurse) {
3264 BinaryOperator *LBO = dyn_cast<BinaryOperator>(LHS);
3265 BinaryOperator *RBO = dyn_cast<BinaryOperator>(RHS);
3266 if (MaxRecurse && (LBO || RBO)) {
3267 // Analyze the case when either LHS or RHS is an add instruction.
3268 Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr;
3269 // LHS = A + B (or A and B are null); RHS = C + D (or C and D are null).
3270 bool NoLHSWrapProblem = false, NoRHSWrapProblem = false;
3271 if (LBO && LBO->getOpcode() == Instruction::Add) {
3272 A = LBO->getOperand(0);
3273 B = LBO->getOperand(1);
3274 NoLHSWrapProblem =
3275 ICmpInst::isEquality(Pred) ||
3276 (CmpInst::isUnsigned(Pred) &&
3277 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO))) ||
3278 (CmpInst::isSigned(Pred) &&
3279 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)));
3280 }
3281 if (RBO && RBO->getOpcode() == Instruction::Add) {
3282 C = RBO->getOperand(0);
3283 D = RBO->getOperand(1);
3284 NoRHSWrapProblem =
3285 ICmpInst::isEquality(Pred) ||
3286 (CmpInst::isUnsigned(Pred) &&
3287 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(RBO))) ||
3288 (CmpInst::isSigned(Pred) &&
3289 Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(RBO)));
3290 }
3291
3292 // icmp (X+Y), X -> icmp Y, 0 for equalities or if there is no overflow.
3293 if ((A == RHS || B == RHS) && NoLHSWrapProblem)
3294 if (Value *V = simplifyICmpInst(Pred, A == RHS ? B : A,
3295 Constant::getNullValue(RHS->getType()), Q,
3296 MaxRecurse - 1))
3297 return V;
3298
3299 // icmp X, (X+Y) -> icmp 0, Y for equalities or if there is no overflow.
3300 if ((C == LHS || D == LHS) && NoRHSWrapProblem)
3301 if (Value *V =
3302 simplifyICmpInst(Pred, Constant::getNullValue(LHS->getType()),
3303 C == LHS ? D : C, Q, MaxRecurse - 1))
3304 return V;
3305
3306 // icmp (X+Y), (X+Z) -> icmp Y,Z for equalities or if there is no overflow.
3307 bool CanSimplify = (NoLHSWrapProblem && NoRHSWrapProblem) ||
3308 trySimplifyICmpWithAdds(Pred, LHS, RHS);
3309 if (A && C && (A == C || A == D || B == C || B == D) && CanSimplify) {
3310 // Determine Y and Z in the form icmp (X+Y), (X+Z).
3311 Value *Y, *Z;
3312 if (A == C) {
3313 // C + B == C + D -> B == D
3314 Y = B;
3315 Z = D;
3316 } else if (A == D) {
3317 // D + B == C + D -> B == C
3318 Y = B;
3319 Z = C;
3320 } else if (B == C) {
3321 // A + C == C + D -> A == D
3322 Y = A;
3323 Z = D;
3324 } else {
3325 assert(B == D);
3326 // A + D == C + D -> A == C
3327 Y = A;
3328 Z = C;
3329 }
3330 if (Value *V = simplifyICmpInst(Pred, Y, Z, Q, MaxRecurse - 1))
3331 return V;
3332 }
3333 }
3334
3335 if (LBO)
3336 if (Value *V = simplifyICmpWithBinOpOnLHS(Pred, LBO, RHS, Q, MaxRecurse))
3337 return V;
3338
3339 if (RBO)
3340 if (Value *V = simplifyICmpWithBinOpOnLHS(
3341 ICmpInst::getSwappedPredicate(Pred), RBO, LHS, Q, MaxRecurse))
3342 return V;
3343
3344 // 0 - (zext X) pred C
3345 if (!CmpInst::isUnsigned(Pred) && match(LHS, m_Neg(m_ZExt(m_Value())))) {
3346 const APInt *C;
3347 if (match(RHS, m_APInt(C))) {
3348 if (C->isStrictlyPositive()) {
3349 if (Pred == ICmpInst::ICMP_SLT || Pred == ICmpInst::ICMP_NE)
3350 return ConstantInt::getTrue(getCompareTy(RHS));
3351 if (Pred == ICmpInst::ICMP_SGE || Pred == ICmpInst::ICMP_EQ)
3352 return ConstantInt::getFalse(getCompareTy(RHS));
3353 }
3354 if (C->isNonNegative()) {
3355 if (Pred == ICmpInst::ICMP_SLE)
3356 return ConstantInt::getTrue(getCompareTy(RHS));
3357 if (Pred == ICmpInst::ICMP_SGT)
3358 return ConstantInt::getFalse(getCompareTy(RHS));
3359 }
3360 }
3361 }
3362
3363 // If C2 is a power-of-2 and C is not:
3364 // (C2 << X) == C --> false
3365 // (C2 << X) != C --> true
3366 const APInt *C;
3367 if (match(LHS, m_Shl(m_Power2(), m_Value())) &&
3368 match(RHS, m_APIntAllowUndef(C)) && !C->isPowerOf2()) {
3369 // C2 << X can equal zero in some circumstances.
3370 // This simplification might be unsafe if C is zero.
3371 //
3372 // We know it is safe if:
3373 // - The shift is nsw. We can't shift out the one bit.
3374 // - The shift is nuw. We can't shift out the one bit.
3375 // - C2 is one.
3376 // - C isn't zero.
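// For example (an illustrative case): in i8, "shl i8 2, 7" wraps to 0, so
// "(shl i8 2, %x) == 0" cannot be folded to false unless one of the
// conditions above holds.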
3377 if (Q.IIQ.hasNoSignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3378 Q.IIQ.hasNoUnsignedWrap(cast<OverflowingBinaryOperator>(LBO)) ||
3379 match(LHS, m_Shl(m_One(), m_Value())) || !C->isZero()) {
3380 if (Pred == ICmpInst::ICMP_EQ)
3381 return ConstantInt::getFalse(getCompareTy(RHS));
3382 if (Pred == ICmpInst::ICMP_NE)
3383 return ConstantInt::getTrue(getCompareTy(RHS));
3384 }
3385 }
3386
3387 // TODO: This is overly constrained. LHS can be any power-of-2.
3388 // (1 << X) >u 0x8000 --> false
3389 // (1 << X) <=u 0x8000 --> true
3390 if (match(LHS, m_Shl(m_One(), m_Value())) && match(RHS, m_SignMask())) {
3391 if (Pred == ICmpInst::ICMP_UGT)
3392 return ConstantInt::getFalse(getCompareTy(RHS));
3393 if (Pred == ICmpInst::ICMP_ULE)
3394 return ConstantInt::getTrue(getCompareTy(RHS));
3395 }
3396
3397 if (MaxRecurse && LBO && RBO && LBO->getOpcode() == RBO->getOpcode() &&
3398 LBO->getOperand(1) == RBO->getOperand(1)) {
3399 switch (LBO->getOpcode()) {
3400 default:
3401 break;
3402 case Instruction::UDiv:
3403 case Instruction::LShr:
3404 if (ICmpInst::isSigned(Pred) || !Q.IIQ.isExact(LBO) ||
3405 !Q.IIQ.isExact(RBO))
3406 break;
3407 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3408 RBO->getOperand(0), Q, MaxRecurse - 1))
3409 return V;
3410 break;
3411 case Instruction::SDiv:
3412 if (!ICmpInst::isEquality(Pred) || !Q.IIQ.isExact(LBO) ||
3413 !Q.IIQ.isExact(RBO))
3414 break;
3415 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3416 RBO->getOperand(0), Q, MaxRecurse - 1))
3417 return V;
3418 break;
3419 case Instruction::AShr:
3420 if (!Q.IIQ.isExact(LBO) || !Q.IIQ.isExact(RBO))
3421 break;
3422 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3423 RBO->getOperand(0), Q, MaxRecurse - 1))
3424 return V;
3425 break;
3426 case Instruction::Shl: {
3427 bool NUW = Q.IIQ.hasNoUnsignedWrap(LBO) && Q.IIQ.hasNoUnsignedWrap(RBO);
3428 bool NSW = Q.IIQ.hasNoSignedWrap(LBO) && Q.IIQ.hasNoSignedWrap(RBO);
3429 if (!NUW && !NSW)
3430 break;
3431 if (!NSW && ICmpInst::isSigned(Pred))
3432 break;
3433 if (Value *V = simplifyICmpInst(Pred, LBO->getOperand(0),
3434 RBO->getOperand(0), Q, MaxRecurse - 1))
3435 return V;
3436 break;
3437 }
3438 }
3439 }
3440 return nullptr;
3441 }
3442
3443 /// Simplify integer comparisons where at least one operand of the compare
3444 /// matches an integer min/max idiom.
3445 static Value *simplifyICmpWithMinMax(CmpInst::Predicate Pred, Value *LHS,
3446 Value *RHS, const SimplifyQuery &Q,
3447 unsigned MaxRecurse) {
3448 Type *ITy = getCompareTy(LHS); // The return type.
3449 Value *A, *B;
3450 CmpInst::Predicate P = CmpInst::BAD_ICMP_PREDICATE;
3451 CmpInst::Predicate EqP; // Chosen so that "A == max/min(A,B)" iff "A EqP B".
3452
3453 // Signed variants on "max(a,b)>=a -> true".
3454 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3455 if (A != RHS)
3456 std::swap(A, B); // smax(A, B) pred A.
3457 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3458 // We analyze this as smax(A, B) pred A.
3459 P = Pred;
3460 } else if (match(RHS, m_SMax(m_Value(A), m_Value(B))) &&
3461 (A == LHS || B == LHS)) {
3462 if (A != LHS)
3463 std::swap(A, B); // A pred smax(A, B).
3464 EqP = CmpInst::ICMP_SGE; // "A == smax(A, B)" iff "A sge B".
3465 // We analyze this as smax(A, B) swapped-pred A.
3466 P = CmpInst::getSwappedPredicate(Pred);
3467 } else if (match(LHS, m_SMin(m_Value(A), m_Value(B))) &&
3468 (A == RHS || B == RHS)) {
3469 if (A != RHS)
3470 std::swap(A, B); // smin(A, B) pred A.
3471 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3472 // We analyze this as smax(-A, -B) swapped-pred -A.
3473 // Note that we do not need to actually form -A or -B thanks to EqP.
3474 P = CmpInst::getSwappedPredicate(Pred);
3475 } else if (match(RHS, m_SMin(m_Value(A), m_Value(B))) &&
3476 (A == LHS || B == LHS)) {
3477 if (A != LHS)
3478 std::swap(A, B); // A pred smin(A, B).
3479 EqP = CmpInst::ICMP_SLE; // "A == smin(A, B)" iff "A sle B".
3480 // We analyze this as smax(-A, -B) pred -A.
3481 // Note that we do not need to actually form -A or -B thanks to EqP.
3482 P = Pred;
3483 }
3484 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3485 // Cases correspond to "max(A, B) p A".
3486 switch (P) {
3487 default:
3488 break;
3489 case CmpInst::ICMP_EQ:
3490 case CmpInst::ICMP_SLE:
3491 // Equivalent to "A EqP B". This may be the same as the condition tested
3492 // in the max/min; if so, we can just return that.
3493 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3494 return V;
3495 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3496 return V;
3497 // Otherwise, see if "A EqP B" simplifies.
3498 if (MaxRecurse)
3499 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3500 return V;
3501 break;
3502 case CmpInst::ICMP_NE:
3503 case CmpInst::ICMP_SGT: {
3504 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3505 // Equivalent to "A InvEqP B". This may be the same as the condition
3506 // tested in the max/min; if so, we can just return that.
3507 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3508 return V;
3509 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3510 return V;
3511 // Otherwise, see if "A InvEqP B" simplifies.
3512 if (MaxRecurse)
3513 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3514 return V;
3515 break;
3516 }
3517 case CmpInst::ICMP_SGE:
3518 // Always true.
3519 return getTrue(ITy);
3520 case CmpInst::ICMP_SLT:
3521 // Always false.
3522 return getFalse(ITy);
3523 }
3524 }
3525
3526 // Unsigned variants on "max(a,b)>=a -> true".
3527 P = CmpInst::BAD_ICMP_PREDICATE;
3528 if (match(LHS, m_UMax(m_Value(A), m_Value(B))) && (A == RHS || B == RHS)) {
3529 if (A != RHS)
3530 std::swap(A, B); // umax(A, B) pred A.
3531 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3532 // We analyze this as umax(A, B) pred A.
3533 P = Pred;
3534 } else if (match(RHS, m_UMax(m_Value(A), m_Value(B))) &&
3535 (A == LHS || B == LHS)) {
3536 if (A != LHS)
3537 std::swap(A, B); // A pred umax(A, B).
3538 EqP = CmpInst::ICMP_UGE; // "A == umax(A, B)" iff "A uge B".
3539 // We analyze this as umax(A, B) swapped-pred A.
3540 P = CmpInst::getSwappedPredicate(Pred);
3541 } else if (match(LHS, m_UMin(m_Value(A), m_Value(B))) &&
3542 (A == RHS || B == RHS)) {
3543 if (A != RHS)
3544 std::swap(A, B); // umin(A, B) pred A.
3545 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3546 // We analyze this as umax(-A, -B) swapped-pred -A.
3547 // Note that we do not need to actually form -A or -B thanks to EqP.
3548 P = CmpInst::getSwappedPredicate(Pred);
3549 } else if (match(RHS, m_UMin(m_Value(A), m_Value(B))) &&
3550 (A == LHS || B == LHS)) {
3551 if (A != LHS)
3552 std::swap(A, B); // A pred umin(A, B).
3553 EqP = CmpInst::ICMP_ULE; // "A == umin(A, B)" iff "A ule B".
3554 // We analyze this as umax(-A, -B) pred -A.
3555 // Note that we do not need to actually form -A or -B thanks to EqP.
3556 P = Pred;
3557 }
3558 if (P != CmpInst::BAD_ICMP_PREDICATE) {
3559 // Cases correspond to "max(A, B) p A".
3560 switch (P) {
3561 default:
3562 break;
3563 case CmpInst::ICMP_EQ:
3564 case CmpInst::ICMP_ULE:
3565 // Equivalent to "A EqP B". This may be the same as the condition tested
3566 // in the max/min; if so, we can just return that.
3567 if (Value *V = extractEquivalentCondition(LHS, EqP, A, B))
3568 return V;
3569 if (Value *V = extractEquivalentCondition(RHS, EqP, A, B))
3570 return V;
3571 // Otherwise, see if "A EqP B" simplifies.
3572 if (MaxRecurse)
3573 if (Value *V = simplifyICmpInst(EqP, A, B, Q, MaxRecurse - 1))
3574 return V;
3575 break;
3576 case CmpInst::ICMP_NE:
3577 case CmpInst::ICMP_UGT: {
3578 CmpInst::Predicate InvEqP = CmpInst::getInversePredicate(EqP);
3579 // Equivalent to "A InvEqP B". This may be the same as the condition
3580 // tested in the max/min; if so, we can just return that.
3581 if (Value *V = extractEquivalentCondition(LHS, InvEqP, A, B))
3582 return V;
3583 if (Value *V = extractEquivalentCondition(RHS, InvEqP, A, B))
3584 return V;
3585 // Otherwise, see if "A InvEqP B" simplifies.
3586 if (MaxRecurse)
3587 if (Value *V = simplifyICmpInst(InvEqP, A, B, Q, MaxRecurse - 1))
3588 return V;
3589 break;
3590 }
3591 case CmpInst::ICMP_UGE:
3592 return getTrue(ITy);
3593 case CmpInst::ICMP_ULT:
3594 return getFalse(ITy);
3595 }
3596 }
3597
3598 // Comparing one min and one max that share a common operand?
3599 // Canonicalize min operand to RHS.
3600 if (match(LHS, m_UMin(m_Value(), m_Value())) ||
3601 match(LHS, m_SMin(m_Value(), m_Value()))) {
3602 std::swap(LHS, RHS);
3603 Pred = ICmpInst::getSwappedPredicate(Pred);
3604 }
3605
3606 Value *C, *D;
3607 if (match(LHS, m_SMax(m_Value(A), m_Value(B))) &&
3608 match(RHS, m_SMin(m_Value(C), m_Value(D))) &&
3609 (A == C || A == D || B == C || B == D)) {
3610 // smax(A, B) >=s smin(A, D) --> true
3611 if (Pred == CmpInst::ICMP_SGE)
3612 return getTrue(ITy);
3613 // smax(A, B) <s smin(A, D) --> false
3614 if (Pred == CmpInst::ICMP_SLT)
3615 return getFalse(ITy);
3616 } else if (match(LHS, m_UMax(m_Value(A), m_Value(B))) &&
3617 match(RHS, m_UMin(m_Value(C), m_Value(D))) &&
3618 (A == C || A == D || B == C || B == D)) {
3619 // umax(A, B) >=u umin(A, D) --> true
3620 if (Pred == CmpInst::ICMP_UGE)
3621 return getTrue(ITy);
3622 // umax(A, B) <u umin(A, D) --> false
3623 if (Pred == CmpInst::ICMP_ULT)
3624 return getFalse(ITy);
3625 }
3626
3627 return nullptr;
3628 }
3629
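/// Try to fold an icmp using a dominating llvm.assume. For example (an
/// illustrative case): given "call void @llvm.assume(i1 %c)" where %c is
/// "icmp ult i32 %x, %y", a later identical compare for which the assume is a
/// valid context folds to true.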
3630 static Value *simplifyICmpWithDominatingAssume(CmpInst::Predicate Predicate,
3631 Value *LHS, Value *RHS,
3632 const SimplifyQuery &Q) {
3633 // Gracefully handle instructions that have not been inserted yet.
3634 if (!Q.AC || !Q.CxtI || !Q.CxtI->getParent())
3635 return nullptr;
3636
3637 for (Value *AssumeBaseOp : {LHS, RHS}) {
3638 for (auto &AssumeVH : Q.AC->assumptionsFor(AssumeBaseOp)) {
3639 if (!AssumeVH)
3640 continue;
3641
3642 CallInst *Assume = cast<CallInst>(AssumeVH);
3643 if (std::optional<bool> Imp = isImpliedCondition(
3644 Assume->getArgOperand(0), Predicate, LHS, RHS, Q.DL))
3645 if (isValidAssumeForContext(Assume, Q.CxtI, Q.DT))
3646 return ConstantInt::get(getCompareTy(LHS), *Imp);
3647 }
3648 }
3649
3650 return nullptr;
3651 }
3652
3653 /// Given operands for an ICmpInst, see if we can fold the result.
3654 /// If not, this returns null.
3655 static Value *simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3656 const SimplifyQuery &Q, unsigned MaxRecurse) {
3657 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3658 assert(CmpInst::isIntPredicate(Pred) && "Not an integer compare!");
3659
3660 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3661 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3662 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI);
3663
3664 // If we have a constant, make sure it is on the RHS.
3665 std::swap(LHS, RHS);
3666 Pred = CmpInst::getSwappedPredicate(Pred);
3667 }
3668 assert(!isa<UndefValue>(LHS) && "Unexpected icmp undef,%X");
3669
3670 Type *ITy = getCompareTy(LHS); // The return type.
3671
3672 // icmp poison, X -> poison
3673 if (isa<PoisonValue>(RHS))
3674 return PoisonValue::get(ITy);
3675
3676 // For EQ and NE, we can always pick a value for the undef to make the
3677 // predicate pass or fail, so we can return undef.
3678 // Matches behavior in llvm::ConstantFoldCompareInstruction.
3679 if (Q.isUndefValue(RHS) && ICmpInst::isEquality(Pred))
3680 return UndefValue::get(ITy);
3681
3682 // icmp X, X -> true/false
3683 // icmp X, undef -> true/false because undef could be X.
3684 if (LHS == RHS || Q.isUndefValue(RHS))
3685 return ConstantInt::get(ITy, CmpInst::isTrueWhenEqual(Pred));
3686
3687 if (Value *V = simplifyICmpOfBools(Pred, LHS, RHS, Q))
3688 return V;
3689
3690 // TODO: Sink/common this with other potentially expensive calls that use
3691 // ValueTracking? See comment below for isKnownNonEqual().
3692 if (Value *V = simplifyICmpWithZero(Pred, LHS, RHS, Q))
3693 return V;
3694
3695 if (Value *V = simplifyICmpWithConstant(Pred, LHS, RHS, Q.IIQ))
3696 return V;
3697
3698 // If both operands have range metadata, use the metadata
3699 // to simplify the comparison.
3700 if (isa<Instruction>(RHS) && isa<Instruction>(LHS)) {
3701 auto RHS_Instr = cast<Instruction>(RHS);
3702 auto LHS_Instr = cast<Instruction>(LHS);
3703
3704 if (Q.IIQ.getMetadata(RHS_Instr, LLVMContext::MD_range) &&
3705 Q.IIQ.getMetadata(LHS_Instr, LLVMContext::MD_range)) {
3706 auto RHS_CR = getConstantRangeFromMetadata(
3707 *RHS_Instr->getMetadata(LLVMContext::MD_range));
3708 auto LHS_CR = getConstantRangeFromMetadata(
3709 *LHS_Instr->getMetadata(LLVMContext::MD_range));
3710
3711 if (LHS_CR.icmp(Pred, RHS_CR))
3712 return ConstantInt::getTrue(RHS->getContext());
3713
3714 if (LHS_CR.icmp(CmpInst::getInversePredicate(Pred), RHS_CR))
3715 return ConstantInt::getFalse(RHS->getContext());
3716 }
3717 }
3718
3719 // Compare of cast, for example (zext X) != 0 -> X != 0
3720 if (isa<CastInst>(LHS) && (isa<Constant>(RHS) || isa<CastInst>(RHS))) {
3721 Instruction *LI = cast<CastInst>(LHS);
3722 Value *SrcOp = LI->getOperand(0);
3723 Type *SrcTy = SrcOp->getType();
3724 Type *DstTy = LI->getType();
3725
3726 // Turn icmp (ptrtoint x), (ptrtoint/constant) into a compare of the input
3727 // if the integer type is the same size as the pointer type.
3728 if (MaxRecurse && isa<PtrToIntInst>(LI) &&
3729 Q.DL.getTypeSizeInBits(SrcTy) == DstTy->getPrimitiveSizeInBits()) {
3730 if (Constant *RHSC = dyn_cast<Constant>(RHS)) {
3731 // Transfer the cast to the constant.
3732 if (Value *V = simplifyICmpInst(Pred, SrcOp,
3733 ConstantExpr::getIntToPtr(RHSC, SrcTy),
3734 Q, MaxRecurse - 1))
3735 return V;
3736 } else if (PtrToIntInst *RI = dyn_cast<PtrToIntInst>(RHS)) {
3737 if (RI->getOperand(0)->getType() == SrcTy)
3738 // Compare without the cast.
3739 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3740 MaxRecurse - 1))
3741 return V;
3742 }
3743 }
3744
3745 if (isa<ZExtInst>(LHS)) {
3746 // Turn icmp (zext X), (zext Y) into a compare of X and Y if they have the
3747 // same type.
3748 if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3749 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3750 // Compare X and Y. Note that signed predicates become unsigned.
3751 if (Value *V =
3752 simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred), SrcOp,
3753 RI->getOperand(0), Q, MaxRecurse - 1))
3754 return V;
3755 }
3756 // Fold (zext X) ule (sext X), (zext X) sge (sext X) to true.
3757 else if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3758 if (SrcOp == RI->getOperand(0)) {
3759 if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_SGE)
3760 return ConstantInt::getTrue(ITy);
3761 if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SLT)
3762 return ConstantInt::getFalse(ITy);
3763 }
3764 }
3765 // Turn icmp (zext X), Cst into a compare of X and Cst if Cst is extended
3766 // too. If not, then try to deduce the result of the comparison.
3767 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3768 // Compute the constant that would happen if we truncated to SrcTy then
3769 // reextended to DstTy.
3770 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3771 Constant *RExt = ConstantExpr::getCast(CastInst::ZExt, Trunc, DstTy);
3772
3773 // If the re-extended constant didn't change then this is effectively
3774 // also a case of comparing two zero-extended values.
3775 if (RExt == CI && MaxRecurse)
3776 if (Value *V = simplifyICmpInst(ICmpInst::getUnsignedPredicate(Pred),
3777 SrcOp, Trunc, Q, MaxRecurse - 1))
3778 return V;
3779
3780 // Otherwise the upper bits of LHS are zero while RHS has a non-zero bit
3781 // there. Use this to work out the result of the comparison.
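// For example (an illustrative case): comparing "zext i8 %x to i32" against
// i32 300: truncating 300 to i8 and re-extending gives 44 != 300, so the
// zext result (at most 255) is always unsigned-less-than 300; e.g. an ult
// compare folds to true and a uge compare to false.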
3782 if (RExt != CI) {
3783 switch (Pred) {
3784 default:
3785 llvm_unreachable("Unknown ICmp predicate!");
3786 // LHS <u RHS.
3787 case ICmpInst::ICMP_EQ:
3788 case ICmpInst::ICMP_UGT:
3789 case ICmpInst::ICMP_UGE:
3790 return ConstantInt::getFalse(CI->getContext());
3791
3792 case ICmpInst::ICMP_NE:
3793 case ICmpInst::ICMP_ULT:
3794 case ICmpInst::ICMP_ULE:
3795 return ConstantInt::getTrue(CI->getContext());
3796
3797 // LHS is non-negative. If RHS is negative then LHS >s RHS. If RHS
3798 // is non-negative then LHS <s RHS.
3799 case ICmpInst::ICMP_SGT:
3800 case ICmpInst::ICMP_SGE:
3801 return CI->getValue().isNegative()
3802 ? ConstantInt::getTrue(CI->getContext())
3803 : ConstantInt::getFalse(CI->getContext());
3804
3805 case ICmpInst::ICMP_SLT:
3806 case ICmpInst::ICMP_SLE:
3807 return CI->getValue().isNegative()
3808 ? ConstantInt::getFalse(CI->getContext())
3809 : ConstantInt::getTrue(CI->getContext());
3810 }
3811 }
3812 }
3813 }
3814
3815 if (isa<SExtInst>(LHS)) {
3816 // Turn icmp (sext X), (sext Y) into a compare of X and Y if they have the
3817 // same type.
3818 if (SExtInst *RI = dyn_cast<SExtInst>(RHS)) {
3819 if (MaxRecurse && SrcTy == RI->getOperand(0)->getType())
3820 // Compare X and Y. Note that the predicate does not change.
3821 if (Value *V = simplifyICmpInst(Pred, SrcOp, RI->getOperand(0), Q,
3822 MaxRecurse - 1))
3823 return V;
3824 }
3825 // Fold (sext X) uge (zext X), (sext X) sle (zext X) to true.
3826 else if (ZExtInst *RI = dyn_cast<ZExtInst>(RHS)) {
3827 if (SrcOp == RI->getOperand(0)) {
3828 if (Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_SLE)
3829 return ConstantInt::getTrue(ITy);
3830 if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SGT)
3831 return ConstantInt::getFalse(ITy);
3832 }
3833 }
3834 // Turn icmp (sext X), Cst into a compare of X and Cst if Cst is extended
3835 // too. If not, then try to deduce the result of the comparison.
3836 else if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
3837 // Compute the constant that would happen if we truncated to SrcTy then
3838 // reextended to DstTy.
3839 Constant *Trunc = ConstantExpr::getTrunc(CI, SrcTy);
3840 Constant *RExt = ConstantExpr::getCast(CastInst::SExt, Trunc, DstTy);
3841
3842 // If the re-extended constant didn't change then this is effectively
3843 // also a case of comparing two sign-extended values.
3844 if (RExt == CI && MaxRecurse)
3845 if (Value *V =
3846 simplifyICmpInst(Pred, SrcOp, Trunc, Q, MaxRecurse - 1))
3847 return V;
3848
3849 // Otherwise the upper bits of LHS are all equal, while RHS has varying
3850 // bits there. Use this to work out the result of the comparison.
3851 if (RExt != CI) {
3852 switch (Pred) {
3853 default:
3854 llvm_unreachable("Unknown ICmp predicate!");
3855 case ICmpInst::ICMP_EQ:
3856 return ConstantInt::getFalse(CI->getContext());
3857 case ICmpInst::ICMP_NE:
3858 return ConstantInt::getTrue(CI->getContext());
3859
3860 // If RHS is non-negative then LHS <s RHS. If RHS is negative then
3861 // LHS >s RHS.
3862 case ICmpInst::ICMP_SGT:
3863 case ICmpInst::ICMP_SGE:
3864 return CI->getValue().isNegative()
3865 ? ConstantInt::getTrue(CI->getContext())
3866 : ConstantInt::getFalse(CI->getContext());
3867 case ICmpInst::ICMP_SLT:
3868 case ICmpInst::ICMP_SLE:
3869 return CI->getValue().isNegative()
3870 ? ConstantInt::getFalse(CI->getContext())
3871 : ConstantInt::getTrue(CI->getContext());
3872
3873 // If LHS is non-negative then LHS <u RHS. If LHS is negative then
3874 // LHS >u RHS.
3875 case ICmpInst::ICMP_UGT:
3876 case ICmpInst::ICMP_UGE:
3877 // Comparison is true iff the LHS <s 0.
3878 if (MaxRecurse)
3879 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SLT, SrcOp,
3880 Constant::getNullValue(SrcTy), Q,
3881 MaxRecurse - 1))
3882 return V;
3883 break;
3884 case ICmpInst::ICMP_ULT:
3885 case ICmpInst::ICMP_ULE:
3886 // Comparison is true iff the LHS >=s 0.
3887 if (MaxRecurse)
3888 if (Value *V = simplifyICmpInst(ICmpInst::ICMP_SGE, SrcOp,
3889 Constant::getNullValue(SrcTy), Q,
3890 MaxRecurse - 1))
3891 return V;
3892 break;
3893 }
3894 }
3895 }
3896 }
3897 }
3898
3899 // icmp eq|ne X, Y -> false|true if X != Y
3900 // This is potentially expensive, and we have already computed known bits for
3901 // compares with 0 above, so only try this for a non-zero compare.
3902 if (ICmpInst::isEquality(Pred) && !match(RHS, m_Zero()) &&
3903 isKnownNonEqual(LHS, RHS, Q.DL, Q.AC, Q.CxtI, Q.DT, Q.IIQ.UseInstrInfo)) {
3904 return Pred == ICmpInst::ICMP_NE ? getTrue(ITy) : getFalse(ITy);
3905 }
3906
3907 if (Value *V = simplifyICmpWithBinOp(Pred, LHS, RHS, Q, MaxRecurse))
3908 return V;
3909
3910 if (Value *V = simplifyICmpWithMinMax(Pred, LHS, RHS, Q, MaxRecurse))
3911 return V;
3912
3913 if (Value *V = simplifyICmpWithDominatingAssume(Pred, LHS, RHS, Q))
3914 return V;
3915
3916 // Simplify comparisons of related pointers using a powerful, recursive
3917 // GEP-walk when we have target data available.
3918 if (LHS->getType()->isPointerTy())
3919 if (auto *C = computePointerICmp(Pred, LHS, RHS, Q))
3920 return C;
3921 if (auto *CLHS = dyn_cast<PtrToIntOperator>(LHS))
3922 if (auto *CRHS = dyn_cast<PtrToIntOperator>(RHS))
3923 if (Q.DL.getTypeSizeInBits(CLHS->getPointerOperandType()) ==
3924 Q.DL.getTypeSizeInBits(CLHS->getType()) &&
3925 Q.DL.getTypeSizeInBits(CRHS->getPointerOperandType()) ==
3926 Q.DL.getTypeSizeInBits(CRHS->getType()))
3927 if (auto *C = computePointerICmp(Pred, CLHS->getPointerOperand(),
3928 CRHS->getPointerOperand(), Q))
3929 return C;
3930
3931 // If the comparison is with the result of a select instruction, check whether
3932 // comparing with either branch of the select always yields the same value.
3933 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
3934 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
3935 return V;
3936
3937 // If the comparison is with the result of a phi instruction, check whether
3938 // doing the compare with each incoming phi value yields a common result.
3939 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
3940 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
3941 return V;
3942
3943 return nullptr;
3944 }
3945
3946 Value *llvm::simplifyICmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3947 const SimplifyQuery &Q) {
3948 return ::simplifyICmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
3949 }
3950
3951 /// Given operands for an FCmpInst, see if we can fold the result.
3952 /// If not, this returns null.
3953 static Value *simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
3954 FastMathFlags FMF, const SimplifyQuery &Q,
3955 unsigned MaxRecurse) {
3956 CmpInst::Predicate Pred = (CmpInst::Predicate)Predicate;
3957 assert(CmpInst::isFPPredicate(Pred) && "Not an FP compare!");
3958
3959 if (Constant *CLHS = dyn_cast<Constant>(LHS)) {
3960 if (Constant *CRHS = dyn_cast<Constant>(RHS))
3961 return ConstantFoldCompareInstOperands(Pred, CLHS, CRHS, Q.DL, Q.TLI,
3962 Q.CxtI);
3963
3964 // If we have a constant, make sure it is on the RHS.
3965 std::swap(LHS, RHS);
3966 Pred = CmpInst::getSwappedPredicate(Pred);
3967 }
3968
3969 // Fold trivial predicates.
3970 Type *RetTy = getCompareTy(LHS);
3971 if (Pred == FCmpInst::FCMP_FALSE)
3972 return getFalse(RetTy);
3973 if (Pred == FCmpInst::FCMP_TRUE)
3974 return getTrue(RetTy);
3975
3976 // Fold (un)ordered comparison if we can determine there are no NaNs.
3977 if (Pred == FCmpInst::FCMP_UNO || Pred == FCmpInst::FCMP_ORD)
3978 if (FMF.noNaNs() ||
3979 (isKnownNeverNaN(LHS, Q.TLI) && isKnownNeverNaN(RHS, Q.TLI)))
3980 return ConstantInt::get(RetTy, Pred == FCmpInst::FCMP_ORD);
3981
3982 // NaN is unordered; NaN is not ordered.
3983 assert((FCmpInst::isOrdered(Pred) || FCmpInst::isUnordered(Pred)) &&
3984 "Comparison must be either ordered or unordered");
3985 if (match(RHS, m_NaN()))
3986 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3987
3988 // fcmp pred x, poison and fcmp pred poison, x
3989 // fold to poison
3990 if (isa<PoisonValue>(LHS) || isa<PoisonValue>(RHS))
3991 return PoisonValue::get(RetTy);
3992
3993 // fcmp pred x, undef and fcmp pred undef, x
3994 // fold to true if unordered, false if ordered
3995 if (Q.isUndefValue(LHS) || Q.isUndefValue(RHS)) {
3996 // Choosing NaN for the undef will always make unordered comparison succeed
3997 // and ordered comparison fail.
3998 return ConstantInt::get(RetTy, CmpInst::isUnordered(Pred));
3999 }
4000
4001 // fcmp x,x -> true/false. Not all compares are foldable.
4002 if (LHS == RHS) {
4003 if (CmpInst::isTrueWhenEqual(Pred))
4004 return getTrue(RetTy);
4005 if (CmpInst::isFalseWhenEqual(Pred))
4006 return getFalse(RetTy);
4007 }
4008
4009 // Handle fcmp with constant RHS.
4010 // TODO: Use match with a specific FP value, so these work with vectors with
4011 // undef lanes.
4012 const APFloat *C;
4013 if (match(RHS, m_APFloat(C))) {
4014 // Check whether the constant is an infinity.
4015 if (C->isInfinity()) {
4016 if (C->isNegative()) {
4017 switch (Pred) {
4018 case FCmpInst::FCMP_OLT:
4019 // No value is ordered and less than negative infinity.
4020 return getFalse(RetTy);
4021 case FCmpInst::FCMP_UGE:
4022 // All values are either unordered with, or at least, negative infinity.
4023 return getTrue(RetTy);
4024 default:
4025 break;
4026 }
4027 } else {
4028 switch (Pred) {
4029 case FCmpInst::FCMP_OGT:
4030 // No value is ordered and greater than infinity.
4031 return getFalse(RetTy);
4032 case FCmpInst::FCMP_ULE:
4033 // All values are either unordered with, or at most, infinity.
4034 return getTrue(RetTy);
4035 default:
4036 break;
4037 }
4038 }
4039
4040 // LHS == Inf
4041 if (Pred == FCmpInst::FCMP_OEQ && isKnownNeverInfinity(LHS, Q.TLI))
4042 return getFalse(RetTy);
4043 // LHS != Inf
4044 if (Pred == FCmpInst::FCMP_UNE && isKnownNeverInfinity(LHS, Q.TLI))
4045 return getTrue(RetTy);
4046 // LHS == Inf || LHS == NaN
4047 if (Pred == FCmpInst::FCMP_UEQ && isKnownNeverInfinity(LHS, Q.TLI) &&
4048 isKnownNeverNaN(LHS, Q.TLI))
4049 return getFalse(RetTy);
4050 // LHS != Inf && LHS != NaN
4051 if (Pred == FCmpInst::FCMP_ONE && isKnownNeverInfinity(LHS, Q.TLI) &&
4052 isKnownNeverNaN(LHS, Q.TLI))
4053 return getTrue(RetTy);
4054 }
4055 if (C->isNegative() && !C->isNegZero()) {
4056 assert(!C->isNaN() && "Unexpected NaN constant!");
4057 // TODO: We can catch more cases by using a range check rather than
4058 // relying on CannotBeOrderedLessThanZero.
4059 switch (Pred) {
4060 case FCmpInst::FCMP_UGE:
4061 case FCmpInst::FCMP_UGT:
4062 case FCmpInst::FCMP_UNE:
4063 // (X >= 0) implies (X > C) when (C < 0)
4064 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
4065 return getTrue(RetTy);
4066 break;
4067 case FCmpInst::FCMP_OEQ:
4068 case FCmpInst::FCMP_OLE:
4069 case FCmpInst::FCMP_OLT:
4070 // (X >= 0) implies !(X < C) when (C < 0)
4071 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
4072 return getFalse(RetTy);
4073 break;
4074 default:
4075 break;
4076 }
4077 }
4078
4079 // Check comparison of [minnum/maxnum with constant] with other constant.
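// For example (an illustrative case): "fcmp olt (call float
// @llvm.minnum.f32(float %x, float 1.0)), 2.0" folds to true: the minnum
// result is at most 1.0 and never NaN, so it is ordered and less than 2.0.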
4080 const APFloat *C2;
4081 if ((match(LHS, m_Intrinsic<Intrinsic::minnum>(m_Value(), m_APFloat(C2))) &&
4082 *C2 < *C) ||
4083 (match(LHS, m_Intrinsic<Intrinsic::maxnum>(m_Value(), m_APFloat(C2))) &&
4084 *C2 > *C)) {
4085 bool IsMaxNum =
4086 cast<IntrinsicInst>(LHS)->getIntrinsicID() == Intrinsic::maxnum;
4087 // The ordered relationship and minnum/maxnum guarantee that we do not
4088 // have NaN constants, so ordered/unordered preds are handled the same.
4089 switch (Pred) {
4090 case FCmpInst::FCMP_OEQ:
4091 case FCmpInst::FCMP_UEQ:
4092 // minnum(X, LesserC) == C --> false
4093 // maxnum(X, GreaterC) == C --> false
4094 return getFalse(RetTy);
4095 case FCmpInst::FCMP_ONE:
4096 case FCmpInst::FCMP_UNE:
4097 // minnum(X, LesserC) != C --> true
4098 // maxnum(X, GreaterC) != C --> true
4099 return getTrue(RetTy);
4100 case FCmpInst::FCMP_OGE:
4101 case FCmpInst::FCMP_UGE:
4102 case FCmpInst::FCMP_OGT:
4103 case FCmpInst::FCMP_UGT:
4104 // minnum(X, LesserC) >= C --> false
4105 // minnum(X, LesserC) > C --> false
4106 // maxnum(X, GreaterC) >= C --> true
4107 // maxnum(X, GreaterC) > C --> true
4108 return ConstantInt::get(RetTy, IsMaxNum);
4109 case FCmpInst::FCMP_OLE:
4110 case FCmpInst::FCMP_ULE:
4111 case FCmpInst::FCMP_OLT:
4112 case FCmpInst::FCMP_ULT:
4113 // minnum(X, LesserC) <= C --> true
4114 // minnum(X, LesserC) < C --> true
4115 // maxnum(X, GreaterC) <= C --> false
4116 // maxnum(X, GreaterC) < C --> false
4117 return ConstantInt::get(RetTy, !IsMaxNum);
4118 default:
4119 // TRUE/FALSE/ORD/UNO should be handled before this.
4120 llvm_unreachable("Unexpected fcmp predicate");
4121 }
4122 }
4123 }
4124
4125 if (match(RHS, m_AnyZeroFP())) {
4126 switch (Pred) {
4127 case FCmpInst::FCMP_OGE:
4128 case FCmpInst::FCMP_ULT:
4129 // Positive or zero X >= 0.0 --> true
4130 // Positive or zero X < 0.0 --> false
4131 if ((FMF.noNaNs() || isKnownNeverNaN(LHS, Q.TLI)) &&
4132 CannotBeOrderedLessThanZero(LHS, Q.TLI))
4133 return Pred == FCmpInst::FCMP_OGE ? getTrue(RetTy) : getFalse(RetTy);
4134 break;
4135 case FCmpInst::FCMP_UGE:
4136 case FCmpInst::FCMP_OLT:
4137 // Positive or zero or nan X >= 0.0 --> true
4138 // Positive or zero or nan X < 0.0 --> false
4139 if (CannotBeOrderedLessThanZero(LHS, Q.TLI))
4140 return Pred == FCmpInst::FCMP_UGE ? getTrue(RetTy) : getFalse(RetTy);
4141 break;
4142 default:
4143 break;
4144 }
4145 }
4146
4147 // If the comparison is with the result of a select instruction, check whether
4148 // comparing with either branch of the select always yields the same value.
4149 if (isa<SelectInst>(LHS) || isa<SelectInst>(RHS))
4150 if (Value *V = threadCmpOverSelect(Pred, LHS, RHS, Q, MaxRecurse))
4151 return V;
4152
4153 // If the comparison is with the result of a phi instruction, check whether
4154 // doing the compare with each incoming phi value yields a common result.
4155 if (isa<PHINode>(LHS) || isa<PHINode>(RHS))
4156 if (Value *V = threadCmpOverPHI(Pred, LHS, RHS, Q, MaxRecurse))
4157 return V;
4158
4159 return nullptr;
4160 }
4161
4162 Value *llvm::simplifyFCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
4163 FastMathFlags FMF, const SimplifyQuery &Q) {
4164 return ::simplifyFCmpInst(Predicate, LHS, RHS, FMF, Q, RecursionLimit);
4165 }
4166
4167 static Value *simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4168 const SimplifyQuery &Q,
4169 bool AllowRefinement,
4170 unsigned MaxRecurse) {
4171 // Trivial replacement.
4172 if (V == Op)
4173 return RepOp;
4174
4175 // We cannot replace a constant, and shouldn't even try.
4176 if (isa<Constant>(Op))
4177 return nullptr;
4178
4179 auto *I = dyn_cast<Instruction>(V);
4180 if (!I || !is_contained(I->operands(), Op))
4181 return nullptr;
4182
4183 if (Op->getType()->isVectorTy()) {
4184 // For vector types, the simplification must hold per-lane, so forbid
4185 // potentially cross-lane operations like shufflevector.
4186 assert(I->getType()->isVectorTy() && "Vector type mismatch");
4187 if (isa<ShuffleVectorInst>(I) || isa<CallBase>(I))
4188 return nullptr;
4189 }
4190
4191 // Replace Op with RepOp in instruction operands.
4192 SmallVector<Value *, 8> NewOps(I->getNumOperands());
4193 transform(I->operands(), NewOps.begin(),
4194 [&](Value *V) { return V == Op ? RepOp : V; });
4195
4196 if (!AllowRefinement) {
4197 // General InstSimplify functions may refine the result, e.g. by returning
4198 // a constant for a potentially poison value. To avoid this, implement only
4199 // a few non-refining but profitable transforms here.
4200
4201 if (auto *BO = dyn_cast<BinaryOperator>(I)) {
4202 unsigned Opcode = BO->getOpcode();
4203 // id op x -> x, x op id -> x
4204 if (NewOps[0] == ConstantExpr::getBinOpIdentity(Opcode, I->getType()))
4205 return NewOps[1];
4206 if (NewOps[1] == ConstantExpr::getBinOpIdentity(Opcode, I->getType(),
4207 /* RHS */ true))
4208 return NewOps[0];
4209
4210 // x & x -> x, x | x -> x
4211 if ((Opcode == Instruction::And || Opcode == Instruction::Or) &&
4212 NewOps[0] == NewOps[1])
4213 return NewOps[0];
4214 }
4215
4216 if (auto *GEP = dyn_cast<GetElementPtrInst>(I)) {
4217 // getelementptr x, 0 -> x
4218 if (NewOps.size() == 2 && match(NewOps[1], m_Zero()) &&
4219 !GEP->isInBounds())
4220 return NewOps[0];
4221 }
4222 } else if (MaxRecurse) {
4223 // The simplification queries below may return the original value. Consider:
4224 // %div = udiv i32 %arg, %arg2
4225 // %mul = mul nsw i32 %div, %arg2
4226 // %cmp = icmp eq i32 %mul, %arg
4227 // %sel = select i1 %cmp, i32 %div, i32 undef
4228 // Replacing %arg by %mul, %div becomes "udiv i32 %mul, %arg2", which
4229 // simplifies back to %arg. This can only happen because %mul does not
4230 // dominate %div. To ensure a consistent return value contract, we make sure
4231 // that this case returns nullptr as well.
4232 auto PreventSelfSimplify = [V](Value *Simplified) {
4233 return Simplified != V ? Simplified : nullptr;
4234 };
4235
4236 if (auto *B = dyn_cast<BinaryOperator>(I))
4237 return PreventSelfSimplify(simplifyBinOp(B->getOpcode(), NewOps[0],
4238 NewOps[1], Q, MaxRecurse - 1));
4239
4240 if (CmpInst *C = dyn_cast<CmpInst>(I))
4241 return PreventSelfSimplify(simplifyCmpInst(C->getPredicate(), NewOps[0],
4242 NewOps[1], Q, MaxRecurse - 1));
4243
4244 if (auto *GEP = dyn_cast<GetElementPtrInst>(I))
4245 return PreventSelfSimplify(simplifyGEPInst(
4246 GEP->getSourceElementType(), NewOps[0], ArrayRef(NewOps).slice(1),
4247 GEP->isInBounds(), Q, MaxRecurse - 1));
4248
4249 if (isa<SelectInst>(I))
4250 return PreventSelfSimplify(simplifySelectInst(
4251 NewOps[0], NewOps[1], NewOps[2], Q, MaxRecurse - 1));
4252 // TODO: We could hand off more cases to instsimplify here.
4253 }
4254
4255 // If all operands are constant after substituting Op for RepOp then we can
4256 // constant fold the instruction.
4257 SmallVector<Constant *, 8> ConstOps;
4258 for (Value *NewOp : NewOps) {
4259 if (Constant *ConstOp = dyn_cast<Constant>(NewOp))
4260 ConstOps.push_back(ConstOp);
4261 else
4262 return nullptr;
4263 }
4264
4265 // Consider:
4266 // %cmp = icmp eq i32 %x, 2147483647
4267 // %add = add nsw i32 %x, 1
4268 // %sel = select i1 %cmp, i32 -2147483648, i32 %add
4269 //
4270 // We can't replace %sel with %add unless we strip away the flags (which
4271 // will be done in InstCombine).
4272 // TODO: This may be unsound, because it only catches some forms of
4273 // refinement.
4274 if (!AllowRefinement && canCreatePoison(cast<Operator>(I)))
4275 return nullptr;
4276
4277 return ConstantFoldInstOperands(I, ConstOps, Q.DL, Q.TLI);
4278 }
4279
4280 Value *llvm::simplifyWithOpReplaced(Value *V, Value *Op, Value *RepOp,
4281 const SimplifyQuery &Q,
4282 bool AllowRefinement) {
4283 return ::simplifyWithOpReplaced(V, Op, RepOp, Q, AllowRefinement,
4284 RecursionLimit);
4285 }
4286
4287 /// Try to simplify a select instruction when its condition operand is an
4288 /// integer comparison where one operand of the compare is a constant.
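/// For example (illustrative IR), one instance of the first pattern below:
///   %and  = and i32 %x, 8
///   %cmp  = icmp eq i32 %and, 0
///   %mask = and i32 %x, -9                ; i.e. %x & ~8
///   %sel  = select i1 %cmp, i32 %mask, i32 %x
/// Whichever arm is chosen equals %x, so %sel simplifies to %x.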
4289 static Value *simplifySelectBitTest(Value *TrueVal, Value *FalseVal, Value *X,
4290 const APInt *Y, bool TrueWhenUnset) {
4291 const APInt *C;
4292
4293 // (X & Y) == 0 ? X & ~Y : X --> X
4294 // (X & Y) != 0 ? X & ~Y : X --> X & ~Y
4295 if (FalseVal == X && match(TrueVal, m_And(m_Specific(X), m_APInt(C))) &&
4296 *Y == ~*C)
4297 return TrueWhenUnset ? FalseVal : TrueVal;
4298
4299 // (X & Y) == 0 ? X : X & ~Y --> X & ~Y
4300 // (X & Y) != 0 ? X : X & ~Y --> X
4301 if (TrueVal == X && match(FalseVal, m_And(m_Specific(X), m_APInt(C))) &&
4302 *Y == ~*C)
4303 return TrueWhenUnset ? FalseVal : TrueVal;
4304
4305 if (Y->isPowerOf2()) {
4306 // (X & Y) == 0 ? X | Y : X --> X | Y
4307 // (X & Y) != 0 ? X | Y : X --> X
4308 if (FalseVal == X && match(TrueVal, m_Or(m_Specific(X), m_APInt(C))) &&
4309 *Y == *C)
4310 return TrueWhenUnset ? TrueVal : FalseVal;
4311
4312 // (X & Y) == 0 ? X : X | Y --> X
4313 // (X & Y) != 0 ? X : X | Y --> X | Y
4314 if (TrueVal == X && match(FalseVal, m_Or(m_Specific(X), m_APInt(C))) &&
4315 *Y == *C)
4316 return TrueWhenUnset ? TrueVal : FalseVal;
4317 }
4318
4319 return nullptr;
4320 }
4321
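/// Try to simplify a select whose condition compares the same two operands as
/// a min/max intrinsic in one of its arms. For example (illustrative IR):
///   %cmp = icmp sgt i32 %x, %y
///   %max = call i32 @llvm.smax.i32(i32 %x, i32 %y)
///   %sel = select i1 %cmp, i32 %x, i32 %max        ; simplifies to %max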
4322 static Value *simplifyCmpSelOfMaxMin(Value *CmpLHS, Value *CmpRHS,
4323 ICmpInst::Predicate Pred, Value *TVal,
4324 Value *FVal) {
4325 // Canonicalize common cmp+sel operand as CmpLHS.
4326 if (CmpRHS == TVal || CmpRHS == FVal) {
4327 std::swap(CmpLHS, CmpRHS);
4328 Pred = ICmpInst::getSwappedPredicate(Pred);
4329 }
4330
4331 // Canonicalize common cmp+sel operand as TVal.
4332 if (CmpLHS == FVal) {
4333 std::swap(TVal, FVal);
4334 Pred = ICmpInst::getInversePredicate(Pred);
4335 }
4336
4337 // A vector select may be shuffling together elements that are equivalent
4338 // based on the max/min/select relationship.
4339 Value *X = CmpLHS, *Y = CmpRHS;
4340 bool PeekedThroughSelectShuffle = false;
4341 auto *Shuf = dyn_cast<ShuffleVectorInst>(FVal);
4342 if (Shuf && Shuf->isSelect()) {
4343 if (Shuf->getOperand(0) == Y)
4344 FVal = Shuf->getOperand(1);
4345 else if (Shuf->getOperand(1) == Y)
4346 FVal = Shuf->getOperand(0);
4347 else
4348 return nullptr;
4349 PeekedThroughSelectShuffle = true;
4350 }
4351
4352 // (X pred Y) ? X : max/min(X, Y)
4353 auto *MMI = dyn_cast<MinMaxIntrinsic>(FVal);
4354 if (!MMI || TVal != X ||
4355 !match(FVal, m_c_MaxOrMin(m_Specific(X), m_Specific(Y))))
4356 return nullptr;
4357
4358 // (X > Y) ? X : max(X, Y) --> max(X, Y)
4359 // (X >= Y) ? X : max(X, Y) --> max(X, Y)
4360 // (X < Y) ? X : min(X, Y) --> min(X, Y)
4361 // (X <= Y) ? X : min(X, Y) --> min(X, Y)
4362 //
4363 // The equivalence allows a vector select (shuffle) of max/min and Y. Ex:
4364 // (X > Y) ? X : (Z ? max(X, Y) : Y)
4365 // If Z is true, this reduces as above, and if Z is false:
4366 // (X > Y) ? X : Y --> max(X, Y)
4367 ICmpInst::Predicate MMPred = MMI->getPredicate();
4368 if (MMPred == CmpInst::getStrictPredicate(Pred))
4369 return MMI;
4370
4371 // Other transforms are not valid with a shuffle.
4372 if (PeekedThroughSelectShuffle)
4373 return nullptr;
4374
4375 // (X == Y) ? X : max/min(X, Y) --> max/min(X, Y)
4376 if (Pred == CmpInst::ICMP_EQ)
4377 return MMI;
4378
4379 // (X != Y) ? X : max/min(X, Y) --> X
4380 if (Pred == CmpInst::ICMP_NE)
4381 return X;
4382
4383 // (X < Y) ? X : max(X, Y) --> X
4384 // (X <= Y) ? X : max(X, Y) --> X
4385 // (X > Y) ? X : min(X, Y) --> X
4386 // (X >= Y) ? X : min(X, Y) --> X
4387 ICmpInst::Predicate InvPred = CmpInst::getInversePredicate(Pred);
4388 if (MMPred == CmpInst::getStrictPredicate(InvPred))
4389 return X;
4390
4391 return nullptr;
4392 }
4393
4394 /// An alternative way to test if a bit is set or not uses sgt/slt instead of
4395 /// eq/ne.
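/// For example (illustrative IR), a sign-bit test written with slt:
///   %cmp = icmp slt i32 %x, 0          ; behaves like (%x & 0x80000000) != 0
/// decomposeBitTestICmp() rewrites such compares into the (X & Mask) form so
/// that simplifySelectBitTest() can handle them.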
4396 static Value *simplifySelectWithFakeICmpEq(Value *CmpLHS, Value *CmpRHS,
4397 ICmpInst::Predicate Pred,
4398 Value *TrueVal, Value *FalseVal) {
4399 Value *X;
4400 APInt Mask;
4401 if (!decomposeBitTestICmp(CmpLHS, CmpRHS, Pred, X, Mask))
4402 return nullptr;
4403
4404 return simplifySelectBitTest(TrueVal, FalseVal, X, &Mask,
4405 Pred == ICmpInst::ICMP_EQ);
4406 }
4407
4408 /// Try to simplify a select instruction when its condition operand is an
4409 /// integer comparison.
4410 static Value *simplifySelectWithICmpCond(Value *CondVal, Value *TrueVal,
4411 Value *FalseVal,
4412 const SimplifyQuery &Q,
4413 unsigned MaxRecurse) {
4414 ICmpInst::Predicate Pred;
4415 Value *CmpLHS, *CmpRHS;
4416 if (!match(CondVal, m_ICmp(Pred, m_Value(CmpLHS), m_Value(CmpRHS))))
4417 return nullptr;
4418
4419 if (Value *V = simplifyCmpSelOfMaxMin(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4420 return V;
4421
4422 // Canonicalize ne to eq predicate.
4423 if (Pred == ICmpInst::ICMP_NE) {
4424 Pred = ICmpInst::ICMP_EQ;
4425 std::swap(TrueVal, FalseVal);
4426 }
4427
4428 // Check for integer min/max with a limit constant:
4429 // X > MIN_INT ? X : MIN_INT --> X
4430 // X < MAX_INT ? X : MAX_INT --> X
4431 if (TrueVal->getType()->isIntOrIntVectorTy()) {
4432 Value *X, *Y;
4433 SelectPatternFlavor SPF =
4434 matchDecomposedSelectPattern(cast<ICmpInst>(CondVal), TrueVal, FalseVal,
4435 X, Y)
4436 .Flavor;
4437 if (SelectPatternResult::isMinOrMax(SPF) && Pred == getMinMaxPred(SPF)) {
4438 APInt LimitC = getMinMaxLimit(getInverseMinMaxFlavor(SPF),
4439 X->getType()->getScalarSizeInBits());
4440 if (match(Y, m_SpecificInt(LimitC)))
4441 return X;
4442 }
4443 }
4444
4445 if (Pred == ICmpInst::ICMP_EQ && match(CmpRHS, m_Zero())) {
4446 Value *X;
4447 const APInt *Y;
4448 if (match(CmpLHS, m_And(m_Value(X), m_APInt(Y))))
4449 if (Value *V = simplifySelectBitTest(TrueVal, FalseVal, X, Y,
4450 /*TrueWhenUnset=*/true))
4451 return V;
4452
4453 // Test for a bogus zero-shift-guard-op around funnel-shift or rotate.
4454 Value *ShAmt;
4455 auto isFsh = m_CombineOr(m_FShl(m_Value(X), m_Value(), m_Value(ShAmt)),
4456 m_FShr(m_Value(), m_Value(X), m_Value(ShAmt)));
4457 // (ShAmt == 0) ? fshl(X, *, ShAmt) : X --> X
4458 // (ShAmt == 0) ? fshr(*, X, ShAmt) : X --> X
4459 if (match(TrueVal, isFsh) && FalseVal == X && CmpLHS == ShAmt)
4460 return X;
4461
4462 // Test for a zero-shift-guard-op around rotates. These are used to
4463 // avoid UB from oversized shifts in raw IR rotate patterns, but the
4464 // intrinsics do not have that problem.
4465 // We do not allow this transform for the general funnel shift case because
4466 // that would not preserve the poison safety of the original code.
4467 auto isRotate =
4468 m_CombineOr(m_FShl(m_Value(X), m_Deferred(X), m_Value(ShAmt)),
4469 m_FShr(m_Value(X), m_Deferred(X), m_Value(ShAmt)));
4470 // (ShAmt == 0) ? X : fshl(X, X, ShAmt) --> fshl(X, X, ShAmt)
4471 // (ShAmt == 0) ? X : fshr(X, X, ShAmt) --> fshr(X, X, ShAmt)
4472 if (match(FalseVal, isRotate) && TrueVal == X && CmpLHS == ShAmt &&
4473 Pred == ICmpInst::ICMP_EQ)
4474 return FalseVal;
4475
4476 // X == 0 ? abs(X) : -abs(X) --> -abs(X)
4477 // X == 0 ? -abs(X) : abs(X) --> abs(X)
4478 if (match(TrueVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))) &&
4479 match(FalseVal, m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))))
4480 return FalseVal;
4481 if (match(TrueVal,
4482 m_Neg(m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS)))) &&
4483 match(FalseVal, m_Intrinsic<Intrinsic::abs>(m_Specific(CmpLHS))))
4484 return FalseVal;
4485 }
4486
4487 // Check for other compares that behave like bit test.
4488 if (Value *V =
4489 simplifySelectWithFakeICmpEq(CmpLHS, CmpRHS, Pred, TrueVal, FalseVal))
4490 return V;
4491
4492 // If we have a scalar equality comparison, then we know the value in one of
4493 // the arms of the select. See if substituting this value into the arm and
4494 // simplifying the result yields the same value as the other arm.
4495 if (Pred == ICmpInst::ICMP_EQ) {
4496 if (simplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, Q,
4497 /* AllowRefinement */ false,
4498 MaxRecurse) == TrueVal ||
4499 simplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, Q,
4500 /* AllowRefinement */ false,
4501 MaxRecurse) == TrueVal)
4502 return FalseVal;
4503 if (simplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, Q,
4504 /* AllowRefinement */ true,
4505 MaxRecurse) == FalseVal ||
4506 simplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, Q,
4507 /* AllowRefinement */ true,
4508 MaxRecurse) == FalseVal)
4509 return FalseVal;
4510 }
4511
4512 return nullptr;
4513 }
4514
4515 /// Try to simplify a select instruction when its condition operand is a
4516 /// floating-point comparison.
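/// For example (illustrative IR):
///   %cmp = fcmp oeq double %a, %b
///   %sel = select i1 %cmp, double %a, double %b
/// simplifies to %b when signed zeros can be ignored or one operand is a
/// known non-zero constant (otherwise the select could change a zero's sign).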
4517 static Value *simplifySelectWithFCmp(Value *Cond, Value *T, Value *F,
4518 const SimplifyQuery &Q) {
4519 FCmpInst::Predicate Pred;
4520 if (!match(Cond, m_FCmp(Pred, m_Specific(T), m_Specific(F))) &&
4521 !match(Cond, m_FCmp(Pred, m_Specific(F), m_Specific(T))))
4522 return nullptr;
4523
4524 // This transform is safe if we do not have (do not care about) -0.0 or if
4525 // at least one operand is known to not be -0.0. Otherwise, the select can
4526 // change the sign of a zero operand.
4527 bool HasNoSignedZeros =
4528 Q.CxtI && isa<FPMathOperator>(Q.CxtI) && Q.CxtI->hasNoSignedZeros();
4529 const APFloat *C;
4530 if (HasNoSignedZeros || (match(T, m_APFloat(C)) && C->isNonZero()) ||
4531 (match(F, m_APFloat(C)) && C->isNonZero())) {
4532 // (T == F) ? T : F --> F
4533 // (F == T) ? T : F --> F
4534 if (Pred == FCmpInst::FCMP_OEQ)
4535 return F;
4536
4537 // (T != F) ? T : F --> T
4538 // (F != T) ? T : F --> T
4539 if (Pred == FCmpInst::FCMP_UNE)
4540 return T;
4541 }
4542
4543 return nullptr;
4544 }
4545
4546 /// Given operands for a SelectInst, see if we can fold the result.
4547 /// If not, this returns null.
4548 static Value *simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4549 const SimplifyQuery &Q, unsigned MaxRecurse) {
4550 if (auto *CondC = dyn_cast<Constant>(Cond)) {
4551 if (auto *TrueC = dyn_cast<Constant>(TrueVal))
4552 if (auto *FalseC = dyn_cast<Constant>(FalseVal))
4553 return ConstantFoldSelectInstruction(CondC, TrueC, FalseC);
4554
4555 // select poison, X, Y -> poison
4556 if (isa<PoisonValue>(CondC))
4557 return PoisonValue::get(TrueVal->getType());
4558
4559 // select undef, X, Y -> X or Y
4560 if (Q.isUndefValue(CondC))
4561 return isa<Constant>(FalseVal) ? FalseVal : TrueVal;
4562
4563 // select true, X, Y --> X
4564 // select false, X, Y --> Y
4565 // For vectors, allow undef/poison elements in the condition to match the
4566 // defined elements, so we can eliminate the select.
4567 if (match(CondC, m_One()))
4568 return TrueVal;
4569 if (match(CondC, m_Zero()))
4570 return FalseVal;
4571 }
4572
4573 assert(Cond->getType()->isIntOrIntVectorTy(1) &&
4574 "Select must have bool or bool vector condition");
4575 assert(TrueVal->getType() == FalseVal->getType() &&
4576 "Select must have same types for true/false ops");
4577
4578 if (Cond->getType() == TrueVal->getType()) {
4579 // select i1 Cond, i1 true, i1 false --> i1 Cond
4580 if (match(TrueVal, m_One()) && match(FalseVal, m_ZeroInt()))
4581 return Cond;
4582
4583 // (X && Y) ? X : Y --> Y (commuted 2 ways)
4584 if (match(Cond, m_c_LogicalAnd(m_Specific(TrueVal), m_Specific(FalseVal))))
4585 return FalseVal;
4586
4587 // (X || Y) ? X : Y --> X (commuted 2 ways)
4588 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Specific(FalseVal))))
4589 return TrueVal;
4590
4591 // (X || Y) ? false : X --> false (commuted 2 ways)
4592 if (match(Cond, m_c_LogicalOr(m_Specific(FalseVal), m_Value())) &&
4593 match(TrueVal, m_ZeroInt()))
4594 return ConstantInt::getFalse(Cond->getType());
4595
4596 // Match patterns that end in logical-and.
4597 if (match(FalseVal, m_ZeroInt())) {
4598 // !(X || Y) && X --> false (commuted 2 ways)
4599 if (match(Cond, m_Not(m_c_LogicalOr(m_Specific(TrueVal), m_Value()))))
4600 return ConstantInt::getFalse(Cond->getType());
4601
4602 // (X || Y) && Y --> Y (commuted 2 ways)
4603 if (match(Cond, m_c_LogicalOr(m_Specific(TrueVal), m_Value())))
4604 return TrueVal;
4605 // Y && (X || Y) --> Y (commuted 2 ways)
4606 if (match(TrueVal, m_c_LogicalOr(m_Specific(Cond), m_Value())))
4607 return Cond;
4608
4609 // (X || Y) && (X || !Y) --> X (commuted 8 ways)
4610 Value *X, *Y;
4611 if (match(Cond, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4612 match(TrueVal, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4613 return X;
4614 if (match(TrueVal, m_c_LogicalOr(m_Value(X), m_Not(m_Value(Y)))) &&
4615 match(Cond, m_c_LogicalOr(m_Specific(X), m_Specific(Y))))
4616 return X;
4617 }
4618
4619 // Match patterns that end in logical-or.
4620 if (match(TrueVal, m_One())) {
4621 // (X && Y) || Y --> Y (commuted 2 ways)
4622 if (match(Cond, m_c_LogicalAnd(m_Specific(FalseVal), m_Value())))
4623 return FalseVal;
4624 // Y || (X && Y) --> Y (commuted 2 ways)
4625 if (match(FalseVal, m_c_LogicalAnd(m_Specific(Cond), m_Value())))
4626 return Cond;
4627 }
4628 }
4629
4630 // select ?, X, X -> X
4631 if (TrueVal == FalseVal)
4632 return TrueVal;
4633
4634 if (Cond == TrueVal) {
4635 // select i1 X, i1 X, i1 false --> X (logical-and)
4636 if (match(FalseVal, m_ZeroInt()))
4637 return Cond;
4638 // select i1 X, i1 X, i1 true --> true
4639 if (match(FalseVal, m_One()))
4640 return ConstantInt::getTrue(Cond->getType());
4641 }
4642 if (Cond == FalseVal) {
4643 // select i1 X, i1 true, i1 X --> X (logical-or)
4644 if (match(TrueVal, m_One()))
4645 return Cond;
4646 // select i1 X, i1 false, i1 X --> false
4647 if (match(TrueVal, m_ZeroInt()))
4648 return ConstantInt::getFalse(Cond->getType());
4649 }
4650
4651 // If the true or false value is poison, we can fold to the other value.
4652 // If the true or false value is undef, we can fold to the other value as
4653 // long as the other value isn't poison.
4654 // select ?, poison, X -> X
4655 // select ?, undef, X -> X
4656 if (isa<PoisonValue>(TrueVal) ||
4657 (Q.isUndefValue(TrueVal) &&
4658 isGuaranteedNotToBePoison(FalseVal, Q.AC, Q.CxtI, Q.DT)))
4659 return FalseVal;
4660 // select ?, X, poison -> X
4661 // select ?, X, undef -> X
4662 if (isa<PoisonValue>(FalseVal) ||
4663 (Q.isUndefValue(FalseVal) &&
4664 isGuaranteedNotToBePoison(TrueVal, Q.AC, Q.CxtI, Q.DT)))
4665 return TrueVal;
4666
4667 // Deal with partial undef vector constants: select ?, VecC, VecC' --> VecC''
4668 Constant *TrueC, *FalseC;
4669 if (isa<FixedVectorType>(TrueVal->getType()) &&
4670 match(TrueVal, m_Constant(TrueC)) &&
4671 match(FalseVal, m_Constant(FalseC))) {
4672 unsigned NumElts =
4673 cast<FixedVectorType>(TrueC->getType())->getNumElements();
4674 SmallVector<Constant *, 16> NewC;
4675 for (unsigned i = 0; i != NumElts; ++i) {
4676 // Bail out on incomplete vector constants.
4677 Constant *TEltC = TrueC->getAggregateElement(i);
4678 Constant *FEltC = FalseC->getAggregateElement(i);
4679 if (!TEltC || !FEltC)
4680 break;
4681
4682 // If the elements match (undef or not), that value is the result. If only
4683 // one element is undef, choose the defined element as the safe result.
4684 if (TEltC == FEltC)
4685 NewC.push_back(TEltC);
4686 else if (isa<PoisonValue>(TEltC) ||
4687 (Q.isUndefValue(TEltC) && isGuaranteedNotToBePoison(FEltC)))
4688 NewC.push_back(FEltC);
4689 else if (isa<PoisonValue>(FEltC) ||
4690 (Q.isUndefValue(FEltC) && isGuaranteedNotToBePoison(TEltC)))
4691 NewC.push_back(TEltC);
4692 else
4693 break;
4694 }
4695 if (NewC.size() == NumElts)
4696 return ConstantVector::get(NewC);
4697 }
4698
4699 if (Value *V =
4700 simplifySelectWithICmpCond(Cond, TrueVal, FalseVal, Q, MaxRecurse))
4701 return V;
4702
4703 if (Value *V = simplifySelectWithFCmp(Cond, TrueVal, FalseVal, Q))
4704 return V;
4705
4706 if (Value *V = foldSelectWithBinaryOp(Cond, TrueVal, FalseVal))
4707 return V;
4708
4709 std::optional<bool> Imp = isImpliedByDomCondition(Cond, Q.CxtI, Q.DL);
4710 if (Imp)
4711 return *Imp ? TrueVal : FalseVal;
4712
4713 return nullptr;
4714 }
4715
4716 Value *llvm::simplifySelectInst(Value *Cond, Value *TrueVal, Value *FalseVal,
4717 const SimplifyQuery &Q) {
4718 return ::simplifySelectInst(Cond, TrueVal, FalseVal, Q, RecursionLimit);
4719 }
4720
4721 /// Given operands for a GetElementPtrInst, see if we can fold the result.
4722 /// If not, this returns null.
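/// For example (illustrative IR), a single all-zero index over an opaque
/// pointer is a no-op:
///   %q = getelementptr i8, ptr %p, i64 0           ; simplifies to %p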
4723 static Value *simplifyGEPInst(Type *SrcTy, Value *Ptr,
4724 ArrayRef<Value *> Indices, bool InBounds,
4725 const SimplifyQuery &Q, unsigned) {
4726 // The type of the GEP pointer operand.
4727 unsigned AS =
4728 cast<PointerType>(Ptr->getType()->getScalarType())->getAddressSpace();
4729
4730 // getelementptr P -> P.
4731 if (Indices.empty())
4732 return Ptr;
4733
4734 // Compute the (pointer) type returned by the GEP instruction.
4735 Type *LastType = GetElementPtrInst::getIndexedType(SrcTy, Indices);
4736 Type *GEPTy = PointerType::get(LastType, AS);
4737 if (VectorType *VT = dyn_cast<VectorType>(Ptr->getType()))
4738 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4739 else {
4740 for (Value *Op : Indices) {
4741 // If one of the operands is a vector, the result type is a vector of
4742 // pointers. All vector operands must have the same number of elements.
4743 if (VectorType *VT = dyn_cast<VectorType>(Op->getType())) {
4744 GEPTy = VectorType::get(GEPTy, VT->getElementCount());
4745 break;
4746 }
4747 }
4748 }
4749
4750 // For opaque pointers an all-zero GEP is a no-op. For typed pointers,
4751 // it may be equivalent to a bitcast.
4752 if (Ptr->getType()->getScalarType()->isOpaquePointerTy() &&
4753 Ptr->getType() == GEPTy &&
4754 all_of(Indices, [](const auto *V) { return match(V, m_Zero()); }))
4755 return Ptr;
4756
4757 // getelementptr poison, idx -> poison
4758 // getelementptr baseptr, poison -> poison
4759 if (isa<PoisonValue>(Ptr) ||
4760 any_of(Indices, [](const auto *V) { return isa<PoisonValue>(V); }))
4761 return PoisonValue::get(GEPTy);
4762
4763 if (Q.isUndefValue(Ptr))
4764 // If inbounds, we can choose an out-of-bounds pointer as a base pointer.
4765 return InBounds ? PoisonValue::get(GEPTy) : UndefValue::get(GEPTy);
4766
4767 bool IsScalableVec =
4768 isa<ScalableVectorType>(SrcTy) || any_of(Indices, [](const Value *V) {
4769 return isa<ScalableVectorType>(V->getType());
4770 });
4771
4772 if (Indices.size() == 1) {
4773 // getelementptr P, 0 -> P.
4774 if (match(Indices[0], m_Zero()) && Ptr->getType() == GEPTy)
4775 return Ptr;
4776
4777 Type *Ty = SrcTy;
4778 if (!IsScalableVec && Ty->isSized()) {
4779 Value *P;
4780 uint64_t C;
4781 uint64_t TyAllocSize = Q.DL.getTypeAllocSize(Ty);
4782 // getelementptr P, N -> P if P points to a type of zero size.
4783 if (TyAllocSize == 0 && Ptr->getType() == GEPTy)
4784 return Ptr;
4785
4786 // The following transforms are only safe if the ptrtoint cast
4787 // doesn't truncate the pointers.
4788 if (Indices[0]->getType()->getScalarSizeInBits() ==
4789 Q.DL.getPointerSizeInBits(AS)) {
4790 auto CanSimplify = [GEPTy, &P, Ptr]() -> bool {
4791 return P->getType() == GEPTy &&
4792 getUnderlyingObject(P) == getUnderlyingObject(Ptr);
4793 };
4794 // getelementptr V, (sub P, V) -> P if P points to a type of size 1.
4795 if (TyAllocSize == 1 &&
4796 match(Indices[0],
4797 m_Sub(m_PtrToInt(m_Value(P)), m_PtrToInt(m_Specific(Ptr)))) &&
4798 CanSimplify())
4799 return P;
4800
4801 // getelementptr V, (ashr (sub P, V), C) -> P if P points to a type of
4802 // size 1 << C.
4803 if (match(Indices[0], m_AShr(m_Sub(m_PtrToInt(m_Value(P)),
4804 m_PtrToInt(m_Specific(Ptr))),
4805 m_ConstantInt(C))) &&
4806 TyAllocSize == 1ULL << C && CanSimplify())
4807 return P;
4808
4809 // getelementptr V, (sdiv (sub P, V), C) -> P if P points to a type of
4810 // size C.
4811 if (match(Indices[0], m_SDiv(m_Sub(m_PtrToInt(m_Value(P)),
4812 m_PtrToInt(m_Specific(Ptr))),
4813 m_SpecificInt(TyAllocSize))) &&
4814 CanSimplify())
4815 return P;
4816 }
4817 }
4818 }
4819
4820 if (!IsScalableVec && Q.DL.getTypeAllocSize(LastType) == 1 &&
4821 all_of(Indices.drop_back(1),
4822 [](Value *Idx) { return match(Idx, m_Zero()); })) {
4823 unsigned IdxWidth =
4824 Q.DL.getIndexSizeInBits(Ptr->getType()->getPointerAddressSpace());
4825 if (Q.DL.getTypeSizeInBits(Indices.back()->getType()) == IdxWidth) {
4826 APInt BasePtrOffset(IdxWidth, 0);
4827 Value *StrippedBasePtr =
4828 Ptr->stripAndAccumulateInBoundsConstantOffsets(Q.DL, BasePtrOffset);
4829
4830 // Avoid creating inttoptr of zero here: while LLVM's treatment of
4831 // inttoptr is generally conservative, this particular case is folded to
4832 // a null pointer, which will have incorrect provenance.
4833
4834 // gep (gep V, C), (sub 0, V) -> C
4835 if (match(Indices.back(),
4836 m_Sub(m_Zero(), m_PtrToInt(m_Specific(StrippedBasePtr)))) &&
4837 !BasePtrOffset.isZero()) {
4838 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset);
4839 return ConstantExpr::getIntToPtr(CI, GEPTy);
4840 }
4841 // gep (gep V, C), (xor V, -1) -> C-1
4842 if (match(Indices.back(),
4843 m_Xor(m_PtrToInt(m_Specific(StrippedBasePtr)), m_AllOnes())) &&
4844 !BasePtrOffset.isOne()) {
4845 auto *CI = ConstantInt::get(GEPTy->getContext(), BasePtrOffset - 1);
4846 return ConstantExpr::getIntToPtr(CI, GEPTy);
4847 }
4848 }
4849 }
4850
4851 // Check to see if this is constant foldable.
4852 if (!isa<Constant>(Ptr) ||
4853 !all_of(Indices, [](Value *V) { return isa<Constant>(V); }))
4854 return nullptr;
4855
4856 auto *CE = ConstantExpr::getGetElementPtr(SrcTy, cast<Constant>(Ptr), Indices,
4857 InBounds);
4858 return ConstantFoldConstant(CE, Q.DL);
4859 }
4860
4861 Value *llvm::simplifyGEPInst(Type *SrcTy, Value *Ptr, ArrayRef<Value *> Indices,
4862 bool InBounds, const SimplifyQuery &Q) {
4863 return ::simplifyGEPInst(SrcTy, Ptr, Indices, InBounds, Q, RecursionLimit);
4864 }
4865
4866 /// Given operands for an InsertValueInst, see if we can fold the result.
4867 /// If not, this returns null.
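/// For example (illustrative IR), reinserting an extracted member is a no-op:
///   %v = extractvalue { i32, i32 } %agg, 0
///   %r = insertvalue { i32, i32 } %agg, i32 %v, 0  ; simplifies to %agg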
4868 static Value *simplifyInsertValueInst(Value *Agg, Value *Val,
4869 ArrayRef<unsigned> Idxs,
4870 const SimplifyQuery &Q, unsigned) {
4871 if (Constant *CAgg = dyn_cast<Constant>(Agg))
4872 if (Constant *CVal = dyn_cast<Constant>(Val))
4873 return ConstantFoldInsertValueInstruction(CAgg, CVal, Idxs);
4874
4875 // insertvalue x, poison, n -> x
4876 // insertvalue x, undef, n -> x if x cannot be poison
4877 if (isa<PoisonValue>(Val) ||
4878 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Agg)))
4879 return Agg;
4880
4881 // insertvalue x, (extractvalue y, n), n
4882 if (ExtractValueInst *EV = dyn_cast<ExtractValueInst>(Val))
4883 if (EV->getAggregateOperand()->getType() == Agg->getType() &&
4884 EV->getIndices() == Idxs) {
4885 // insertvalue undef, (extractvalue y, n), n -> y
4886 if (Q.isUndefValue(Agg))
4887 return EV->getAggregateOperand();
4888
4889 // insertvalue y, (extractvalue y, n), n -> y
4890 if (Agg == EV->getAggregateOperand())
4891 return Agg;
4892 }
4893
4894 return nullptr;
4895 }
4896
4897 Value *llvm::simplifyInsertValueInst(Value *Agg, Value *Val,
4898 ArrayRef<unsigned> Idxs,
4899 const SimplifyQuery &Q) {
4900 return ::simplifyInsertValueInst(Agg, Val, Idxs, Q, RecursionLimit);
4901 }
4902
4903 Value *llvm::simplifyInsertElementInst(Value *Vec, Value *Val, Value *Idx,
4904 const SimplifyQuery &Q) {
4905 // Try to constant fold.
4906 auto *VecC = dyn_cast<Constant>(Vec);
4907 auto *ValC = dyn_cast<Constant>(Val);
4908 auto *IdxC = dyn_cast<Constant>(Idx);
4909 if (VecC && ValC && IdxC)
4910 return ConstantExpr::getInsertElement(VecC, ValC, IdxC);
4911
4912 // For a fixed-length vector, fold into poison if the index is out of bounds.
4913 if (auto *CI = dyn_cast<ConstantInt>(Idx)) {
4914 if (isa<FixedVectorType>(Vec->getType()) &&
4915 CI->uge(cast<FixedVectorType>(Vec->getType())->getNumElements()))
4916 return PoisonValue::get(Vec->getType());
4917 }
4918
4919 // If the index is undef, it might be out of bounds (see the case above).
4920 if (Q.isUndefValue(Idx))
4921 return PoisonValue::get(Vec->getType());
4922
4923 // If the scalar is poison, or it is undef and there is no risk of
4924 // propagating poison from the vector value, simplify to the vector value.
4925 if (isa<PoisonValue>(Val) ||
4926 (Q.isUndefValue(Val) && isGuaranteedNotToBePoison(Vec)))
4927 return Vec;
4928
4929 // If we are extracting a value from a vector and then inserting it back
4930 // into the same place, the result is the input vector:
4931 // insertelt Vec, (extractelt Vec, Idx), Idx --> Vec
4932 if (match(Val, m_ExtractElt(m_Specific(Vec), m_Specific(Idx))))
4933 return Vec;
4934
4935 return nullptr;
4936 }
4937
4938 /// Given operands for an ExtractValueInst, see if we can fold the result.
4939 /// If not, this returns null.
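/// For example (illustrative IR), extracting a just-inserted member:
///   %agg = insertvalue { i32, float } poison, i32 %x, 0
///   %r   = extractvalue { i32, float } %agg, 0     ; simplifies to %x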
4940 static Value *simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4941 const SimplifyQuery &, unsigned) {
4942 if (auto *CAgg = dyn_cast<Constant>(Agg))
4943 return ConstantFoldExtractValueInstruction(CAgg, Idxs);
4944
4945 // extractvalue (insertvalue y, elt, n), n -> elt
4946 unsigned NumIdxs = Idxs.size();
4947 for (auto *IVI = dyn_cast<InsertValueInst>(Agg); IVI != nullptr;
4948 IVI = dyn_cast<InsertValueInst>(IVI->getAggregateOperand())) {
4949 ArrayRef<unsigned> InsertValueIdxs = IVI->getIndices();
4950 unsigned NumInsertValueIdxs = InsertValueIdxs.size();
4951 unsigned NumCommonIdxs = std::min(NumInsertValueIdxs, NumIdxs);
4952 if (InsertValueIdxs.slice(0, NumCommonIdxs) ==
4953 Idxs.slice(0, NumCommonIdxs)) {
4954 if (NumIdxs == NumInsertValueIdxs)
4955 return IVI->getInsertedValueOperand();
4956 break;
4957 }
4958 }
4959
4960 return nullptr;
4961 }
4962
4963 Value *llvm::simplifyExtractValueInst(Value *Agg, ArrayRef<unsigned> Idxs,
4964 const SimplifyQuery &Q) {
4965 return ::simplifyExtractValueInst(Agg, Idxs, Q, RecursionLimit);
4966 }
4967
4968 /// Given operands for an ExtractElementInst, see if we can fold the result.
4969 /// If not, this returns null.
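/// For example (illustrative IR), extracting any lane of a splat yields the
/// splatted scalar, even when the index is a variable:
///   %ins   = insertelement <4 x float> poison, float %x, i64 0
///   %splat = shufflevector <4 x float> %ins, <4 x float> poison,
///                          <4 x i32> zeroinitializer
///   %elt   = extractelement <4 x float> %splat, i64 %i   ; simplifies to %x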
4970 static Value *simplifyExtractElementInst(Value *Vec, Value *Idx,
4971 const SimplifyQuery &Q, unsigned) {
4972 auto *VecVTy = cast<VectorType>(Vec->getType());
4973 if (auto *CVec = dyn_cast<Constant>(Vec)) {
4974 if (auto *CIdx = dyn_cast<Constant>(Idx))
4975 return ConstantExpr::getExtractElement(CVec, CIdx);
4976
4977 if (Q.isUndefValue(Vec))
4978 return UndefValue::get(VecVTy->getElementType());
4979 }
4980
4981 // An undef extract index can be arbitrarily chosen to be an out-of-range
4982 // index value, which would result in the instruction being poison.
4983 if (Q.isUndefValue(Idx))
4984 return PoisonValue::get(VecVTy->getElementType());
4985
4986 // If extracting a specified index from the vector, see if we can recursively
4987 // find a previously computed scalar that was inserted into the vector.
4988 if (auto *IdxC = dyn_cast<ConstantInt>(Idx)) {
4989 // For a fixed-length vector, fold into poison if the index is out of bounds.
4990 unsigned MinNumElts = VecVTy->getElementCount().getKnownMinValue();
4991 if (isa<FixedVectorType>(VecVTy) && IdxC->getValue().uge(MinNumElts))
4992 return PoisonValue::get(VecVTy->getElementType());
4993 // Handle case where an element is extracted from a splat.
4994 if (IdxC->getValue().ult(MinNumElts))
4995 if (auto *Splat = getSplatValue(Vec))
4996 return Splat;
4997 if (Value *Elt = findScalarElement(Vec, IdxC->getZExtValue()))
4998 return Elt;
4999 } else {
5000 // extractelt (insertelt y, elt, n), n -> elt
5001 // If the possibly-variable indices are trivially known to be equal
5002 // (because they are the same operand) then use the value that was
5003 // inserted directly.
5004 auto *IE = dyn_cast<InsertElementInst>(Vec);
5005 if (IE && IE->getOperand(2) == Idx)
5006 return IE->getOperand(1);
5007
5008 // The index is not relevant if our vector is a splat.
5009 if (Value *Splat = getSplatValue(Vec))
5010 return Splat;
5011 }
5012 return nullptr;
5013 }
5014
5015 Value *llvm::simplifyExtractElementInst(Value *Vec, Value *Idx,
5016 const SimplifyQuery &Q) {
5017 return ::simplifyExtractElementInst(Vec, Idx, Q, RecursionLimit);
5018 }
5019
5020 /// See if we can fold the given phi. If not, returns null.
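/// For example (illustrative IR), a phi whose non-undef incoming values agree:
///   %r = phi i32 [ %x, %bb1 ], [ %x, %bb2 ], [ undef, %bb3 ]
/// simplifies to %x, provided %x dominates the phi's block when an undef
/// input is present.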
5021 static Value *simplifyPHINode(PHINode *PN, ArrayRef<Value *> IncomingValues,
5022 const SimplifyQuery &Q) {
5023 // WARNING: no matter how worthwhile it may seem, we cannot perform PHI CSE
5024 //          here, because the PHI we might simplify to was not def-reachable
5025 //          from the original PHI!
5026
5027 // If all of the PHI's incoming values are the same then replace the PHI node
5028 // with the common value.
5029 Value *CommonValue = nullptr;
5030 bool HasUndefInput = false;
5031 for (Value *Incoming : IncomingValues) {
5032 // If the incoming value is the phi node itself, it can safely be skipped.
5033 if (Incoming == PN)
5034 continue;
5035 if (Q.isUndefValue(Incoming)) {
5036 // Remember that we saw an undef value, but otherwise ignore them.
5037 HasUndefInput = true;
5038 continue;
5039 }
5040 if (CommonValue && Incoming != CommonValue)
5041 return nullptr; // Not the same, bail out.
5042 CommonValue = Incoming;
5043 }
5044
5045 // If CommonValue is null then all of the incoming values were either undef or
5046 // equal to the phi node itself.
5047 if (!CommonValue)
5048 return UndefValue::get(PN->getType());
5049
5050 if (HasUndefInput) {
5051 // If we have a PHI node like phi(X, undef, X), where X is defined by some
5052 // instruction, we cannot return X as the result of the PHI node unless it
5053 // dominates the PHI block.
5054 return valueDominatesPHI(CommonValue, PN, Q.DT) ? CommonValue : nullptr;
5055 }
5056
5057 return CommonValue;
5058 }
5059
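/// Given operands for a CastInst, see if we can fold the result. For example
/// (illustrative IR, assuming 64-bit pointers), an eliminable cast pair:
///   %i = ptrtoint ptr %p to i64
///   %q = inttoptr i64 %i to ptr                    ; simplifies to %p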
5060 static Value *simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5061 const SimplifyQuery &Q, unsigned MaxRecurse) {
5062 if (auto *C = dyn_cast<Constant>(Op))
5063 return ConstantFoldCastOperand(CastOpc, C, Ty, Q.DL);
5064
5065 if (auto *CI = dyn_cast<CastInst>(Op)) {
5066 auto *Src = CI->getOperand(0);
5067 Type *SrcTy = Src->getType();
5068 Type *MidTy = CI->getType();
5069 Type *DstTy = Ty;
5070 if (Src->getType() == Ty) {
5071 auto FirstOp = static_cast<Instruction::CastOps>(CI->getOpcode());
5072 auto SecondOp = static_cast<Instruction::CastOps>(CastOpc);
5073 Type *SrcIntPtrTy =
5074 SrcTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(SrcTy) : nullptr;
5075 Type *MidIntPtrTy =
5076 MidTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(MidTy) : nullptr;
5077 Type *DstIntPtrTy =
5078 DstTy->isPtrOrPtrVectorTy() ? Q.DL.getIntPtrType(DstTy) : nullptr;
5079 if (CastInst::isEliminableCastPair(FirstOp, SecondOp, SrcTy, MidTy, DstTy,
5080 SrcIntPtrTy, MidIntPtrTy,
5081 DstIntPtrTy) == Instruction::BitCast)
5082 return Src;
5083 }
5084 }
5085
5086 // bitcast x -> x
5087 if (CastOpc == Instruction::BitCast)
5088 if (Op->getType() == Ty)
5089 return Op;
5090
5091 return nullptr;
5092 }
5093
5094 Value *llvm::simplifyCastInst(unsigned CastOpc, Value *Op, Type *Ty,
5095 const SimplifyQuery &Q) {
5096 return ::simplifyCastInst(CastOpc, Op, Ty, Q, RecursionLimit);
5097 }
5098
5099 /// For the given destination element of a shuffle, peek through shuffles to
5100 /// match a root vector source operand that contains that element in the same
5101 /// vector lane (ie, the same mask index), so we can eliminate the shuffle(s).
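/// For example (illustrative IR), a widen-then-narrow pair of shuffles:
///   %wide   = shufflevector <2 x i8> %v, <2 x i8> poison,
///                           <4 x i32> <i32 0, i32 1, i32 undef, i32 undef>
///   %narrow = shufflevector <4 x i8> %wide, <4 x i8> poison,
///                           <2 x i32> <i32 0, i32 1>
/// Every destination lane of %narrow maps back to the same lane of %v, so the
/// shuffle chain simplifies to %v.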
5102 static Value *foldIdentityShuffles(int DestElt, Value *Op0, Value *Op1,
5103 int MaskVal, Value *RootVec,
5104 unsigned MaxRecurse) {
5105 if (!MaxRecurse--)
5106 return nullptr;
5107
5108 // Bail out if any mask value is undefined. That kind of shuffle may be
5109 // simplified further based on demanded bits or other folds.
5110 if (MaskVal == -1)
5111 return nullptr;
5112
5113 // The mask value chooses which source operand we need to look at next.
5114 int InVecNumElts = cast<FixedVectorType>(Op0->getType())->getNumElements();
5115 int RootElt = MaskVal;
5116 Value *SourceOp = Op0;
5117 if (MaskVal >= InVecNumElts) {
5118 RootElt = MaskVal - InVecNumElts;
5119 SourceOp = Op1;
5120 }
5121
5122 // If the source operand is a shuffle itself, look through it to find the
5123 // matching root vector.
5124 if (auto *SourceShuf = dyn_cast<ShuffleVectorInst>(SourceOp)) {
5125 return foldIdentityShuffles(
5126 DestElt, SourceShuf->getOperand(0), SourceShuf->getOperand(1),
5127 SourceShuf->getMaskValue(RootElt), RootVec, MaxRecurse);
5128 }
5129
5130 // TODO: Look through bitcasts? What if the bitcast changes the vector element
5131 // size?
5132
5133 // The source operand is not a shuffle. Initialize the root vector value for
5134 // this shuffle if that has not been done yet.
5135 if (!RootVec)
5136 RootVec = SourceOp;
5137
5138 // Give up as soon as a source operand does not match the existing root value.
5139 if (RootVec != SourceOp)
5140 return nullptr;
5141
5142 // The element must be coming from the same lane in the source vector
5143 // (although it may have crossed lanes in intermediate shuffles).
5144 if (RootElt != DestElt)
5145 return nullptr;
5146
5147 return RootVec;
5148 }
5149
5150 static Value *simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5151 ArrayRef<int> Mask, Type *RetTy,
5152 const SimplifyQuery &Q,
5153 unsigned MaxRecurse) {
5154 if (all_of(Mask, [](int Elem) { return Elem == UndefMaskElem; }))
5155 return UndefValue::get(RetTy);
5156
5157 auto *InVecTy = cast<VectorType>(Op0->getType());
5158 unsigned MaskNumElts = Mask.size();
5159 ElementCount InVecEltCount = InVecTy->getElementCount();
5160
5161 bool Scalable = InVecEltCount.isScalable();
5162
5163 SmallVector<int, 32> Indices;
5164 Indices.assign(Mask.begin(), Mask.end());
5165
5166 // Canonicalization: If mask does not select elements from an input vector,
5167 // replace that input vector with poison.
5168 if (!Scalable) {
5169 bool MaskSelects0 = false, MaskSelects1 = false;
5170 unsigned InVecNumElts = InVecEltCount.getKnownMinValue();
5171 for (unsigned i = 0; i != MaskNumElts; ++i) {
5172 if (Indices[i] == -1)
5173 continue;
5174 if ((unsigned)Indices[i] < InVecNumElts)
5175 MaskSelects0 = true;
5176 else
5177 MaskSelects1 = true;
5178 }
5179 if (!MaskSelects0)
5180 Op0 = PoisonValue::get(InVecTy);
5181 if (!MaskSelects1)
5182 Op1 = PoisonValue::get(InVecTy);
5183 }
5184
5185 auto *Op0Const = dyn_cast<Constant>(Op0);
5186 auto *Op1Const = dyn_cast<Constant>(Op1);
5187
5188 // If all operands are constant, constant fold the shuffle. This
5189 // transformation depends on the value of the mask, which is not known at
5190 // compile time for scalable vectors.
5191 if (Op0Const && Op1Const)
5192 return ConstantExpr::getShuffleVector(Op0Const, Op1Const, Mask);
5193
5194 // Canonicalization: if only one input vector is constant, it shall be the
5195 // second one. This transformation depends on the value of the mask, which
5196 // is not known at compile time for scalable vectors.
5197 if (!Scalable && Op0Const && !Op1Const) {
5198 std::swap(Op0, Op1);
5199 ShuffleVectorInst::commuteShuffleMask(Indices,
5200 InVecEltCount.getKnownMinValue());
5201 }
5202
5203 // A splat of an inserted scalar constant becomes a vector constant:
5204 // shuf (inselt ?, C, IndexC), undef, <IndexC, IndexC...> --> <C, C...>
5205 // NOTE: We may have commuted above, so analyze the updated Indices, not the
5206 // original mask constant.
5207 // NOTE: This transformation depends on the value of the mask, which is not
5208 // known at compile time for scalable vectors.
5209 Constant *C;
5210 ConstantInt *IndexC;
5211 if (!Scalable && match(Op0, m_InsertElt(m_Value(), m_Constant(C),
5212 m_ConstantInt(IndexC)))) {
5213 // Match a splat shuffle mask of the insert index allowing undef elements.
5214 int InsertIndex = IndexC->getZExtValue();
5215 if (all_of(Indices, [InsertIndex](int MaskElt) {
5216 return MaskElt == InsertIndex || MaskElt == -1;
5217 })) {
5218 assert(isa<UndefValue>(Op1) && "Expected undef operand 1 for splat");
5219
5220 // Shuffle mask undefs become undefined constant result elements.
5221 SmallVector<Constant *, 16> VecC(MaskNumElts, C);
5222 for (unsigned i = 0; i != MaskNumElts; ++i)
5223 if (Indices[i] == -1)
5224 VecC[i] = UndefValue::get(C->getType());
5225 return ConstantVector::get(VecC);
5226 }
5227 }
5228
5229 // A shuffle of a splat is always the splat itself. Legal if the shuffle's
5230 // value type is the same as the input vectors' type.
5231 if (auto *OpShuf = dyn_cast<ShuffleVectorInst>(Op0))
5232 if (Q.isUndefValue(Op1) && RetTy == InVecTy &&
5233 all_equal(OpShuf->getShuffleMask()))
5234 return Op0;
5235
5236 // All remaining transformations depend on the value of the mask, which is
5237 // not known at compile time for scalable vectors.
5238 if (Scalable)
5239 return nullptr;
5240
5241 // Don't fold a shuffle with undef mask elements. This may get folded in a
5242 // better way using demanded bits or other analysis.
5243 // TODO: Should we allow this?
5244 if (is_contained(Indices, -1))
5245 return nullptr;
5246
5247 // Check if every element of this shuffle can be mapped back to the
5248 // corresponding element of a single root vector. If so, we don't need this
5249 // shuffle. This handles simple identity shuffles as well as chains of
5250 // shuffles that may widen/narrow and/or move elements across lanes and back.
5251 Value *RootVec = nullptr;
5252 for (unsigned i = 0; i != MaskNumElts; ++i) {
5253 // Note that recursion is limited for each vector element, so if any element
5254 // exceeds the limit, this will fail to simplify.
5255 RootVec =
5256 foldIdentityShuffles(i, Op0, Op1, Indices[i], RootVec, MaxRecurse);
5257
5258 // We can't replace a widening/narrowing shuffle with one of its operands.
5259 if (!RootVec || RootVec->getType() != RetTy)
5260 return nullptr;
5261 }
5262 return RootVec;
5263 }
5264
5265 /// Given operands for a ShuffleVectorInst, fold the result or return null.
5266 Value *llvm::simplifyShuffleVectorInst(Value *Op0, Value *Op1,
5267 ArrayRef<int> Mask, Type *RetTy,
5268 const SimplifyQuery &Q) {
5269 return ::simplifyShuffleVectorInst(Op0, Op1, Mask, RetTy, Q, RecursionLimit);
5270 }
5271
5272 static Constant *foldConstant(Instruction::UnaryOps Opcode, Value *&Op,
5273 const SimplifyQuery &Q) {
5274 if (auto *C = dyn_cast<Constant>(Op))
5275 return ConstantFoldUnaryOpOperand(Opcode, C, Q.DL);
5276 return nullptr;
5277 }
5278
5279 /// Given the operand for an FNeg, see if we can fold the result. If not, this
5280 /// returns null.
5281 static Value *simplifyFNegInst(Value *Op, FastMathFlags FMF,
5282 const SimplifyQuery &Q, unsigned MaxRecurse) {
5283 if (Constant *C = foldConstant(Instruction::FNeg, Op, Q))
5284 return C;
5285
5286 Value *X;
5287 // fneg (fneg X) ==> X
5288 if (match(Op, m_FNeg(m_Value(X))))
5289 return X;
5290
5291 return nullptr;
5292 }
5293
5294 Value *llvm::simplifyFNegInst(Value *Op, FastMathFlags FMF,
5295 const SimplifyQuery &Q) {
5296 return ::simplifyFNegInst(Op, FMF, Q, RecursionLimit);
5297 }
5298
5299 /// Try to propagate existing NaN values when possible. If not, replace the
5300 /// constant or elements in the constant with a canonical NaN.
5301 static Constant *propagateNaN(Constant *In) {
5302 if (auto *VecTy = dyn_cast<FixedVectorType>(In->getType())) {
5303 unsigned NumElts = VecTy->getNumElements();
5304 SmallVector<Constant *, 32> NewC(NumElts);
5305 for (unsigned i = 0; i != NumElts; ++i) {
5306 Constant *EltC = In->getAggregateElement(i);
5307 // Poison and existing NaN elements propagate.
5308 // Replace unknown or undef elements with canonical NaN.
5309 if (EltC && (isa<PoisonValue>(EltC) || EltC->isNaN()))
5310 NewC[i] = EltC;
5311 else
5312 NewC[i] = (ConstantFP::getNaN(VecTy->getElementType()));
5313 }
5314 return ConstantVector::get(NewC);
5315 }
5316
5317 // Not a fixed vector: if it is not already a NaN, use a canonical NaN.
5318 if (!In->isNaN())
5319 return ConstantFP::getNaN(In->getType());
5320
5321 // Propagate the existing NaN constant when possible.
5322 // TODO: Should we quiet a signaling NaN?
5323 return In;
5324 }
5325
5326 /// Perform folds that are common to any floating-point operation. This implies
5327 /// transforms based on poison/undef/NaN because the operation itself makes no
5328 /// difference to the result.
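/// For example (illustrative), with the default FP environment a NaN operand
/// propagates: "fadd float %x, NaN" folds to a NaN constant, while the same
/// operation with the 'nnan' flag folds to poison.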
5329 static Constant *simplifyFPOp(ArrayRef<Value *> Ops, FastMathFlags FMF,
5330 const SimplifyQuery &Q,
5331 fp::ExceptionBehavior ExBehavior,
5332 RoundingMode Rounding) {
5333 // Poison is independent of anything else. It always propagates from an
5334 // operand to a math result.
5335 if (any_of(Ops, [](Value *V) { return match(V, m_Poison()); }))
5336 return PoisonValue::get(Ops[0]->getType());
5337
5338 for (Value *V : Ops) {
5339 bool IsNan = match(V, m_NaN());
5340 bool IsInf = match(V, m_Inf());
5341 bool IsUndef = Q.isUndefValue(V);
5342
5343 // If this operation has 'nnan' or 'ninf' and at least one disallowed
5344 // operand (an undef operand can be chosen to be NaN/Inf), then the result
5345 // of this operation is poison.
5346 if (FMF.noNaNs() && (IsNan || IsUndef))
5347 return PoisonValue::get(V->getType());
5348 if (FMF.noInfs() && (IsInf || IsUndef))
5349 return PoisonValue::get(V->getType());
5350
5351 if (isDefaultFPEnvironment(ExBehavior, Rounding)) {
5352 // Undef does not propagate because undef means that all bits can take on
5353 // any value. If this is undef * NaN for example, then the result values
5354 // (at least the exponent bits) are limited. Assume the undef is a
5355 // canonical NaN and propagate that.
5356 if (IsUndef)
5357 return ConstantFP::getNaN(V->getType());
5358 if (IsNan)
5359 return propagateNaN(cast<Constant>(V));
5360 } else if (ExBehavior != fp::ebStrict) {
5361 if (IsNan)
5362 return propagateNaN(cast<Constant>(V));
5363 }
5364 }
5365 return nullptr;
5366 }
5367
5368 /// Given operands for an FAdd, see if we can fold the result. If not, this
5369 /// returns null.
5370 static Value *
5371 simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5372 const SimplifyQuery &Q, unsigned MaxRecurse,
5373 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5374 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5375 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5376 if (Constant *C = foldOrCommuteConstant(Instruction::FAdd, Op0, Op1, Q))
5377 return C;
5378
5379 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5380 return C;
5381
5382 // fadd X, -0 ==> X
5383 // With strict/constrained FP, we have these possible edge cases that do
5384 // not simplify to Op0:
5385 // fadd SNaN, -0.0 --> QNaN
5386 // fadd +0.0, -0.0 --> -0.0 (but only with round toward negative)
5387 if (canIgnoreSNaN(ExBehavior, FMF) &&
5388 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5389 FMF.noSignedZeros()))
5390 if (match(Op1, m_NegZeroFP()))
5391 return Op0;
5392
5393 // fadd X, 0 ==> X, when we know X is not -0
5394 if (canIgnoreSNaN(ExBehavior, FMF))
5395 if (match(Op1, m_PosZeroFP()) &&
5396 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
5397 return Op0;
5398
5399 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5400 return nullptr;
5401
5402 if (FMF.noNaNs()) {
5403 // With nnan: X + {+/-}Inf --> {+/-}Inf
5404 if (match(Op1, m_Inf()))
5405 return Op1;
5406
5407 // With nnan: -X + X --> 0.0 (and commuted variant)
5408 // We don't have to explicitly exclude infinities (ninf): INF + -INF == NaN.
5409 // Negative zeros are allowed because we always end up with positive zero:
5410 // X = -0.0: (-0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5411 // X = -0.0: ( 0.0 - (-0.0)) + (-0.0) == ( 0.0) + (-0.0) == 0.0
5412 // X = 0.0: (-0.0 - ( 0.0)) + ( 0.0) == (-0.0) + ( 0.0) == 0.0
5413 // X = 0.0: ( 0.0 - ( 0.0)) + ( 0.0) == ( 0.0) + ( 0.0) == 0.0
5414 if (match(Op0, m_FSub(m_AnyZeroFP(), m_Specific(Op1))) ||
5415 match(Op1, m_FSub(m_AnyZeroFP(), m_Specific(Op0))))
5416 return ConstantFP::getNullValue(Op0->getType());
5417
5418 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
5419 match(Op1, m_FNeg(m_Specific(Op0))))
5420 return ConstantFP::getNullValue(Op0->getType());
5421 }
5422
5423 // (X - Y) + Y --> X
5424 // Y + (X - Y) --> X
5425 Value *X;
5426 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5427 (match(Op0, m_FSub(m_Value(X), m_Specific(Op1))) ||
5428 match(Op1, m_FSub(m_Value(X), m_Specific(Op0)))))
5429 return X;
5430
5431 return nullptr;
5432 }
5433
5434 /// Given operands for an FSub, see if we can fold the result. If not, this
5435 /// returns null.
5436 static Value *
5437 simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5438 const SimplifyQuery &Q, unsigned MaxRecurse,
5439 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5440 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5441 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5442 if (Constant *C = foldOrCommuteConstant(Instruction::FSub, Op0, Op1, Q))
5443 return C;
5444
5445 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5446 return C;
5447
5448 // fsub X, +0 ==> X
5449 if (canIgnoreSNaN(ExBehavior, FMF) &&
5450 (!canRoundingModeBe(Rounding, RoundingMode::TowardNegative) ||
5451 FMF.noSignedZeros()))
5452 if (match(Op1, m_PosZeroFP()))
5453 return Op0;
5454
5455 // fsub X, -0 ==> X, when we know X is not -0
5456 if (canIgnoreSNaN(ExBehavior, FMF))
5457 if (match(Op1, m_NegZeroFP()) &&
5458 (FMF.noSignedZeros() || CannotBeNegativeZero(Op0, Q.TLI)))
5459 return Op0;
5460
5461 // fsub -0.0, (fsub -0.0, X) ==> X
5462 // fsub -0.0, (fneg X) ==> X
5463 Value *X;
5464 if (canIgnoreSNaN(ExBehavior, FMF))
5465 if (match(Op0, m_NegZeroFP()) && match(Op1, m_FNeg(m_Value(X))))
5466 return X;
5467
5468 // fsub 0.0, (fsub 0.0, X) ==> X if signed zeros are ignored.
5469 // fsub 0.0, (fneg X) ==> X if signed zeros are ignored.
5470 if (canIgnoreSNaN(ExBehavior, FMF))
5471 if (FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()) &&
5472 (match(Op1, m_FSub(m_AnyZeroFP(), m_Value(X))) ||
5473 match(Op1, m_FNeg(m_Value(X)))))
5474 return X;
5475
5476 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5477 return nullptr;
5478
5479 if (FMF.noNaNs()) {
5480 // fsub nnan x, x ==> 0.0
5481 if (Op0 == Op1)
5482 return Constant::getNullValue(Op0->getType());
5483
5484 // With nnan: {+/-}Inf - X --> {+/-}Inf
5485 if (match(Op0, m_Inf()))
5486 return Op0;
5487
5488 // With nnan: X - {+/-}Inf --> {-/+}Inf
5489 if (match(Op1, m_Inf()))
5490 return foldConstant(Instruction::FNeg, Op1, Q);
5491 }
5492
5493 // Y - (Y - X) --> X
5494 // (X + Y) - Y --> X
5495 if (FMF.noSignedZeros() && FMF.allowReassoc() &&
5496 (match(Op1, m_FSub(m_Specific(Op0), m_Value(X))) ||
5497 match(Op0, m_c_FAdd(m_Specific(Op1), m_Value(X)))))
5498 return X;
5499
5500 return nullptr;
5501 }
5502
5503 static Value *simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5504 const SimplifyQuery &Q, unsigned MaxRecurse,
5505 fp::ExceptionBehavior ExBehavior,
5506 RoundingMode Rounding) {
5507 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5508 return C;
5509
5510 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5511 return nullptr;
5512
5513 // Canonicalize special constants as operand 1.
5514 if (match(Op0, m_FPOne()) || match(Op0, m_AnyZeroFP()))
5515 std::swap(Op0, Op1);
5516
5517 // X * 1.0 --> X
5518 if (match(Op1, m_FPOne()))
5519 return Op0;
5520
5521 if (match(Op1, m_AnyZeroFP())) {
5522 // X * 0.0 --> 0.0 (with nnan and nsz)
5523 if (FMF.noNaNs() && FMF.noSignedZeros())
5524 return ConstantFP::getNullValue(Op0->getType());
5525
5526 // +normal number * (-)0.0 --> (-)0.0
5527 if (isKnownNeverInfinity(Op0, Q.TLI) && isKnownNeverNaN(Op0, Q.TLI) &&
5528 SignBitMustBeZero(Op0, Q.TLI))
5529 return Op1;
5530 }
5531
5532 // sqrt(X) * sqrt(X) --> X, if we can:
5533 // 1. Remove the intermediate rounding (reassociate).
5534 // 2. Ignore non-zero negative numbers because sqrt would produce NaN.
5535 // 3. Ignore -0.0 because sqrt(-0.0) == -0.0, but -0.0 * -0.0 == 0.0.
5536 Value *X;
5537 if (Op0 == Op1 && match(Op0, m_Sqrt(m_Value(X))) && FMF.allowReassoc() &&
5538 FMF.noNaNs() && FMF.noSignedZeros())
5539 return X;
5540
5541 return nullptr;
5542 }
5543
5544 /// Given the operands for an FMul, see if we can fold the result
5545 static Value *
5546 simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5547 const SimplifyQuery &Q, unsigned MaxRecurse,
5548 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5549 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5550 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5551 if (Constant *C = foldOrCommuteConstant(Instruction::FMul, Op0, Op1, Q))
5552 return C;
5553
5554 // Now apply simplifications that do not require rounding.
5555 return simplifyFMAFMul(Op0, Op1, FMF, Q, MaxRecurse, ExBehavior, Rounding);
5556 }
5557
5558 Value *llvm::simplifyFAddInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5559 const SimplifyQuery &Q,
5560 fp::ExceptionBehavior ExBehavior,
5561 RoundingMode Rounding) {
5562 return ::simplifyFAddInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5563 Rounding);
5564 }
5565
5566 Value *llvm::simplifyFSubInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5567 const SimplifyQuery &Q,
5568 fp::ExceptionBehavior ExBehavior,
5569 RoundingMode Rounding) {
5570 return ::simplifyFSubInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5571 Rounding);
5572 }
5573
5574 Value *llvm::simplifyFMulInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5575 const SimplifyQuery &Q,
5576 fp::ExceptionBehavior ExBehavior,
5577 RoundingMode Rounding) {
5578 return ::simplifyFMulInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5579 Rounding);
5580 }
5581
5582 Value *llvm::simplifyFMAFMul(Value *Op0, Value *Op1, FastMathFlags FMF,
5583 const SimplifyQuery &Q,
5584 fp::ExceptionBehavior ExBehavior,
5585 RoundingMode Rounding) {
5586 return ::simplifyFMAFMul(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5587 Rounding);
5588 }
5589
5590 static Value *
5591 simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5592 const SimplifyQuery &Q, unsigned,
5593 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5594 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5595 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5596 if (Constant *C = foldOrCommuteConstant(Instruction::FDiv, Op0, Op1, Q))
5597 return C;
5598
5599 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5600 return C;
5601
5602 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5603 return nullptr;
5604
5605 // X / 1.0 -> X
5606 if (match(Op1, m_FPOne()))
5607 return Op0;
5608
5609 // 0 / X -> 0
5610 // Requires that NaNs are off (X could be zero) and signed zeroes are
5611 // ignored (X could be positive or negative, so the output sign is unknown).
5612 if (FMF.noNaNs() && FMF.noSignedZeros() && match(Op0, m_AnyZeroFP()))
5613 return ConstantFP::getNullValue(Op0->getType());
5614
5615 if (FMF.noNaNs()) {
5616 // X / X -> 1.0 is legal when NaNs are ignored.
5617 // We can ignore infinities because INF/INF is NaN.
5618 if (Op0 == Op1)
5619 return ConstantFP::get(Op0->getType(), 1.0);
5620
5621 // (X * Y) / Y --> X if we can reassociate to the above form.
5622 Value *X;
5623 if (FMF.allowReassoc() && match(Op0, m_c_FMul(m_Value(X), m_Specific(Op1))))
5624 return X;
5625
5626 // -X / X -> -1.0 and
5627 // X / -X -> -1.0 are legal when NaNs are ignored.
5628 // We can ignore signed zeros because +-0.0/+-0.0 is NaN and ignored.
5629 if (match(Op0, m_FNegNSZ(m_Specific(Op1))) ||
5630 match(Op1, m_FNegNSZ(m_Specific(Op0))))
5631 return ConstantFP::get(Op0->getType(), -1.0);
5632
5633 // nnan ninf X / [-]0.0 -> poison
5634 if (FMF.noInfs() && match(Op1, m_AnyZeroFP()))
5635 return PoisonValue::get(Op1->getType());
5636 }
5637
5638 return nullptr;
5639 }
5640
5641 Value *llvm::simplifyFDivInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5642 const SimplifyQuery &Q,
5643 fp::ExceptionBehavior ExBehavior,
5644 RoundingMode Rounding) {
5645 return ::simplifyFDivInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5646 Rounding);
5647 }
5648
5649 static Value *
5650 simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5651 const SimplifyQuery &Q, unsigned,
5652 fp::ExceptionBehavior ExBehavior = fp::ebIgnore,
5653 RoundingMode Rounding = RoundingMode::NearestTiesToEven) {
5654 if (isDefaultFPEnvironment(ExBehavior, Rounding))
5655 if (Constant *C = foldOrCommuteConstant(Instruction::FRem, Op0, Op1, Q))
5656 return C;
5657
5658 if (Constant *C = simplifyFPOp({Op0, Op1}, FMF, Q, ExBehavior, Rounding))
5659 return C;
5660
5661 if (!isDefaultFPEnvironment(ExBehavior, Rounding))
5662 return nullptr;
5663
5664 // Unlike fdiv, the result of frem always matches the sign of the dividend.
5665 // The constant match may include undef elements in a vector, so return a full
5666 // zero constant as the result.
5667 if (FMF.noNaNs()) {
5668 // +0 % X -> 0
5669 if (match(Op0, m_PosZeroFP()))
5670 return ConstantFP::getNullValue(Op0->getType());
5671 // -0 % X -> -0
5672 if (match(Op0, m_NegZeroFP()))
5673 return ConstantFP::getNegativeZero(Op0->getType());
5674 }
5675
5676 return nullptr;
5677 }
5678
5679 Value *llvm::simplifyFRemInst(Value *Op0, Value *Op1, FastMathFlags FMF,
5680 const SimplifyQuery &Q,
5681 fp::ExceptionBehavior ExBehavior,
5682 RoundingMode Rounding) {
5683 return ::simplifyFRemInst(Op0, Op1, FMF, Q, RecursionLimit, ExBehavior,
5684 Rounding);
5685 }
5686
5687 //=== Helper functions for higher up the class hierarchy.
5688
5689 /// Given the operand for a UnaryOperator, see if we can fold the result.
5690 /// If not, this returns null.
5691 static Value *simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q,
5692 unsigned MaxRecurse) {
5693 switch (Opcode) {
5694 case Instruction::FNeg:
5695 return simplifyFNegInst(Op, FastMathFlags(), Q, MaxRecurse);
5696 default:
5697 llvm_unreachable("Unexpected opcode");
5698 }
5699 }
5700
5701 /// Given the operand for a UnaryOperator, see if we can fold the result.
5702 /// If not, this returns null.
5703 /// Try to use FastMathFlags when folding the result.
5704 static Value *simplifyFPUnOp(unsigned Opcode, Value *Op,
5705 const FastMathFlags &FMF, const SimplifyQuery &Q,
5706 unsigned MaxRecurse) {
5707 switch (Opcode) {
5708 case Instruction::FNeg:
5709 return simplifyFNegInst(Op, FMF, Q, MaxRecurse);
5710 default:
5711 return simplifyUnOp(Opcode, Op, Q, MaxRecurse);
5712 }
5713 }
5714
5715 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, const SimplifyQuery &Q) {
5716 return ::simplifyUnOp(Opcode, Op, Q, RecursionLimit);
5717 }
5718
5719 Value *llvm::simplifyUnOp(unsigned Opcode, Value *Op, FastMathFlags FMF,
5720 const SimplifyQuery &Q) {
5721 return ::simplifyFPUnOp(Opcode, Op, FMF, Q, RecursionLimit);
5722 }
5723
5724 /// Given operands for a BinaryOperator, see if we can fold the result.
5725 /// If not, this returns null.
5726 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5727 const SimplifyQuery &Q, unsigned MaxRecurse) {
5728 switch (Opcode) {
5729 case Instruction::Add:
5730 return simplifyAddInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5731 MaxRecurse);
5732 case Instruction::Sub:
5733 return simplifySubInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5734 MaxRecurse);
5735 case Instruction::Mul:
5736 return simplifyMulInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5737 MaxRecurse);
5738 case Instruction::SDiv:
5739 return simplifySDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5740 case Instruction::UDiv:
5741 return simplifyUDivInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5742 case Instruction::SRem:
5743 return simplifySRemInst(LHS, RHS, Q, MaxRecurse);
5744 case Instruction::URem:
5745 return simplifyURemInst(LHS, RHS, Q, MaxRecurse);
5746 case Instruction::Shl:
5747 return simplifyShlInst(LHS, RHS, /* IsNSW */ false, /* IsNUW */ false, Q,
5748 MaxRecurse);
5749 case Instruction::LShr:
5750 return simplifyLShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5751 case Instruction::AShr:
5752 return simplifyAShrInst(LHS, RHS, /* IsExact */ false, Q, MaxRecurse);
5753 case Instruction::And:
5754 return simplifyAndInst(LHS, RHS, Q, MaxRecurse);
5755 case Instruction::Or:
5756 return simplifyOrInst(LHS, RHS, Q, MaxRecurse);
5757 case Instruction::Xor:
5758 return simplifyXorInst(LHS, RHS, Q, MaxRecurse);
5759 case Instruction::FAdd:
5760 return simplifyFAddInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5761 case Instruction::FSub:
5762 return simplifyFSubInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5763 case Instruction::FMul:
5764 return simplifyFMulInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5765 case Instruction::FDiv:
5766 return simplifyFDivInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5767 case Instruction::FRem:
5768 return simplifyFRemInst(LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5769 default:
5770 llvm_unreachable("Unexpected opcode");
5771 }
5772 }
5773
5774 /// Given operands for a BinaryOperator, see if we can fold the result.
5775 /// If not, this returns null.
5776 /// Try to use FastMathFlags when folding the result.
5777 static Value *simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5778 const FastMathFlags &FMF, const SimplifyQuery &Q,
5779 unsigned MaxRecurse) {
5780 switch (Opcode) {
5781 case Instruction::FAdd:
5782 return simplifyFAddInst(LHS, RHS, FMF, Q, MaxRecurse);
5783 case Instruction::FSub:
5784 return simplifyFSubInst(LHS, RHS, FMF, Q, MaxRecurse);
5785 case Instruction::FMul:
5786 return simplifyFMulInst(LHS, RHS, FMF, Q, MaxRecurse);
5787 case Instruction::FDiv:
5788 return simplifyFDivInst(LHS, RHS, FMF, Q, MaxRecurse);
5789 default:
5790 return simplifyBinOp(Opcode, LHS, RHS, Q, MaxRecurse);
5791 }
5792 }
5793
5794 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5795 const SimplifyQuery &Q) {
5796 return ::simplifyBinOp(Opcode, LHS, RHS, Q, RecursionLimit);
5797 }
5798
5799 Value *llvm::simplifyBinOp(unsigned Opcode, Value *LHS, Value *RHS,
5800 FastMathFlags FMF, const SimplifyQuery &Q) {
5801 return ::simplifyBinOp(Opcode, LHS, RHS, FMF, Q, RecursionLimit);
5802 }
5803
5804 /// Given operands for a CmpInst, see if we can fold the result.
5805 static Value *simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5806 const SimplifyQuery &Q, unsigned MaxRecurse) {
5807 if (CmpInst::isIntPredicate((CmpInst::Predicate)Predicate))
5808 return simplifyICmpInst(Predicate, LHS, RHS, Q, MaxRecurse);
5809 return simplifyFCmpInst(Predicate, LHS, RHS, FastMathFlags(), Q, MaxRecurse);
5810 }
5811
5812 Value *llvm::simplifyCmpInst(unsigned Predicate, Value *LHS, Value *RHS,
5813 const SimplifyQuery &Q) {
5814 return ::simplifyCmpInst(Predicate, LHS, RHS, Q, RecursionLimit);
5815 }
5816
5817 static bool isIdempotent(Intrinsic::ID ID) {
5818 switch (ID) {
5819 default:
5820 return false;
5821
5822 // Unary idempotent: f(f(x)) = f(x)
5823 case Intrinsic::fabs:
5824 case Intrinsic::floor:
5825 case Intrinsic::ceil:
5826 case Intrinsic::trunc:
5827 case Intrinsic::rint:
5828 case Intrinsic::nearbyint:
5829 case Intrinsic::round:
5830 case Intrinsic::roundeven:
5831 case Intrinsic::canonicalize:
5832 case Intrinsic::arithmetic_fence:
5833 return true;
5834 }
5835 }
5836
5837 /// Return true if the intrinsic rounds a floating-point value to an integral
5838 /// floating-point value (not an integer type).
5839 static bool removesFPFraction(Intrinsic::ID ID) {
5840 switch (ID) {
5841 default:
5842 return false;
5843
5844 case Intrinsic::floor:
5845 case Intrinsic::ceil:
5846 case Intrinsic::trunc:
5847 case Intrinsic::rint:
5848 case Intrinsic::nearbyint:
5849 case Intrinsic::round:
5850 case Intrinsic::roundeven:
5851 return true;
5852 }
5853 }
5854
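/// Try to fold a call to llvm.load.relative with constant operands: if the
/// 32-bit value stored at Ptr+Offset is a "ptrtoint Target - ptrtoint Ptr"
/// constant expression, the relative load resolves directly to Target.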
5855 static Value *simplifyRelativeLoad(Constant *Ptr, Constant *Offset,
5856 const DataLayout &DL) {
5857 GlobalValue *PtrSym;
5858 APInt PtrOffset;
5859 if (!IsConstantOffsetFromGlobal(Ptr, PtrSym, PtrOffset, DL))
5860 return nullptr;
5861
5862 Type *Int8PtrTy = Type::getInt8PtrTy(Ptr->getContext());
5863 Type *Int32Ty = Type::getInt32Ty(Ptr->getContext());
5864 Type *Int32PtrTy = Int32Ty->getPointerTo();
5865 Type *Int64Ty = Type::getInt64Ty(Ptr->getContext());
5866
5867 auto *OffsetConstInt = dyn_cast<ConstantInt>(Offset);
5868 if (!OffsetConstInt || OffsetConstInt->getType()->getBitWidth() > 64)
5869 return nullptr;
5870
5871 uint64_t OffsetInt = OffsetConstInt->getSExtValue();
5872 if (OffsetInt % 4 != 0)
5873 return nullptr;
5874
5875 Constant *C = ConstantExpr::getGetElementPtr(
5876 Int32Ty, ConstantExpr::getBitCast(Ptr, Int32PtrTy),
5877 ConstantInt::get(Int64Ty, OffsetInt / 4));
5878 Constant *Loaded = ConstantFoldLoadFromConstPtr(C, Int32Ty, DL);
5879 if (!Loaded)
5880 return nullptr;
5881
5882 auto *LoadedCE = dyn_cast<ConstantExpr>(Loaded);
5883 if (!LoadedCE)
5884 return nullptr;
5885
5886 if (LoadedCE->getOpcode() == Instruction::Trunc) {
5887 LoadedCE = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5888 if (!LoadedCE)
5889 return nullptr;
5890 }
5891
5892 if (LoadedCE->getOpcode() != Instruction::Sub)
5893 return nullptr;
5894
5895 auto *LoadedLHS = dyn_cast<ConstantExpr>(LoadedCE->getOperand(0));
5896 if (!LoadedLHS || LoadedLHS->getOpcode() != Instruction::PtrToInt)
5897 return nullptr;
5898 auto *LoadedLHSPtr = LoadedLHS->getOperand(0);
5899
5900 Constant *LoadedRHS = LoadedCE->getOperand(1);
5901 GlobalValue *LoadedRHSSym;
5902 APInt LoadedRHSOffset;
5903 if (!IsConstantOffsetFromGlobal(LoadedRHS, LoadedRHSSym, LoadedRHSOffset,
5904 DL) ||
5905 PtrSym != LoadedRHSSym || PtrOffset != LoadedRHSOffset)
5906 return nullptr;
5907
5908 return ConstantExpr::getBitCast(LoadedLHSPtr, Int8PtrTy);
5909 }
5910
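/// Try to simplify an intrinsic call that takes a single operand.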
5911 static Value *simplifyUnaryIntrinsic(Function *F, Value *Op0,
5912 const SimplifyQuery &Q) {
5913 // Idempotent functions return the same result when called repeatedly.
5914 Intrinsic::ID IID = F->getIntrinsicID();
5915 if (isIdempotent(IID))
5916 if (auto *II = dyn_cast<IntrinsicInst>(Op0))
5917 if (II->getIntrinsicID() == IID)
5918 return II;
5919
5920 if (removesFPFraction(IID)) {
5921 // Converting from int or calling a rounding function always results in a
5922 // finite integral number or infinity. For those inputs, rounding functions
5923 // always return the same value, so the (2nd) rounding is eliminated. Ex:
5924 // floor (sitofp x) -> sitofp x
5925 // round (ceil x) -> ceil x
5926 auto *II = dyn_cast<IntrinsicInst>(Op0);
5927 if ((II && removesFPFraction(II->getIntrinsicID())) ||
5928 match(Op0, m_SIToFP(m_Value())) || match(Op0, m_UIToFP(m_Value())))
5929 return Op0;
5930 }
5931
5932 Value *X;
5933 switch (IID) {
5934 case Intrinsic::fabs:
5935 if (SignBitMustBeZero(Op0, Q.TLI))
5936 return Op0;
5937 break;
5938 case Intrinsic::bswap:
5939 // bswap(bswap(x)) -> x
5940 if (match(Op0, m_BSwap(m_Value(X))))
5941 return X;
5942 break;
5943 case Intrinsic::bitreverse:
5944 // bitreverse(bitreverse(x)) -> x
5945 if (match(Op0, m_BitReverse(m_Value(X))))
5946 return X;
5947 break;
5948 case Intrinsic::ctpop: {
5949 // ctpop(X) -> 1 iff X is non-zero power of 2.
5950 if (isKnownToBeAPowerOfTwo(Op0, Q.DL, /*OrZero*/ false, 0, Q.AC, Q.CxtI,
5951 Q.DT))
5952 return ConstantInt::get(Op0->getType(), 1);
5953 // If everything but the lowest bit is zero, that bit is the pop-count. Ex:
5954 // ctpop(and X, 1) --> and X, 1
5955 unsigned BitWidth = Op0->getType()->getScalarSizeInBits();
5956 if (MaskedValueIsZero(Op0, APInt::getHighBitsSet(BitWidth, BitWidth - 1),
5957 Q.DL, 0, Q.AC, Q.CxtI, Q.DT))
5958 return Op0;
5959 break;
5960 }
5961 case Intrinsic::exp:
5962 // exp(log(x)) -> x
5963 if (Q.CxtI->hasAllowReassoc() &&
5964 match(Op0, m_Intrinsic<Intrinsic::log>(m_Value(X))))
5965 return X;
5966 break;
5967 case Intrinsic::exp2:
5968 // exp2(log2(x)) -> x
5969 if (Q.CxtI->hasAllowReassoc() &&
5970 match(Op0, m_Intrinsic<Intrinsic::log2>(m_Value(X))))
5971 return X;
5972 break;
5973 case Intrinsic::log:
5974 // log(exp(x)) -> x
5975 if (Q.CxtI->hasAllowReassoc() &&
5976 match(Op0, m_Intrinsic<Intrinsic::exp>(m_Value(X))))
5977 return X;
5978 break;
5979 case Intrinsic::log2:
5980 // log2(exp2(x)) -> x
5981 if (Q.CxtI->hasAllowReassoc() &&
5982 (match(Op0, m_Intrinsic<Intrinsic::exp2>(m_Value(X))) ||
5983 match(Op0,
5984 m_Intrinsic<Intrinsic::pow>(m_SpecificFP(2.0), m_Value(X)))))
5985 return X;
5986 break;
5987 case Intrinsic::log10:
5988 // log10(pow(10.0, x)) -> x
5989 if (Q.CxtI->hasAllowReassoc() &&
5990 match(Op0, m_Intrinsic<Intrinsic::pow>(m_SpecificFP(10.0), m_Value(X))))
5991 return X;
5992 break;
5993 case Intrinsic::experimental_vector_reverse:
5994 // experimental.vector.reverse(experimental.vector.reverse(x)) -> x
5995 if (match(Op0, m_VecReverse(m_Value(X))))
5996 return X;
5997 // experimental.vector.reverse(splat(X)) -> splat(X)
5998 if (isSplatValue(Op0))
5999 return Op0;
6000 break;
6001 default:
6002 break;
6003 }
6004
6005 return nullptr;
6006 }
6007
6008 /// Given a min/max intrinsic, see if it can be removed based on having an
6009 /// operand that is another min/max intrinsic with shared operand(s). The caller
6010 /// is expected to swap the operand arguments to handle commutation.
6011 static Value *foldMinMaxSharedOp(Intrinsic::ID IID, Value *Op0, Value *Op1) {
6012 Value *X, *Y;
6013 if (!match(Op0, m_MaxOrMin(m_Value(X), m_Value(Y))))
6014 return nullptr;
6015
6016 auto *MM0 = dyn_cast<IntrinsicInst>(Op0);
6017 if (!MM0)
6018 return nullptr;
6019 Intrinsic::ID IID0 = MM0->getIntrinsicID();
6020
6021 if (Op1 == X || Op1 == Y ||
6022 match(Op1, m_c_MaxOrMin(m_Specific(X), m_Specific(Y)))) {
6023 // max (max X, Y), X --> max X, Y
6024 if (IID0 == IID)
6025 return MM0;
6026 // max (min X, Y), X --> X
6027 if (IID0 == getInverseMinMaxIntrinsic(IID))
6028 return Op1;
6029 }
6030 return nullptr;
6031 }
6032
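/// Try to simplify an intrinsic call that takes exactly two operands.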
6033 static Value *simplifyBinaryIntrinsic(Function *F, Value *Op0, Value *Op1,
6034 const SimplifyQuery &Q) {
6035 Intrinsic::ID IID = F->getIntrinsicID();
6036 Type *ReturnType = F->getReturnType();
6037 unsigned BitWidth = ReturnType->getScalarSizeInBits();
6038 switch (IID) {
6039 case Intrinsic::abs:
6040 // abs(abs(x)) -> abs(x). We don't need to worry about the nsw arg here.
6041 // It is always ok to pick the earlier abs. We'll just lose nsw if it's only
6042 // on the outer abs.
6043 if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(), m_Value())))
6044 return Op0;
6045 break;
6046
6047 case Intrinsic::cttz: {
6048 Value *X;
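    // cttz(1 << X) --> X: the only set bit sits X positions above bit 0, so
    // there are exactly X trailing zeros.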
6049 if (match(Op0, m_Shl(m_One(), m_Value(X))))
6050 return X;
6051 break;
6052 }
6053 case Intrinsic::ctlz: {
6054 Value *X;
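    // ctlz(C >>u X) --> X when C is negative (sign bit set): the logical
    // shift leaves the highest set bit exactly X positions below the top.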
6055 if (match(Op0, m_LShr(m_Negative(), m_Value(X))))
6056 return X;
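    // ctlz(C >>s X) --> 0 for negative C: the arithmetic shift keeps the sign
    // bit set, so there are never any leading zeros.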
6057 if (match(Op0, m_AShr(m_Negative(), m_Value())))
6058 return Constant::getNullValue(ReturnType);
6059 break;
6060 }
6061 case Intrinsic::smax:
6062 case Intrinsic::smin:
6063 case Intrinsic::umax:
6064 case Intrinsic::umin: {
6065 // If the arguments are the same, this is a no-op.
6066 if (Op0 == Op1)
6067 return Op0;
6068
6069 // Canonicalize immediate constant operand as Op1.
6070 if (match(Op0, m_ImmConstant()))
6071 std::swap(Op0, Op1);
6072
6073 // Assume undef is the limit value.
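    // e.g. umax(X, undef) --> UINT_MAX and smin(X, undef) --> SINT_MIN.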
6074 if (Q.isUndefValue(Op1))
6075 return ConstantInt::get(
6076 ReturnType, MinMaxIntrinsic::getSaturationPoint(IID, BitWidth));
6077
6078 const APInt *C;
6079 if (match(Op1, m_APIntAllowUndef(C))) {
6080 // Clamp to limit value. For example:
6081 // umax(i8 %x, i8 255) --> 255
6082 if (*C == MinMaxIntrinsic::getSaturationPoint(IID, BitWidth))
6083 return ConstantInt::get(ReturnType, *C);
6084
6085 // If the constant op is the opposite of the limit value, the other must
6086 // be larger/smaller or equal. For example:
6087 // umin(i8 %x, i8 255) --> %x
6088 if (*C == MinMaxIntrinsic::getSaturationPoint(
6089 getInverseMinMaxIntrinsic(IID), BitWidth))
6090 return Op0;
6091
6092 // Remove nested call if constant operands allow it. Example:
6093 // max (max X, 7), 5 -> max X, 7
6094 auto *MinMax0 = dyn_cast<IntrinsicInst>(Op0);
6095 if (MinMax0 && MinMax0->getIntrinsicID() == IID) {
6096 // TODO: loosen undef/splat restrictions for vector constants.
6097 Value *M00 = MinMax0->getOperand(0), *M01 = MinMax0->getOperand(1);
6098 const APInt *InnerC;
6099 if ((match(M00, m_APInt(InnerC)) || match(M01, m_APInt(InnerC))) &&
6100 ICmpInst::compare(*InnerC, *C,
6101 ICmpInst::getNonStrictPredicate(
6102 MinMaxIntrinsic::getPredicate(IID))))
6103 return Op0;
6104 }
6105 }
6106
6107 if (Value *V = foldMinMaxSharedOp(IID, Op0, Op1))
6108 return V;
6109 if (Value *V = foldMinMaxSharedOp(IID, Op1, Op0))
6110 return V;
6111
6112 ICmpInst::Predicate Pred =
6113 ICmpInst::getNonStrictPredicate(MinMaxIntrinsic::getPredicate(IID));
6114 if (isICmpTrue(Pred, Op0, Op1, Q.getWithoutUndef(), RecursionLimit))
6115 return Op0;
6116 if (isICmpTrue(Pred, Op1, Op0, Q.getWithoutUndef(), RecursionLimit))
6117 return Op1;
6118
6119 if (std::optional<bool> Imp =
6120 isImpliedByDomCondition(Pred, Op0, Op1, Q.CxtI, Q.DL))
6121 return *Imp ? Op0 : Op1;
6122 if (std::optional<bool> Imp =
6123 isImpliedByDomCondition(Pred, Op1, Op0, Q.CxtI, Q.DL))
6124 return *Imp ? Op1 : Op0;
6125
6126 break;
6127 }
6128 case Intrinsic::usub_with_overflow:
6129 case Intrinsic::ssub_with_overflow:
6130 // X - X -> { 0, false }
6131 // X - undef -> { 0, false }
6132 // undef - X -> { 0, false }
6133 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6134 return Constant::getNullValue(ReturnType);
6135 break;
6136 case Intrinsic::uadd_with_overflow:
6137 case Intrinsic::sadd_with_overflow:
6138 // X + undef -> { -1, false }
6139 // undef + x -> { -1, false }
6140 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1)) {
6141 return ConstantStruct::get(
6142 cast<StructType>(ReturnType),
6143 {Constant::getAllOnesValue(ReturnType->getStructElementType(0)),
6144 Constant::getNullValue(ReturnType->getStructElementType(1))});
6145 }
6146 break;
6147 case Intrinsic::umul_with_overflow:
6148 case Intrinsic::smul_with_overflow:
6149 // 0 * X -> { 0, false }
6150 // X * 0 -> { 0, false }
6151 if (match(Op0, m_Zero()) || match(Op1, m_Zero()))
6152 return Constant::getNullValue(ReturnType);
6153 // undef * X -> { 0, false }
6154 // X * undef -> { 0, false }
6155 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6156 return Constant::getNullValue(ReturnType);
6157 break;
6158 case Intrinsic::uadd_sat:
6159 // sat(MAX + X) -> MAX
6160 // sat(X + MAX) -> MAX
6161 if (match(Op0, m_AllOnes()) || match(Op1, m_AllOnes()))
6162 return Constant::getAllOnesValue(ReturnType);
6163 [[fallthrough]];
6164 case Intrinsic::sadd_sat:
6165 // sat(X + undef) -> -1
6166 // sat(undef + X) -> -1
6167 // For unsigned: Assume undef is MAX, thus we saturate to MAX (-1).
6168 // For signed: Assume undef is ~X, in which case X + ~X = -1.
6169 if (Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6170 return Constant::getAllOnesValue(ReturnType);
6171
6172 // X + 0 -> X
6173 if (match(Op1, m_Zero()))
6174 return Op0;
6175 // 0 + X -> X
6176 if (match(Op0, m_Zero()))
6177 return Op1;
6178 break;
6179 case Intrinsic::usub_sat:
6180 // sat(0 - X) -> 0, sat(X - MAX) -> 0
6181 if (match(Op0, m_Zero()) || match(Op1, m_AllOnes()))
6182 return Constant::getNullValue(ReturnType);
6183 [[fallthrough]];
6184 case Intrinsic::ssub_sat:
6185 // X - X -> 0, X - undef -> 0, undef - X -> 0
6186 if (Op0 == Op1 || Q.isUndefValue(Op0) || Q.isUndefValue(Op1))
6187 return Constant::getNullValue(ReturnType);
6188 // X - 0 -> X
6189 if (match(Op1, m_Zero()))
6190 return Op0;
6191 break;
6192 case Intrinsic::load_relative:
6193 if (auto *C0 = dyn_cast<Constant>(Op0))
6194 if (auto *C1 = dyn_cast<Constant>(Op1))
6195 return simplifyRelativeLoad(C0, C1, Q.DL);
6196 break;
6197 case Intrinsic::powi:
6198 if (auto *Power = dyn_cast<ConstantInt>(Op1)) {
6199 // powi(x, 0) -> 1.0
6200 if (Power->isZero())
6201 return ConstantFP::get(Op0->getType(), 1.0);
6202 // powi(x, 1) -> x
6203 if (Power->isOne())
6204 return Op0;
6205 }
6206 break;
6207 case Intrinsic::copysign:
6208 // copysign X, X --> X
6209 if (Op0 == Op1)
6210 return Op0;
6211 // copysign -X, X --> X
6212 // copysign X, -X --> -X
6213 if (match(Op0, m_FNeg(m_Specific(Op1))) ||
6214 match(Op1, m_FNeg(m_Specific(Op0))))
6215 return Op1;
6216 break;
6217 case Intrinsic::is_fpclass: {
6218 if (isa<PoisonValue>(Op0))
6219 return PoisonValue::get(ReturnType);
6220
6221 uint64_t Mask = cast<ConstantInt>(Op1)->getZExtValue();
6222 // If the mask covers every class (or none of them), the result does not depend on the value.
6223 if ((Mask & fcAllFlags) == fcAllFlags)
6224 return ConstantInt::get(ReturnType, true);
6225 if ((Mask & fcAllFlags) == 0)
6226 return ConstantInt::get(ReturnType, false);
6227 if (Q.isUndefValue(Op0))
6228 return UndefValue::get(ReturnType);
6229 break;
6230 }
6231 case Intrinsic::maxnum:
6232 case Intrinsic::minnum:
6233 case Intrinsic::maximum:
6234 case Intrinsic::minimum: {
6235 // If the arguments are the same, this is a no-op.
6236 if (Op0 == Op1)
6237 return Op0;
6238
6239 // Canonicalize constant operand as Op1.
6240 if (isa<Constant>(Op0))
6241 std::swap(Op0, Op1);
6242
6243 // If an argument is undef, return the other argument.
6244 if (Q.isUndefValue(Op1))
6245 return Op0;
6246
6247 bool PropagateNaN = IID == Intrinsic::minimum || IID == Intrinsic::maximum;
6248 bool IsMin = IID == Intrinsic::minimum || IID == Intrinsic::minnum;
6249
6250 // minnum(X, nan) -> X
6251 // maxnum(X, nan) -> X
6252 // minimum(X, nan) -> nan
6253 // maximum(X, nan) -> nan
6254 if (match(Op1, m_NaN()))
6255 return PropagateNaN ? propagateNaN(cast<Constant>(Op1)) : Op0;
6256
6257 // In the following folds, inf can be replaced with the largest finite
6258 // float, if the ninf flag is set.
6259 const APFloat *C;
6260 if (match(Op1, m_APFloat(C)) &&
6261 (C->isInfinity() || (Q.CxtI->hasNoInfs() && C->isLargest()))) {
6262 // minnum(X, -inf) -> -inf
6263 // maxnum(X, +inf) -> +inf
6264 // minimum(X, -inf) -> -inf if nnan
6265 // maximum(X, +inf) -> +inf if nnan
6266 if (C->isNegative() == IsMin && (!PropagateNaN || Q.CxtI->hasNoNaNs()))
6267 return ConstantFP::get(ReturnType, *C);
6268
6269 // minnum(X, +inf) -> X if nnan
6270 // maxnum(X, -inf) -> X if nnan
6271 // minimum(X, +inf) -> X
6272 // maximum(X, -inf) -> X
6273 if (C->isNegative() != IsMin && (PropagateNaN || Q.CxtI->hasNoNaNs()))
6274 return Op0;
6275 }
6276
6277 // Min/max of the same operation with common operand:
6278 // m(m(X, Y)), X --> m(X, Y) (4 commuted variants)
6279 if (auto *M0 = dyn_cast<IntrinsicInst>(Op0))
6280 if (M0->getIntrinsicID() == IID &&
6281 (M0->getOperand(0) == Op1 || M0->getOperand(1) == Op1))
6282 return Op0;
6283 if (auto *M1 = dyn_cast<IntrinsicInst>(Op1))
6284 if (M1->getIntrinsicID() == IID &&
6285 (M1->getOperand(0) == Op0 || M1->getOperand(1) == Op0))
6286 return Op1;
6287
6288 break;
6289 }
6290 case Intrinsic::vector_extract: {
6291 Type *ReturnType = F->getReturnType();
6292
6293 // (extract_vector (insert_vector _, X, 0), 0) -> X
6294 unsigned IdxN = cast<ConstantInt>(Op1)->getZExtValue();
6295 Value *X = nullptr;
6296 if (match(Op0, m_Intrinsic<Intrinsic::vector_insert>(m_Value(), m_Value(X),
6297 m_Zero())) &&
6298 IdxN == 0 && X->getType() == ReturnType)
6299 return X;
6300
6301 break;
6302 }
6303 default:
6304 break;
6305 }
6306
6307 return nullptr;
6308 }
6309
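/// Try to simplify a call to an intrinsic. Dispatches to the unary/binary
/// helpers above and handles intrinsics with zero or three-plus operands
/// inline.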
6310 static Value *simplifyIntrinsic(CallBase *Call, const SimplifyQuery &Q) {
6311
6312 unsigned NumOperands = Call->arg_size();
6313 Function *F = cast<Function>(Call->getCalledFunction());
6314 Intrinsic::ID IID = F->getIntrinsicID();
6315
6316 // Most of the intrinsics with no operands have some kind of side effect.
6317 // Don't simplify.
6318 if (!NumOperands) {
6319 switch (IID) {
6320 case Intrinsic::vscale: {
6321 // The call may not be inserted into the IR yet at the point simplification is requested.
6322 if (!Call->getParent() || !Call->getParent()->getParent())
6323 return nullptr;
6324 auto Attr = Call->getFunction()->getFnAttribute(Attribute::VScaleRange);
6325 if (!Attr.isValid())
6326 return nullptr;
6327 unsigned VScaleMin = Attr.getVScaleRangeMin();
6328 std::optional<unsigned> VScaleMax = Attr.getVScaleRangeMax();
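      // e.g. with a vscale_range(2,2) attribute, the call folds to the
      // constant 2.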
6329 if (VScaleMax && VScaleMin == VScaleMax)
6330 return ConstantInt::get(F->getReturnType(), VScaleMin);
6331 return nullptr;
6332 }
6333 default:
6334 return nullptr;
6335 }
6336 }
6337
6338 if (NumOperands == 1)
6339 return simplifyUnaryIntrinsic(F, Call->getArgOperand(0), Q);
6340
6341 if (NumOperands == 2)
6342 return simplifyBinaryIntrinsic(F, Call->getArgOperand(0),
6343 Call->getArgOperand(1), Q);
6344
6345 // Handle intrinsics with 3 or more arguments.
6346 switch (IID) {
6347 case Intrinsic::masked_load:
6348 case Intrinsic::masked_gather: {
6349 Value *MaskArg = Call->getArgOperand(2);
6350 Value *PassthruArg = Call->getArgOperand(3);
6351 // If the mask is all zeros or undef, the "passthru" argument is the result.
6352 if (maskIsAllZeroOrUndef(MaskArg))
6353 return PassthruArg;
6354 return nullptr;
6355 }
6356 case Intrinsic::fshl:
6357 case Intrinsic::fshr: {
6358 Value *Op0 = Call->getArgOperand(0), *Op1 = Call->getArgOperand(1),
6359 *ShAmtArg = Call->getArgOperand(2);
6360
6361 // If both operands are undef, the result is undef.
6362 if (Q.isUndefValue(Op0) && Q.isUndefValue(Op1))
6363 return UndefValue::get(F->getReturnType());
6364
6365 // If shift amount is undef, assume it is zero.
6366 if (Q.isUndefValue(ShAmtArg))
6367 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
6368
6369 const APInt *ShAmtC;
6370 if (match(ShAmtArg, m_APInt(ShAmtC))) {
6371 // If there's effectively no shift, return the 1st arg or 2nd arg.
6372 APInt BitWidth = APInt(ShAmtC->getBitWidth(), ShAmtC->getBitWidth());
6373 if (ShAmtC->urem(BitWidth).isZero())
6374 return Call->getArgOperand(IID == Intrinsic::fshl ? 0 : 1);
6375 }
6376
6377 // Rotating zero by anything is zero.
6378 if (match(Op0, m_Zero()) && match(Op1, m_Zero()))
6379 return ConstantInt::getNullValue(F->getReturnType());
6380
6381 // Rotating -1 by anything is -1.
6382 if (match(Op0, m_AllOnes()) && match(Op1, m_AllOnes()))
6383 return ConstantInt::getAllOnesValue(F->getReturnType());
6384
6385 return nullptr;
6386 }
6387 case Intrinsic::experimental_constrained_fma: {
6388 Value *Op0 = Call->getArgOperand(0);
6389 Value *Op1 = Call->getArgOperand(1);
6390 Value *Op2 = Call->getArgOperand(2);
6391 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6392 if (Value *V =
6393 simplifyFPOp({Op0, Op1, Op2}, {}, Q, *FPI->getExceptionBehavior(),
6394 *FPI->getRoundingMode()))
6395 return V;
6396 return nullptr;
6397 }
6398 case Intrinsic::fma:
6399 case Intrinsic::fmuladd: {
6400 Value *Op0 = Call->getArgOperand(0);
6401 Value *Op1 = Call->getArgOperand(1);
6402 Value *Op2 = Call->getArgOperand(2);
6403 if (Value *V = simplifyFPOp({Op0, Op1, Op2}, {}, Q, fp::ebIgnore,
6404 RoundingMode::NearestTiesToEven))
6405 return V;
6406 return nullptr;
6407 }
6408 case Intrinsic::smul_fix:
6409 case Intrinsic::smul_fix_sat: {
6410 Value *Op0 = Call->getArgOperand(0);
6411 Value *Op1 = Call->getArgOperand(1);
6412 Value *Op2 = Call->getArgOperand(2);
6413 Type *ReturnType = F->getReturnType();
6414
6415 // Canonicalize constant operand as Op1 (ConstantFolding handles the case
6416 // when both Op0 and Op1 are constant so we do not care about that special
6417 // case here).
6418 if (isa<Constant>(Op0))
6419 std::swap(Op0, Op1);
6420
6421 // X * 0 -> 0
6422 if (match(Op1, m_Zero()))
6423 return Constant::getNullValue(ReturnType);
6424
6425 // X * undef -> 0
6426 if (Q.isUndefValue(Op1))
6427 return Constant::getNullValue(ReturnType);
6428
6429 // X * (1 << Scale) -> X
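    // (1 << Scale) is the fixed-point encoding of 1.0, so this is just
    // multiplication by one.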
6430 APInt ScaledOne =
6431 APInt::getOneBitSet(ReturnType->getScalarSizeInBits(),
6432 cast<ConstantInt>(Op2)->getZExtValue());
6433 if (ScaledOne.isNonNegative() && match(Op1, m_SpecificInt(ScaledOne)))
6434 return Op0;
6435
6436 return nullptr;
6437 }
6438 case Intrinsic::vector_insert: {
6439 Value *Vec = Call->getArgOperand(0);
6440 Value *SubVec = Call->getArgOperand(1);
6441 Value *Idx = Call->getArgOperand(2);
6442 Type *ReturnType = F->getReturnType();
6443
6444 // (insert_vector Y, (extract_vector X, 0), 0) -> X
6445 // where: Y is X, or Y is undef
6446 unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
6447 Value *X = nullptr;
6448 if (match(SubVec,
6449 m_Intrinsic<Intrinsic::vector_extract>(m_Value(X), m_Zero())) &&
6450 (Q.isUndefValue(Vec) || Vec == X) && IdxN == 0 &&
6451 X->getType() == ReturnType)
6452 return X;
6453
6454 return nullptr;
6455 }
6456 case Intrinsic::experimental_constrained_fadd: {
6457 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6458 return simplifyFAddInst(
6459 FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
6460 Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
6461 }
6462 case Intrinsic::experimental_constrained_fsub: {
6463 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6464 return simplifyFSubInst(
6465 FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
6466 Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
6467 }
6468 case Intrinsic::experimental_constrained_fmul: {
6469 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6470 return simplifyFMulInst(
6471 FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
6472 Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
6473 }
6474 case Intrinsic::experimental_constrained_fdiv: {
6475 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6476 return simplifyFDivInst(
6477 FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
6478 Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
6479 }
6480 case Intrinsic::experimental_constrained_frem: {
6481 auto *FPI = cast<ConstrainedFPIntrinsic>(Call);
6482 return simplifyFRemInst(
6483 FPI->getArgOperand(0), FPI->getArgOperand(1), FPI->getFastMathFlags(),
6484 Q, *FPI->getExceptionBehavior(), *FPI->getRoundingMode());
6485 }
6486 default:
6487 return nullptr;
6488 }
6489 }
6490
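/// Attempt to constant fold a call whose (non-metadata) arguments are all
/// constants.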
6491 static Value *tryConstantFoldCall(CallBase *Call, const SimplifyQuery &Q) {
6492 auto *F = dyn_cast<Function>(Call->getCalledOperand());
6493 if (!F || !canConstantFoldCallTo(Call, F))
6494 return nullptr;
6495
6496 SmallVector<Constant *, 4> ConstantArgs;
6497 unsigned NumArgs = Call->arg_size();
6498 ConstantArgs.reserve(NumArgs);
6499 for (auto &Arg : Call->args()) {
6500 Constant *C = dyn_cast<Constant>(&Arg);
6501 if (!C) {
6502 if (isa<MetadataAsValue>(Arg.get()))
6503 continue;
6504 return nullptr;
6505 }
6506 ConstantArgs.push_back(C);
6507 }
6508
6509 return ConstantFoldCall(Call, F, ConstantArgs, Q.TLI);
6510 }
6511
6512 Value *llvm::simplifyCall(CallBase *Call, const SimplifyQuery &Q) {
6513 // musttail calls can only be simplified if they are also DCEd.
6514 // As we can't guarantee this here, don't simplify them.
6515 if (Call->isMustTailCall())
6516 return nullptr;
6517
6518 // call undef -> poison
6519 // call null -> poison
6520 Value *Callee = Call->getCalledOperand();
6521 if (isa<UndefValue>(Callee) || isa<ConstantPointerNull>(Callee))
6522 return PoisonValue::get(Call->getType());
6523
6524 if (Value *V = tryConstantFoldCall(Call, Q))
6525 return V;
6526
6527 auto *F = dyn_cast<Function>(Callee);
6528 if (F && F->isIntrinsic())
6529 if (Value *Ret = simplifyIntrinsic(Call, Q))
6530 return Ret;
6531
6532 return nullptr;
6533 }
6534
6535 Value *llvm::simplifyConstrainedFPCall(CallBase *Call, const SimplifyQuery &Q) {
6536 assert(isa<ConstrainedFPIntrinsic>(Call));
6537 if (Value *V = tryConstantFoldCall(Call, Q))
6538 return V;
6539 if (Value *Ret = simplifyIntrinsic(Call, Q))
6540 return Ret;
6541 return nullptr;
6542 }
6543
6544 /// Given operands for a Freeze, see if we can fold the result.
6545 static Value *simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6546 // Use a utility function defined in ValueTracking.
6547 if (llvm::isGuaranteedNotToBeUndefOrPoison(Op0, Q.AC, Q.CxtI, Q.DT))
6548 return Op0;
6549 // We have room for improvement.
6550 return nullptr;
6551 }
6552
6553 Value *llvm::simplifyFreezeInst(Value *Op0, const SimplifyQuery &Q) {
6554 return ::simplifyFreezeInst(Op0, Q);
6555 }
6556
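/// Try to fold a non-volatile load to the constant it loads, converting the
/// pointer operand into a constant by stripping offsets if needed.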
6557 static Value *simplifyLoadInst(LoadInst *LI, Value *PtrOp,
6558 const SimplifyQuery &Q) {
6559 if (LI->isVolatile())
6560 return nullptr;
6561
6562 APInt Offset(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()), 0);
6563 auto *PtrOpC = dyn_cast<Constant>(PtrOp);
6564 // Try to convert operand into a constant by stripping offsets while looking
6565 // through invariant.group intrinsics. Don't bother if the underlying object
6566 // is not constant, as calculating GEP offsets is expensive.
6567 if (!PtrOpC && isa<Constant>(getUnderlyingObject(PtrOp))) {
6568 PtrOp = PtrOp->stripAndAccumulateConstantOffsets(
6569 Q.DL, Offset, /* AllowNonInbounds */ true,
6570 /* AllowInvariantGroup */ true);
6571 // Index size may have changed due to address space casts.
6572 Offset = Offset.sextOrTrunc(Q.DL.getIndexTypeSizeInBits(PtrOp->getType()));
6573 PtrOpC = dyn_cast<Constant>(PtrOp);
6574 }
6575
6576 if (PtrOpC)
6577 return ConstantFoldLoadFromConstPtr(PtrOpC, LI->getType(), Offset, Q.DL);
6578 return nullptr;
6579 }
6580
6581 /// See if we can compute a simplified version of this instruction.
6582 /// If not, this returns null.
6583
6584 static Value *simplifyInstructionWithOperands(Instruction *I,
6585 ArrayRef<Value *> NewOps,
6586 const SimplifyQuery &SQ,
6587 OptimizationRemarkEmitter *ORE) {
6588 const SimplifyQuery Q = SQ.CxtI ? SQ : SQ.getWithInstruction(I);
6589
6590 switch (I->getOpcode()) {
6591 default:
6592 if (llvm::all_of(NewOps, [](Value *V) { return isa<Constant>(V); })) {
6593 SmallVector<Constant *, 8> NewConstOps(NewOps.size());
6594 transform(NewOps, NewConstOps.begin(),
6595 [](Value *V) { return cast<Constant>(V); });
6596 return ConstantFoldInstOperands(I, NewConstOps, Q.DL, Q.TLI);
6597 }
6598 return nullptr;
6599 case Instruction::FNeg:
6600 return simplifyFNegInst(NewOps[0], I->getFastMathFlags(), Q);
6601 case Instruction::FAdd:
6602 return simplifyFAddInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6603 case Instruction::Add:
6604 return simplifyAddInst(NewOps[0], NewOps[1],
6605 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6606 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6607 case Instruction::FSub:
6608 return simplifyFSubInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6609 case Instruction::Sub:
6610 return simplifySubInst(NewOps[0], NewOps[1],
6611 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6612 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6613 case Instruction::FMul:
6614 return simplifyFMulInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6615 case Instruction::Mul:
6616 return simplifyMulInst(NewOps[0], NewOps[1],
6617 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6618 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6619 case Instruction::SDiv:
6620 return simplifySDivInst(NewOps[0], NewOps[1],
6621 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
6622 case Instruction::UDiv:
6623 return simplifyUDivInst(NewOps[0], NewOps[1],
6624 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
6625 case Instruction::FDiv:
6626 return simplifyFDivInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6627 case Instruction::SRem:
6628 return simplifySRemInst(NewOps[0], NewOps[1], Q);
6629 case Instruction::URem:
6630 return simplifyURemInst(NewOps[0], NewOps[1], Q);
6631 case Instruction::FRem:
6632 return simplifyFRemInst(NewOps[0], NewOps[1], I->getFastMathFlags(), Q);
6633 case Instruction::Shl:
6634 return simplifyShlInst(NewOps[0], NewOps[1],
6635 Q.IIQ.hasNoSignedWrap(cast<BinaryOperator>(I)),
6636 Q.IIQ.hasNoUnsignedWrap(cast<BinaryOperator>(I)), Q);
6637 case Instruction::LShr:
6638 return simplifyLShrInst(NewOps[0], NewOps[1],
6639 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
6640 case Instruction::AShr:
6641 return simplifyAShrInst(NewOps[0], NewOps[1],
6642 Q.IIQ.isExact(cast<BinaryOperator>(I)), Q);
6643 case Instruction::And:
6644 return simplifyAndInst(NewOps[0], NewOps[1], Q);
6645 case Instruction::Or:
6646 return simplifyOrInst(NewOps[0], NewOps[1], Q);
6647 case Instruction::Xor:
6648 return simplifyXorInst(NewOps[0], NewOps[1], Q);
6649 case Instruction::ICmp:
6650 return simplifyICmpInst(cast<ICmpInst>(I)->getPredicate(), NewOps[0],
6651 NewOps[1], Q);
6652 case Instruction::FCmp:
6653 return simplifyFCmpInst(cast<FCmpInst>(I)->getPredicate(), NewOps[0],
6654 NewOps[1], I->getFastMathFlags(), Q);
6655 case Instruction::Select:
6656 return simplifySelectInst(NewOps[0], NewOps[1], NewOps[2], Q);
6657 break;
6658 case Instruction::GetElementPtr: {
6659 auto *GEPI = cast<GetElementPtrInst>(I);
6660 return simplifyGEPInst(GEPI->getSourceElementType(), NewOps[0],
6661 ArrayRef(NewOps).slice(1), GEPI->isInBounds(), Q);
6662 }
6663 case Instruction::InsertValue: {
6664 InsertValueInst *IV = cast<InsertValueInst>(I);
6665 return simplifyInsertValueInst(NewOps[0], NewOps[1], IV->getIndices(), Q);
6666 }
6667 case Instruction::InsertElement:
6668 return simplifyInsertElementInst(NewOps[0], NewOps[1], NewOps[2], Q);
6669 case Instruction::ExtractValue: {
6670 auto *EVI = cast<ExtractValueInst>(I);
6671 return simplifyExtractValueInst(NewOps[0], EVI->getIndices(), Q);
6672 }
6673 case Instruction::ExtractElement:
6674 return simplifyExtractElementInst(NewOps[0], NewOps[1], Q);
6675 case Instruction::ShuffleVector: {
6676 auto *SVI = cast<ShuffleVectorInst>(I);
6677 return simplifyShuffleVectorInst(NewOps[0], NewOps[1],
6678 SVI->getShuffleMask(), SVI->getType(), Q);
6679 }
6680 case Instruction::PHI:
6681 return simplifyPHINode(cast<PHINode>(I), NewOps, Q);
6682 case Instruction::Call:
6683 // TODO: Use NewOps
6684 return simplifyCall(cast<CallInst>(I), Q);
6685 case Instruction::Freeze:
6686 return llvm::simplifyFreezeInst(NewOps[0], Q);
6687 #define HANDLE_CAST_INST(num, opc, clas) case Instruction::opc:
6688 #include "llvm/IR/Instruction.def"
6689 #undef HANDLE_CAST_INST
6690 return simplifyCastInst(I->getOpcode(), NewOps[0], I->getType(), Q);
6691 case Instruction::Alloca:
6692 // No simplifications for Alloca and it can't be constant folded.
6693 return nullptr;
6694 case Instruction::Load:
6695 return simplifyLoadInst(cast<LoadInst>(I), NewOps[0], Q);
6696 }
6697 }
6698
6699 Value *llvm::simplifyInstructionWithOperands(Instruction *I,
6700 ArrayRef<Value *> NewOps,
6701 const SimplifyQuery &SQ,
6702 OptimizationRemarkEmitter *ORE) {
6703 assert(NewOps.size() == I->getNumOperands() &&
6704 "Number of operands should match the instruction!");
6705 return ::simplifyInstructionWithOperands(I, NewOps, SQ, ORE);
6706 }
6707
6708 Value *llvm::simplifyInstruction(Instruction *I, const SimplifyQuery &SQ,
6709 OptimizationRemarkEmitter *ORE) {
6710 SmallVector<Value *, 8> Ops(I->operands());
6711 Value *Result = ::simplifyInstructionWithOperands(I, Ops, SQ, ORE);
6712
6713 /// If called on unreachable code, the instruction may simplify to itself.
6714 /// Make life easier for users by detecting that case here, and returning a
6715 /// safe value instead.
6716 return Result == I ? UndefValue::get(I->getType()) : Result;
6717 }
6718
6719 /// Implementation of recursive simplification through an instruction's
6720 /// uses.
6721 ///
6722 /// This is the common implementation of the recursive simplification routines.
6723 /// If we have a pre-simplified value in 'SimpleV', that is forcibly used to
6724 /// replace the instruction 'I'. Otherwise, we simply add 'I' to the list of
6725 /// instructions to process and attempt to simplify it using
6726 /// InstructionSimplify. Recursively visited users which could not be
6727 /// simplified themselves are added to the optional UnsimplifiedUsers set for
6728 /// further processing by the caller.
6729 ///
6730 /// This routine returns 'true' only when *it* simplifies something. The passed
6731 /// in simplified value does not count toward this.
6732 static bool replaceAndRecursivelySimplifyImpl(
6733 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6734 const DominatorTree *DT, AssumptionCache *AC,
6735 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers = nullptr) {
6736 bool Simplified = false;
6737 SmallSetVector<Instruction *, 8> Worklist;
6738 const DataLayout &DL = I->getModule()->getDataLayout();
6739
6740 // If we have an explicit value to collapse to, do that round of the
6741 // simplification loop by hand initially.
6742 if (SimpleV) {
6743 for (User *U : I->users())
6744 if (U != I)
6745 Worklist.insert(cast<Instruction>(U));
6746
6747 // Replace the instruction with its simplified value.
6748 I->replaceAllUsesWith(SimpleV);
6749
6750 // Gracefully handle edge cases where the instruction is not wired into any
6751 // parent block.
6752 if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
6753 !I->mayHaveSideEffects())
6754 I->eraseFromParent();
6755 } else {
6756 Worklist.insert(I);
6757 }
6758
6759 // Note that we must test the size on each iteration, as the worklist can grow.
6760 for (unsigned Idx = 0; Idx != Worklist.size(); ++Idx) {
6761 I = Worklist[Idx];
6762
6763 // See if this instruction simplifies.
6764 SimpleV = simplifyInstruction(I, {DL, TLI, DT, AC});
6765 if (!SimpleV) {
6766 if (UnsimplifiedUsers)
6767 UnsimplifiedUsers->insert(I);
6768 continue;
6769 }
6770
6771 Simplified = true;
6772
6773 // Stash away all the uses of the old instruction so we can check them for
6774 // recursive simplifications after a RAUW. This is cheaper than checking all
6775 // uses of the replacement value on the recursive step in most cases.
6776 for (User *U : I->users())
6777 Worklist.insert(cast<Instruction>(U));
6778
6779 // Replace the instruction with its simplified value.
6780 I->replaceAllUsesWith(SimpleV);
6781
6782 // Gracefully handle edge cases where the instruction is not wired into any
6783 // parent block.
6784 if (I->getParent() && !I->isEHPad() && !I->isTerminator() &&
6785 !I->mayHaveSideEffects())
6786 I->eraseFromParent();
6787 }
6788 return Simplified;
6789 }
6790
6791 bool llvm::replaceAndRecursivelySimplify(
6792 Instruction *I, Value *SimpleV, const TargetLibraryInfo *TLI,
6793 const DominatorTree *DT, AssumptionCache *AC,
6794 SmallSetVector<Instruction *, 8> *UnsimplifiedUsers) {
6795 assert(I != SimpleV && "replaceAndRecursivelySimplify(X,X) is not valid!");
6796 assert(SimpleV && "Must provide a simplified value.");
6797 return replaceAndRecursivelySimplifyImpl(I, SimpleV, TLI, DT, AC,
6798 UnsimplifiedUsers);
6799 }
6800
6801 namespace llvm {
6802 const SimplifyQuery getBestSimplifyQuery(Pass &P, Function &F) {
6803 auto *DTWP = P.getAnalysisIfAvailable<DominatorTreeWrapperPass>();
6804 auto *DT = DTWP ? &DTWP->getDomTree() : nullptr;
6805 auto *TLIWP = P.getAnalysisIfAvailable<TargetLibraryInfoWrapperPass>();
6806 auto *TLI = TLIWP ? &TLIWP->getTLI(F) : nullptr;
6807 auto *ACWP = P.getAnalysisIfAvailable<AssumptionCacheTracker>();
6808 auto *AC = ACWP ? &ACWP->getAssumptionCache(F) : nullptr;
6809 return {F.getParent()->getDataLayout(), TLI, DT, AC};
6810 }
6811
6812 const SimplifyQuery getBestSimplifyQuery(LoopStandardAnalysisResults &AR,
6813 const DataLayout &DL) {
6814 return {DL, &AR.TLI, &AR.DT, &AR.AC};
6815 }
6816
6817 template <class T, class... TArgs>
6818 const SimplifyQuery getBestSimplifyQuery(AnalysisManager<T, TArgs...> &AM,
6819 Function &F) {
6820 auto *DT = AM.template getCachedResult<DominatorTreeAnalysis>(F);
6821 auto *TLI = AM.template getCachedResult<TargetLibraryAnalysis>(F);
6822 auto *AC = AM.template getCachedResult<AssumptionAnalysis>(F);
6823 return {F.getParent()->getDataLayout(), TLI, DT, AC};
6824 }
6825 template const SimplifyQuery getBestSimplifyQuery(AnalysisManager<Function> &,
6826 Function &);
6827 } // namespace llvm
6828
6829 void InstSimplifyFolder::anchor() {}
6830