1 //===- InstCombineSelect.cpp ----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitSelect function.
10 //
11 //===----------------------------------------------------------------------===//
12
#include "InstCombineInternal.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/AssumptionCache.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombineWorklist.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <cassert>
#include <utility>
44
45 using namespace llvm;
46 using namespace PatternMatch;
47
48 #define DEBUG_TYPE "instcombine"
49
/// Command-line escape hatch for the poison-unsafe "select -> and/or"
/// canonicalization; defaults to on.
/// FIXME: Enabled by default until the pattern is supported well.
static cl::opt<bool> EnableUnsafeSelectTransform(
    "instcombine-unsafe-select-transform", cl::init(true),
    cl::desc("Enable poison-unsafe select to and/or transform"));
54
createMinMax(InstCombiner::BuilderTy & Builder,SelectPatternFlavor SPF,Value * A,Value * B)55 static Value *createMinMax(InstCombiner::BuilderTy &Builder,
56 SelectPatternFlavor SPF, Value *A, Value *B) {
57 CmpInst::Predicate Pred = getMinMaxPred(SPF);
58 assert(CmpInst::isIntPredicate(Pred) && "Expected integer predicate");
59 return Builder.CreateSelect(Builder.CreateICmp(Pred, A, B), A, B);
60 }
61
/// Replace a select operand based on an equality comparison with the identity
/// constant of a binop.
static Instruction *foldSelectBinOpIdentity(SelectInst &Sel,
                                            const TargetLibraryInfo &TLI,
                                            InstCombinerImpl &IC) {
  // The select condition must be an equality compare with a constant operand.
  Value *X;
  Constant *C;
  CmpInst::Predicate Pred;
  if (!match(Sel.getCondition(), m_Cmp(Pred, m_Value(X), m_Constant(C))))
    return nullptr;

  // Determine which select arm is taken when the compare is true. Only integer
  // equality and the FP predicates FCMP_OEQ / FCMP_UNE are handled; other
  // predicates do not guarantee X == C on the chosen arm.
  bool IsEq;
  if (ICmpInst::isEquality(Pred))
    IsEq = Pred == ICmpInst::ICMP_EQ;
  else if (Pred == FCmpInst::FCMP_OEQ)
    IsEq = true;
  else if (Pred == FCmpInst::FCMP_UNE)
    IsEq = false;
  else
    return nullptr;

  // A select operand must be a binop.
  BinaryOperator *BO;
  if (!match(Sel.getOperand(IsEq ? 1 : 2), m_BinOp(BO)))
    return nullptr;

  // The compare constant must be the identity constant for that binop.
  // If this a floating-point compare with 0.0, any zero constant will do.
  Type *Ty = BO->getType();
  Constant *IdC = ConstantExpr::getBinOpIdentity(BO->getOpcode(), Ty, true);
  if (IdC != C) {
    if (!IdC || !CmpInst::isFPPredicate(Pred))
      return nullptr;
    if (!match(IdC, m_AnyZeroFP()) || !match(C, m_AnyZeroFP()))
      return nullptr;
  }

  // Last, match the compare variable operand with a binop operand.
  // For a non-commutative binop, X must be the second operand (the identity
  // position); the commutative matcher below then binds Y on either side.
  Value *Y;
  if (!BO->isCommutative() && !match(BO, m_BinOp(m_Value(Y), m_Specific(X))))
    return nullptr;
  if (!match(BO, m_c_BinOp(m_Value(Y), m_Specific(X))))
    return nullptr;

  // +0.0 compares equal to -0.0, and so it does not behave as required for this
  // transform. Bail out if we can not exclude that possibility.
  if (isa<FPMathOperator>(BO))
    if (!BO->hasNoSignedZeros() && !CannotBeNegativeZero(Y, &TLI))
      return nullptr;

  // BO = binop Y, X
  // S = { select (cmp eq X, C), BO, ? } or { select (cmp ne X, C), ?, BO }
  // =>
  // S = { select (cmp eq X, C), Y, ? } or { select (cmp ne X, C), ?, Y }
  return IC.replaceOperand(Sel, IsEq ? 1 : 2, Y);
}
119
120 /// This folds:
121 /// select (icmp eq (and X, C1)), TC, FC
122 /// iff C1 is a power 2 and the difference between TC and FC is a power-of-2.
123 /// To something like:
124 /// (shr (and (X, C1)), (log2(C1) - log2(TC-FC))) + FC
125 /// Or:
126 /// (shl (and (X, C1)), (log2(TC-FC) - log2(C1))) + FC
127 /// With some variations depending if FC is larger than TC, or the shift
128 /// isn't needed, or the bit widths don't match.
static Value *foldSelectICmpAnd(SelectInst &Sel, ICmpInst *Cmp,
                                InstCombiner::BuilderTy &Builder) {
  // Both select arms must be integer constants (scalar or splat).
  const APInt *SelTC, *SelFC;
  if (!match(Sel.getTrueValue(), m_APInt(SelTC)) ||
      !match(Sel.getFalseValue(), m_APInt(SelFC)))
    return nullptr;

  // If this is a vector select, we need a vector compare.
  Type *SelType = Sel.getType();
  if (SelType->isVectorTy() != Cmp->getType()->isVectorTy())
    return nullptr;

  // Match the condition as a single-bit test of V: either an explicit
  // "(V & Power2) ==/!= 0", or a predicate that decomposeBitTestICmp can
  // rewrite into that form (in which case we must materialize the 'and').
  Value *V;
  APInt AndMask;
  bool CreateAnd = false;
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  if (ICmpInst::isEquality(Pred)) {
    if (!match(Cmp->getOperand(1), m_Zero()))
      return nullptr;

    V = Cmp->getOperand(0);
    const APInt *AndRHS;
    if (!match(V, m_And(m_Value(), m_Power2(AndRHS))))
      return nullptr;

    AndMask = *AndRHS;
  } else if (decomposeBitTestICmp(Cmp->getOperand(0), Cmp->getOperand(1),
                                  Pred, V, AndMask)) {
    assert(ICmpInst::isEquality(Pred) && "Not equality test?");
    if (!AndMask.isPowerOf2())
      return nullptr;

    CreateAnd = true;
  } else {
    return nullptr;
  }

  // In general, when both constants are non-zero, we would need an offset to
  // replace the select. This would require more instructions than we started
  // with. But there's one special-case that we handle here because it can
  // simplify/reduce the instructions.
  APInt TC = *SelTC;
  APInt FC = *SelFC;
  if (!TC.isNullValue() && !FC.isNullValue()) {
    // If the select constants differ by exactly one bit and that's the same
    // bit that is masked and checked by the select condition, the select can
    // be replaced by bitwise logic to set/clear one bit of the constant result.
    if (TC.getBitWidth() != AndMask.getBitWidth() || (TC ^ FC) != AndMask)
      return nullptr;
    if (CreateAnd) {
      // If we have to create an 'and', then we must kill the cmp to not
      // increase the instruction count.
      if (!Cmp->hasOneUse())
        return nullptr;
      V = Builder.CreateAnd(V, ConstantInt::get(SelType, AndMask));
    }
    bool ExtraBitInTC = TC.ugt(FC);
    if (Pred == ICmpInst::ICMP_EQ) {
      // If the masked bit in V is clear, clear or set the bit in the result:
      // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) ^ TC
      // (V & AndMaskC) == 0 ? TC : FC --> (V & AndMaskC) | TC
      Constant *C = ConstantInt::get(SelType, TC);
      return ExtraBitInTC ? Builder.CreateXor(V, C) : Builder.CreateOr(V, C);
    }
    if (Pred == ICmpInst::ICMP_NE) {
      // If the masked bit in V is set, set or clear the bit in the result:
      // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) | FC
      // (V & AndMaskC) != 0 ? TC : FC --> (V & AndMaskC) ^ FC
      Constant *C = ConstantInt::get(SelType, FC);
      return ExtraBitInTC ? Builder.CreateOr(V, C) : Builder.CreateXor(V, C);
    }
    llvm_unreachable("Only expecting equality predicates");
  }

  // Make sure one of the select arms is a power-of-2.
  if (!TC.isPowerOf2() && !FC.isPowerOf2())
    return nullptr;

  // Determine which shift is needed to transform result of the 'and' into the
  // desired result. logBase2 gives the position of the single set bit.
  const APInt &ValC = !TC.isNullValue() ? TC : FC;
  unsigned ValZeros = ValC.logBase2();
  unsigned AndZeros = AndMask.logBase2();

  // Insert the 'and' instruction on the input to the truncate.
  if (CreateAnd)
    V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), AndMask));

  // If types don't match, we can still convert the select by introducing a zext
  // or a trunc of the 'and'.
  if (ValZeros > AndZeros) {
    V = Builder.CreateZExtOrTrunc(V, SelType);
    V = Builder.CreateShl(V, ValZeros - AndZeros);
  } else if (ValZeros < AndZeros) {
    V = Builder.CreateLShr(V, AndZeros - ValZeros);
    V = Builder.CreateZExtOrTrunc(V, SelType);
  } else {
    V = Builder.CreateZExtOrTrunc(V, SelType);
  }

  // Okay, now we know that everything is set up, we just don't know whether we
  // have a icmp_ne or icmp_eq and whether the true or false val is the zero.
  bool ShouldNotVal = !TC.isNullValue();
  ShouldNotVal ^= Pred == ICmpInst::ICMP_NE;
  if (ShouldNotVal)
    V = Builder.CreateXor(V, ValC);

  return V;
}
238
239 /// We want to turn code that looks like this:
240 /// %C = or %A, %B
241 /// %D = select %cond, %C, %A
242 /// into:
243 /// %C = select %cond, %B, 0
244 /// %D = or %A, %C
245 ///
246 /// Assuming that the specified instruction is an operand to the select, return
247 /// a bitmask indicating which operands of this instruction are foldable if they
248 /// equal the other incoming value of the select.
getSelectFoldableOperands(BinaryOperator * I)249 static unsigned getSelectFoldableOperands(BinaryOperator *I) {
250 switch (I->getOpcode()) {
251 case Instruction::Add:
252 case Instruction::Mul:
253 case Instruction::And:
254 case Instruction::Or:
255 case Instruction::Xor:
256 return 3; // Can fold through either operand.
257 case Instruction::Sub: // Can only fold on the amount subtracted.
258 case Instruction::Shl: // Can only fold on the shift amount.
259 case Instruction::LShr:
260 case Instruction::AShr:
261 return 1;
262 default:
263 return 0; // Cannot fold
264 }
265 }
266
267 /// We have (select c, TI, FI), and we know that TI and FI have the same opcode.
Instruction *InstCombinerImpl::foldSelectOpOp(SelectInst &SI, Instruction *TI,
                                              Instruction *FI) {
  // Don't break up min/max patterns. The hasOneUse checks below prevent that
  // for most cases, but vector min/max with bitcasts can be transformed. If the
  // one-use restrictions are eased for other patterns, we still don't want to
  // obfuscate min/max.
  if ((match(&SI, m_SMin(m_Value(), m_Value())) ||
       match(&SI, m_SMax(m_Value(), m_Value())) ||
       match(&SI, m_UMin(m_Value(), m_Value())) ||
       match(&SI, m_UMax(m_Value(), m_Value()))))
    return nullptr;

  // If this is a cast from the same type, merge.
  Value *Cond = SI.getCondition();
  Type *CondTy = Cond->getType();
  if (TI->getNumOperands() == 1 && TI->isCast()) {
    // Both casts must consume the same source type.
    Type *FIOpndTy = FI->getOperand(0)->getType();
    if (TI->getOperand(0)->getType() != FIOpndTy)
      return nullptr;

    // The select condition may be a vector. We may only change the operand
    // type if the vector width remains the same (and matches the condition).
    if (auto *CondVTy = dyn_cast<VectorType>(CondTy)) {
      if (!FIOpndTy->isVectorTy() ||
          CondVTy->getElementCount() !=
              cast<VectorType>(FIOpndTy)->getElementCount())
        return nullptr;

      // TODO: If the backend knew how to deal with casts better, we could
      // remove this limitation. For now, there's too much potential to create
      // worse codegen by promoting the select ahead of size-altering casts
      // (PR28160).
      //
      // Note that ValueTracking's matchSelectPattern() looks through casts
      // without checking 'hasOneUse' when it matches min/max patterns, so this
      // transform may end up happening anyway.
      if (TI->getOpcode() != Instruction::BitCast &&
          (!TI->hasOneUse() || !FI->hasOneUse()))
        return nullptr;
    } else if (!TI->hasOneUse() || !FI->hasOneUse()) {
      // TODO: The one-use restrictions for a scalar select could be eased if
      // the fold of a select in visitLoadInst() was enhanced to match a pattern
      // that includes a cast.
      return nullptr;
    }

    // Fold this by inserting a select from the input values.
    Value *NewSI =
        Builder.CreateSelect(Cond, TI->getOperand(0), FI->getOperand(0),
                             SI.getName() + ".v", &SI);
    return CastInst::Create(Instruction::CastOps(TI->getOpcode()), NewSI,
                            TI->getType());
  }

  // Cond ? -X : -Y --> -(Cond ? X : Y)
  Value *X, *Y;
  if (match(TI, m_FNeg(m_Value(X))) && match(FI, m_FNeg(m_Value(Y))) &&
      (TI->hasOneUse() || FI->hasOneUse())) {
    // The new fneg takes the FMF of the true-side fneg.
    Value *NewSel = Builder.CreateSelect(Cond, X, Y, SI.getName() + ".v", &SI);
    return UnaryOperator::CreateFNegFMF(NewSel, TI);
  }

  // Only handle binary operators (including two-operand getelementptr) with
  // one-use here. As with the cast case above, it may be possible to relax the
  // one-use constraint, but that needs be examined carefully since it may not
  // reduce the total number of instructions.
  if (TI->getNumOperands() != 2 || FI->getNumOperands() != 2 ||
      (!isa<BinaryOperator>(TI) && !isa<GetElementPtrInst>(TI)) ||
      !TI->hasOneUse() || !FI->hasOneUse())
    return nullptr;

  // Figure out if the operations have any operands in common.
  // Same-position matches are tried first; cross-position matches are only
  // legal when the operation is commutative.
  Value *MatchOp, *OtherOpT, *OtherOpF;
  bool MatchIsOpZero;
  if (TI->getOperand(0) == FI->getOperand(0)) {
    MatchOp = TI->getOperand(0);
    OtherOpT = TI->getOperand(1);
    OtherOpF = FI->getOperand(1);
    MatchIsOpZero = true;
  } else if (TI->getOperand(1) == FI->getOperand(1)) {
    MatchOp = TI->getOperand(1);
    OtherOpT = TI->getOperand(0);
    OtherOpF = FI->getOperand(0);
    MatchIsOpZero = false;
  } else if (!TI->isCommutative()) {
    return nullptr;
  } else if (TI->getOperand(0) == FI->getOperand(1)) {
    MatchOp = TI->getOperand(0);
    OtherOpT = TI->getOperand(1);
    OtherOpF = FI->getOperand(0);
    MatchIsOpZero = true;
  } else if (TI->getOperand(1) == FI->getOperand(0)) {
    MatchOp = TI->getOperand(1);
    OtherOpT = TI->getOperand(0);
    OtherOpF = FI->getOperand(1);
    MatchIsOpZero = true;
  } else {
    return nullptr;
  }

  // If the select condition is a vector, the operands of the original select's
  // operands also must be vectors. This may not be the case for getelementptr
  // for example.
  if (CondTy->isVectorTy() && (!OtherOpT->getType()->isVectorTy() ||
                               !OtherOpF->getType()->isVectorTy()))
    return nullptr;

  // If we reach here, they do have operations in common.
  Value *NewSI = Builder.CreateSelect(Cond, OtherOpT, OtherOpF,
                                      SI.getName() + ".v", &SI);
  Value *Op0 = MatchIsOpZero ? MatchOp : NewSI;
  Value *Op1 = MatchIsOpZero ? NewSI : MatchOp;
  if (auto *BO = dyn_cast<BinaryOperator>(TI)) {
    // The merged binop may only keep flags common to both originals.
    BinaryOperator *NewBO = BinaryOperator::Create(BO->getOpcode(), Op0, Op1);
    NewBO->copyIRFlags(TI);
    NewBO->andIRFlags(FI);
    return NewBO;
  }
  if (auto *TGEP = dyn_cast<GetElementPtrInst>(TI)) {
    // The merged GEP is inbounds only if both originals were.
    auto *FGEP = cast<GetElementPtrInst>(FI);
    Type *ElementType = TGEP->getResultElementType();
    return TGEP->isInBounds() && FGEP->isInBounds()
               ? GetElementPtrInst::CreateInBounds(ElementType, Op0, {Op1})
               : GetElementPtrInst::Create(ElementType, Op0, {Op1});
  }
  llvm_unreachable("Expected BinaryOperator or GEP");
  return nullptr;
}
396
isSelect01(const APInt & C1I,const APInt & C2I)397 static bool isSelect01(const APInt &C1I, const APInt &C2I) {
398 if (!C1I.isNullValue() && !C2I.isNullValue()) // One side must be zero.
399 return false;
400 return C1I.isOneValue() || C1I.isAllOnesValue() ||
401 C2I.isOneValue() || C2I.isAllOnesValue();
402 }
403
404 /// Try to fold the select into one of the operands to allow further
405 /// optimization.
foldSelectIntoOp(SelectInst & SI,Value * TrueVal,Value * FalseVal)406 Instruction *InstCombinerImpl::foldSelectIntoOp(SelectInst &SI, Value *TrueVal,
407 Value *FalseVal) {
408 // See the comment above GetSelectFoldableOperands for a description of the
409 // transformation we are doing here.
410 if (auto *TVI = dyn_cast<BinaryOperator>(TrueVal)) {
411 if (TVI->hasOneUse() && !isa<Constant>(FalseVal)) {
412 if (unsigned SFO = getSelectFoldableOperands(TVI)) {
413 unsigned OpToFold = 0;
414 if ((SFO & 1) && FalseVal == TVI->getOperand(0)) {
415 OpToFold = 1;
416 } else if ((SFO & 2) && FalseVal == TVI->getOperand(1)) {
417 OpToFold = 2;
418 }
419
420 if (OpToFold) {
421 Constant *C = ConstantExpr::getBinOpIdentity(TVI->getOpcode(),
422 TVI->getType(), true);
423 Value *OOp = TVI->getOperand(2-OpToFold);
424 // Avoid creating select between 2 constants unless it's selecting
425 // between 0, 1 and -1.
426 const APInt *OOpC;
427 bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
428 if (!isa<Constant>(OOp) ||
429 (OOpIsAPInt && isSelect01(C->getUniqueInteger(), *OOpC))) {
430 Value *NewSel = Builder.CreateSelect(SI.getCondition(), OOp, C);
431 NewSel->takeName(TVI);
432 BinaryOperator *BO = BinaryOperator::Create(TVI->getOpcode(),
433 FalseVal, NewSel);
434 BO->copyIRFlags(TVI);
435 return BO;
436 }
437 }
438 }
439 }
440 }
441
442 if (auto *FVI = dyn_cast<BinaryOperator>(FalseVal)) {
443 if (FVI->hasOneUse() && !isa<Constant>(TrueVal)) {
444 if (unsigned SFO = getSelectFoldableOperands(FVI)) {
445 unsigned OpToFold = 0;
446 if ((SFO & 1) && TrueVal == FVI->getOperand(0)) {
447 OpToFold = 1;
448 } else if ((SFO & 2) && TrueVal == FVI->getOperand(1)) {
449 OpToFold = 2;
450 }
451
452 if (OpToFold) {
453 Constant *C = ConstantExpr::getBinOpIdentity(FVI->getOpcode(),
454 FVI->getType(), true);
455 Value *OOp = FVI->getOperand(2-OpToFold);
456 // Avoid creating select between 2 constants unless it's selecting
457 // between 0, 1 and -1.
458 const APInt *OOpC;
459 bool OOpIsAPInt = match(OOp, m_APInt(OOpC));
460 if (!isa<Constant>(OOp) ||
461 (OOpIsAPInt && isSelect01(C->getUniqueInteger(), *OOpC))) {
462 Value *NewSel = Builder.CreateSelect(SI.getCondition(), C, OOp);
463 NewSel->takeName(FVI);
464 BinaryOperator *BO = BinaryOperator::Create(FVI->getOpcode(),
465 TrueVal, NewSel);
466 BO->copyIRFlags(FVI);
467 return BO;
468 }
469 }
470 }
471 }
472 }
473
474 return nullptr;
475 }
476
477 /// We want to turn:
478 /// (select (icmp eq (and X, Y), 0), (and (lshr X, Z), 1), 1)
479 /// into:
480 /// zext (icmp ne i32 (and X, (or Y, (shl 1, Z))), 0)
481 /// Note:
482 /// Z may be 0 if lshr is missing.
483 /// Worst-case scenario is that we will replace 5 instructions with 5 different
484 /// instructions, but we got rid of select.
static Instruction *foldSelectICmpAndAnd(Type *SelType, const ICmpInst *Cmp,
                                         Value *TVal, Value *FVal,
                                         InstCombiner::BuilderTy &Builder) {
  // The compare must be a one-use "(X & Y) == 0" and the false arm must be
  // the constant 1.
  if (!(Cmp->hasOneUse() && Cmp->getOperand(0)->hasOneUse() &&
        Cmp->getPredicate() == ICmpInst::ICMP_EQ &&
        match(Cmp->getOperand(1), m_Zero()) && match(FVal, m_One())))
    return nullptr;

  // The TrueVal has general form of:  and %B, 1
  Value *B;
  if (!match(TVal, m_OneUse(m_And(m_Value(B), m_One()))))
    return nullptr;

  // Where %B may be optionally shifted:  lshr %X, %Z.
  Value *X, *Z;
  const bool HasShift = match(B, m_OneUse(m_LShr(m_Value(X), m_Value(Z))));
  if (!HasShift)
    X = B;

  // The shifted value (or %B itself when there is no shift) must also be the
  // value that is masked in the compare.
  Value *Y;
  if (!match(Cmp->getOperand(0), m_c_And(m_Specific(X), m_Value(Y))))
    return nullptr;

  // ((X & Y) == 0) ? ((X >> Z) & 1) : 1 --> (X & (Y | (1 << Z))) != 0
  // ((X & Y) == 0) ? (X & 1) : 1 --> (X & (Y | 1)) != 0
  Constant *One = ConstantInt::get(SelType, 1);
  Value *MaskB = HasShift ? Builder.CreateShl(One, Z) : One;
  Value *FullMask = Builder.CreateOr(Y, MaskB);
  Value *MaskedX = Builder.CreateAnd(X, FullMask);
  Value *ICmpNeZero = Builder.CreateIsNotNull(MaskedX);
  return new ZExtInst(ICmpNeZero, SelType);
}
517
518 /// We want to turn:
519 /// (select (icmp sgt x, C), lshr (X, Y), ashr (X, Y)); iff C s>= -1
520 /// (select (icmp slt x, C), ashr (X, Y), lshr (X, Y)); iff C s>= 0
521 /// into:
522 /// ashr (X, Y)
static Value *foldSelectICmpLshrAshr(const ICmpInst *IC, Value *TrueVal,
                                     Value *FalseVal,
                                     InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate Pred = IC->getPredicate();
  Value *CmpLHS = IC->getOperand(0);
  Value *CmpRHS = IC->getOperand(1);
  if (!CmpRHS->getType()->isIntOrIntVectorTy())
    return nullptr;

  Value *X, *Y;
  unsigned Bitwidth = CmpRHS->getType()->getScalarSizeInBits();
  // Accept only (sgt X, C) with C s>= -1, or (slt X, C) with C s>= 0 — the
  // constant ranges documented above under which lshr and ashr agree on the
  // selected arm.
  if ((Pred != ICmpInst::ICMP_SGT ||
       !match(CmpRHS,
              m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, -1)))) &&
      (Pred != ICmpInst::ICMP_SLT ||
       !match(CmpRHS,
              m_SpecificInt_ICMP(ICmpInst::ICMP_SGE, APInt(Bitwidth, 0)))))
    return nullptr;

  // Canonicalize so that ashr is in FalseVal.
  if (Pred == ICmpInst::ICMP_SLT)
    std::swap(TrueVal, FalseVal);

  // Both shifts must use the same operands, and the compared value must be
  // the shifted value itself.
  if (match(TrueVal, m_LShr(m_Value(X), m_Value(Y))) &&
      match(FalseVal, m_AShr(m_Specific(X), m_Specific(Y))) &&
      match(CmpLHS, m_Specific(X))) {
    const auto *Ashr = cast<Instruction>(FalseVal);
    // if lshr is not exact and ashr is, this new ashr must not be exact.
    bool IsExact = Ashr->isExact() && cast<Instruction>(TrueVal)->isExact();
    return Builder.CreateAShr(X, Y, IC->getName(), IsExact);
  }

  return nullptr;
}
557
558 /// We want to turn:
559 /// (select (icmp eq (and X, C1), 0), Y, (or Y, C2))
560 /// into:
561 /// (or (shl (and X, C1), C3), Y)
562 /// iff:
563 /// C1 and C2 are both powers of 2
564 /// where:
565 /// C3 = Log(C2) - Log(C1)
566 ///
567 /// This transform handles cases where:
568 /// 1. The icmp predicate is inverted
569 /// 2. The select operands are reversed
570 /// 3. The magnitude of C2 and C1 are flipped
static Value *foldSelectICmpAndOr(const ICmpInst *IC, Value *TrueVal,
                                  Value *FalseVal,
                                  InstCombiner::BuilderTy &Builder) {
  // Only handle integer compares. Also, if this is a vector select, we need a
  // vector compare.
  if (!TrueVal->getType()->isIntOrIntVectorTy() ||
      TrueVal->getType()->isVectorTy() != IC->getType()->isVectorTy())
    return nullptr;

  Value *CmpLHS = IC->getOperand(0);
  Value *CmpRHS = IC->getOperand(1);

  // C1Log is the bit position tested by the compare; IsEqualZero records
  // whether the TrueVal arm corresponds to that bit being clear.
  Value *V;
  unsigned C1Log;
  bool IsEqualZero;
  bool NeedAnd = false;
  if (IC->isEquality()) {
    if (!match(CmpRHS, m_Zero()))
      return nullptr;

    const APInt *C1;
    if (!match(CmpLHS, m_And(m_Value(), m_Power2(C1))))
      return nullptr;

    V = CmpLHS;
    C1Log = C1->logBase2();
    IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_EQ;
  } else if (IC->getPredicate() == ICmpInst::ICMP_SLT ||
             IC->getPredicate() == ICmpInst::ICMP_SGT) {
    // We also need to recognize (icmp slt (trunc (X)), 0) and
    // (icmp sgt (trunc (X)), -1).
    IsEqualZero = IC->getPredicate() == ICmpInst::ICMP_SGT;
    if ((IsEqualZero && !match(CmpRHS, m_AllOnes())) ||
        (!IsEqualZero && !match(CmpRHS, m_Zero())))
      return nullptr;

    if (!match(CmpLHS, m_OneUse(m_Trunc(m_Value(V)))))
      return nullptr;

    // The sign test examines the top bit of the truncated type.
    C1Log = CmpLHS->getType()->getScalarSizeInBits() - 1;
    NeedAnd = true;
  } else {
    return nullptr;
  }

  // One select arm must be (or of the other arm with a power-of-2 constant).
  const APInt *C2;
  bool OrOnTrueVal = false;
  bool OrOnFalseVal = match(FalseVal, m_Or(m_Specific(TrueVal), m_Power2(C2)));
  if (!OrOnFalseVal)
    OrOnTrueVal = match(TrueVal, m_Or(m_Specific(FalseVal), m_Power2(C2)));

  if (!OrOnFalseVal && !OrOnTrueVal)
    return nullptr;

  Value *Y = OrOnFalseVal ? TrueVal : FalseVal;

  unsigned C2Log = C2->logBase2();

  bool NeedXor = (!IsEqualZero && OrOnFalseVal) || (IsEqualZero && OrOnTrueVal);
  bool NeedShift = C1Log != C2Log;
  bool NeedZExtTrunc = Y->getType()->getScalarSizeInBits() !=
                       V->getType()->getScalarSizeInBits();

  // Make sure we don't create more instructions than we save.
  Value *Or = OrOnFalseVal ? FalseVal : TrueVal;
  if ((NeedShift + NeedXor + NeedZExtTrunc) >
      (IC->hasOneUse() + Or->hasOneUse()))
    return nullptr;

  if (NeedAnd) {
    // Insert the AND instruction on the input to the truncate.
    APInt C1 = APInt::getOneBitSet(V->getType()->getScalarSizeInBits(), C1Log);
    V = Builder.CreateAnd(V, ConstantInt::get(V->getType(), C1));
  }

  // Shift (and resize) the isolated bit from position C1Log to C2Log.
  if (C2Log > C1Log) {
    V = Builder.CreateZExtOrTrunc(V, Y->getType());
    V = Builder.CreateShl(V, C2Log - C1Log);
  } else if (C1Log > C2Log) {
    V = Builder.CreateLShr(V, C1Log - C2Log);
    V = Builder.CreateZExtOrTrunc(V, Y->getType());
  } else
    V = Builder.CreateZExtOrTrunc(V, Y->getType());

  if (NeedXor)
    V = Builder.CreateXor(V, *C2);

  return Builder.CreateOr(V, Y);
}
660
661 /// Canonicalize a set or clear of a masked set of constant bits to
662 /// select-of-constants form.
foldSetClearBits(SelectInst & Sel,InstCombiner::BuilderTy & Builder)663 static Instruction *foldSetClearBits(SelectInst &Sel,
664 InstCombiner::BuilderTy &Builder) {
665 Value *Cond = Sel.getCondition();
666 Value *T = Sel.getTrueValue();
667 Value *F = Sel.getFalseValue();
668 Type *Ty = Sel.getType();
669 Value *X;
670 const APInt *NotC, *C;
671
672 // Cond ? (X & ~C) : (X | C) --> (X & ~C) | (Cond ? 0 : C)
673 if (match(T, m_And(m_Value(X), m_APInt(NotC))) &&
674 match(F, m_OneUse(m_Or(m_Specific(X), m_APInt(C)))) && *NotC == ~(*C)) {
675 Constant *Zero = ConstantInt::getNullValue(Ty);
676 Constant *OrC = ConstantInt::get(Ty, *C);
677 Value *NewSel = Builder.CreateSelect(Cond, Zero, OrC, "masksel", &Sel);
678 return BinaryOperator::CreateOr(T, NewSel);
679 }
680
681 // Cond ? (X | C) : (X & ~C) --> (X & ~C) | (Cond ? C : 0)
682 if (match(F, m_And(m_Value(X), m_APInt(NotC))) &&
683 match(T, m_OneUse(m_Or(m_Specific(X), m_APInt(C)))) && *NotC == ~(*C)) {
684 Constant *Zero = ConstantInt::getNullValue(Ty);
685 Constant *OrC = ConstantInt::get(Ty, *C);
686 Value *NewSel = Builder.CreateSelect(Cond, OrC, Zero, "masksel", &Sel);
687 return BinaryOperator::CreateOr(F, NewSel);
688 }
689
690 return nullptr;
691 }
692
693 /// Transform patterns such as (a > b) ? a - b : 0 into usub.sat(a, b).
694 /// There are 8 commuted/swapped variants of this pattern.
695 /// TODO: Also support a - UMIN(a,b) patterns.
static Value *canonicalizeSaturatedSubtract(const ICmpInst *ICI,
                                            const Value *TrueVal,
                                            const Value *FalseVal,
                                            InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate Pred = ICI->getPredicate();
  if (!ICmpInst::isUnsigned(Pred))
    return nullptr;

  // (b > a) ? 0 : a - b -> (b <= a) ? a - b : 0
  if (match(TrueVal, m_Zero())) {
    Pred = ICmpInst::getInversePredicate(Pred);
    std::swap(TrueVal, FalseVal);
  }
  if (!match(FalseVal, m_Zero()))
    return nullptr;

  Value *A = ICI->getOperand(0);
  Value *B = ICI->getOperand(1);
  if (Pred == ICmpInst::ICMP_ULE || Pred == ICmpInst::ICMP_ULT) {
    // (b < a) ? a - b : 0 -> (a > b) ? a - b : 0
    std::swap(A, B);
    Pred = ICmpInst::getSwappedPredicate(Pred);
  }

  assert((Pred == ICmpInst::ICMP_UGE || Pred == ICmpInst::ICMP_UGT) &&
         "Unexpected isUnsigned predicate!");

  // Ensure the sub is of the form:
  //  (a > b) ? a - b : 0 -> usub.sat(a, b)
  //  (a > b) ? b - a : 0 -> -usub.sat(a, b)
  // Checking for both a-b and a+(-b) as a constant.
  bool IsNegative = false;
  const APInt *C;
  if (match(TrueVal, m_Sub(m_Specific(B), m_Specific(A))) ||
      (match(A, m_APInt(C)) &&
       match(TrueVal, m_Add(m_Specific(B), m_SpecificInt(-*C)))))
    IsNegative = true;
  else if (!match(TrueVal, m_Sub(m_Specific(A), m_Specific(B))) &&
           !(match(B, m_APInt(C)) &&
             match(TrueVal, m_Add(m_Specific(A), m_SpecificInt(-*C)))))
    return nullptr;

  // If we are adding a negate and the sub and icmp are used anywhere else, we
  // would end up with more instructions.
  if (IsNegative && !TrueVal->hasOneUse() && !ICI->hasOneUse())
    return nullptr;

  // (a > b) ? a - b : 0 -> usub.sat(a, b)
  // (a > b) ? b - a : 0 -> -usub.sat(a, b)
  Value *Result = Builder.CreateBinaryIntrinsic(Intrinsic::usub_sat, A, B);
  if (IsNegative)
    Result = Builder.CreateNeg(Result);
  return Result;
}
750
/// Try to replace a select that implements unsigned saturating addition with
/// a call to the uadd.sat intrinsic. Handles the constant form
/// (X u< ~C) ? (X + C) : -1 and several commuted two-variable forms.
static Value *canonicalizeSaturatedAdd(ICmpInst *Cmp, Value *TVal, Value *FVal,
                                       InstCombiner::BuilderTy &Builder) {
  if (!Cmp->hasOneUse())
    return nullptr;

  // Match unsigned saturated add with constant.
  Value *Cmp0 = Cmp->getOperand(0);
  Value *Cmp1 = Cmp->getOperand(1);
  ICmpInst::Predicate Pred = Cmp->getPredicate();
  Value *X;
  const APInt *C, *CmpC;
  if (Pred == ICmpInst::ICMP_ULT &&
      match(TVal, m_Add(m_Value(X), m_APInt(C))) && X == Cmp0 &&
      match(FVal, m_AllOnes()) && match(Cmp1, m_APInt(CmpC)) && *CmpC == ~*C) {
    // (X u< ~C) ? (X + C) : -1 --> uadd.sat(X, C)
    return Builder.CreateBinaryIntrinsic(
        Intrinsic::uadd_sat, X, ConstantInt::get(X->getType(), *C));
  }

  // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
  // There are 8 commuted variants.
  // Canonicalize -1 (saturated result) to true value of the select.
  if (match(FVal, m_AllOnes())) {
    std::swap(TVal, FVal);
    Pred = CmpInst::getInversePredicate(Pred);
  }
  if (!match(TVal, m_AllOnes()))
    return nullptr;

  // Canonicalize predicate to less-than or less-or-equal-than.
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_UGE) {
    std::swap(Cmp0, Cmp1);
    Pred = CmpInst::getSwappedPredicate(Pred);
  }
  if (Pred != ICmpInst::ICMP_ULT && Pred != ICmpInst::ICMP_ULE)
    return nullptr;

  // Match unsigned saturated add of 2 variables with an unnecessary 'not'.
  // Strictness of the comparison is irrelevant.
  Value *Y;
  if (match(Cmp0, m_Not(m_Value(X))) &&
      match(FVal, m_c_Add(m_Specific(X), m_Value(Y))) && Y == Cmp1) {
    // (~X u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
    // (~X u< Y) ? -1 : (Y + X) --> uadd.sat(X, Y)
    return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, X, Y);
  }
  // The 'not' op may be included in the sum but not the compare.
  // Strictness of the comparison is irrelevant.
  X = Cmp0;
  Y = Cmp1;
  if (match(FVal, m_c_Add(m_Not(m_Specific(X)), m_Specific(Y)))) {
    // (X u< Y) ? -1 : (~X + Y) --> uadd.sat(~X, Y)
    // (X u< Y) ? -1 : (Y + ~X) --> uadd.sat(Y, ~X)
    BinaryOperator *BO = cast<BinaryOperator>(FVal);
    return Builder.CreateBinaryIntrinsic(
        Intrinsic::uadd_sat, BO->getOperand(0), BO->getOperand(1));
  }
  // The overflow may be detected via the add wrapping round.
  // This is only valid for strict comparison!
  if (Pred == ICmpInst::ICMP_ULT &&
      match(Cmp0, m_c_Add(m_Specific(Cmp1), m_Value(Y))) &&
      match(FVal, m_c_Add(m_Specific(Cmp1), m_Specific(Y)))) {
    // ((X + Y) u< X) ? -1 : (X + Y) --> uadd.sat(X, Y)
    // ((X + Y) u< Y) ? -1 : (X + Y) --> uadd.sat(X, Y)
    return Builder.CreateBinaryIntrinsic(Intrinsic::uadd_sat, Cmp1, Y);
  }

  return nullptr;
}
820
821 /// Fold the following code sequence:
822 /// \code
823 /// int a = ctlz(x & -x);
/// x ? 31 - a : a;
/// \endcode
826 ///
827 /// into:
828 /// cttz(x)
foldSelectCtlzToCttz(ICmpInst * ICI,Value * TrueVal,Value * FalseVal,InstCombiner::BuilderTy & Builder)829 static Instruction *foldSelectCtlzToCttz(ICmpInst *ICI, Value *TrueVal,
830 Value *FalseVal,
831 InstCombiner::BuilderTy &Builder) {
832 unsigned BitWidth = TrueVal->getType()->getScalarSizeInBits();
833 if (!ICI->isEquality() || !match(ICI->getOperand(1), m_Zero()))
834 return nullptr;
835
836 if (ICI->getPredicate() == ICmpInst::ICMP_NE)
837 std::swap(TrueVal, FalseVal);
838
839 if (!match(FalseVal,
840 m_Xor(m_Deferred(TrueVal), m_SpecificInt(BitWidth - 1))))
841 return nullptr;
842
843 if (!match(TrueVal, m_Intrinsic<Intrinsic::ctlz>()))
844 return nullptr;
845
846 Value *X = ICI->getOperand(0);
847 auto *II = cast<IntrinsicInst>(TrueVal);
848 if (!match(II->getOperand(0), m_c_And(m_Specific(X), m_Neg(m_Specific(X)))))
849 return nullptr;
850
851 Function *F = Intrinsic::getDeclaration(II->getModule(), Intrinsic::cttz,
852 II->getType());
853 return CallInst::Create(F, {X, II->getArgOperand(1)});
854 }
855
856 /// Attempt to fold a cttz/ctlz followed by a icmp plus select into a single
857 /// call to cttz/ctlz with flag 'is_zero_undef' cleared.
858 ///
859 /// For example, we can fold the following code sequence:
860 /// \code
861 /// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 true)
862 /// %1 = icmp ne i32 %x, 0
863 /// %2 = select i1 %1, i32 %0, i32 32
/// \endcode
865 ///
866 /// into:
867 /// %0 = tail call i32 @llvm.cttz.i32(i32 %x, i1 false)
static Value *foldSelectCttzCtlz(ICmpInst *ICI, Value *TrueVal, Value *FalseVal,
                                 InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);

  // Check if the condition value compares a value for equality against zero.
  if (!ICI->isEquality() || !match(CmpRHS, m_Zero()))
    return nullptr;

  // Normalize to the ICMP_EQ form: ValueOnZero is the arm selected when the
  // input is zero; SelectArg is the (possibly extended/truncated) count.
  Value *SelectArg = FalseVal;
  Value *ValueOnZero = TrueVal;
  if (Pred == ICmpInst::ICMP_NE)
    std::swap(SelectArg, ValueOnZero);

  // Skip zero extend/truncate.
  Value *Count = nullptr;
  if (!match(SelectArg, m_ZExt(m_Value(Count))) &&
      !match(SelectArg, m_Trunc(m_Value(Count))))
    Count = SelectArg;

  // Check that 'Count' is a call to intrinsic cttz/ctlz. Also check that the
  // input to the cttz/ctlz is used as LHS for the compare instruction.
  if (!match(Count, m_Intrinsic<Intrinsic::cttz>(m_Specific(CmpLHS))) &&
      !match(Count, m_Intrinsic<Intrinsic::ctlz>(m_Specific(CmpLHS))))
    return nullptr;

  IntrinsicInst *II = cast<IntrinsicInst>(Count);

  // Check if the value propagated on zero is a constant number equal to the
  // sizeof in bits of 'Count'. If so, the select is redundant: cttz/ctlz with
  // is-zero-undef cleared already produces the bitwidth on a zero input.
  unsigned SizeOfInBits = Count->getType()->getScalarSizeInBits();
  if (match(ValueOnZero, m_SpecificInt(SizeOfInBits))) {
    // Explicitly clear the 'undef_on_zero' flag. It's always valid to go from
    // true to false on this flag, so we can replace it for all users.
    II->setArgOperand(1, ConstantInt::getFalse(II->getContext()));
    return SelectArg;
  }

  // The ValueOnZero is not the bitwidth. But if the cttz/ctlz (and optional
  // zext/trunc) have one use (ending at the select), the cttz/ctlz result will
  // not be used if the input is zero. Relax to 'undef_on_zero' for that case.
  // Note: this strengthens the intrinsic flag as a side effect even though we
  // return nullptr (no select replacement is performed here).
  if (II->hasOneUse() && SelectArg->hasOneUse() &&
      !match(II->getArgOperand(1), m_One()))
    II->setArgOperand(1, ConstantInt::getTrue(II->getContext()));

  return nullptr;
}
916
917 /// Return true if we find and adjust an icmp+select pattern where the compare
918 /// is with a constant that can be incremented or decremented to match the
919 /// minimum or maximum idiom.
static bool adjustMinMax(SelectInst &Sel, ICmpInst &Cmp) {
  ICmpInst::Predicate Pred = Cmp.getPredicate();
  Value *CmpLHS = Cmp.getOperand(0);
  Value *CmpRHS = Cmp.getOperand(1);
  Value *TrueVal = Sel.getTrueValue();
  Value *FalseVal = Sel.getFalseValue();

  // We may move or edit the compare, so make sure the select is the only user.
  const APInt *CmpC;
  if (!Cmp.hasOneUse() || !match(CmpRHS, m_APInt(CmpC)))
    return false;

  // These transforms only work for selects of integers or vector selects of
  // integer vectors.
  Type *SelTy = Sel.getType();
  auto *SelEltTy = dyn_cast<IntegerType>(SelTy->getScalarType());
  if (!SelEltTy || SelTy->isVectorTy() != Cmp.getType()->isVectorTy())
    return false;

  // Nudge the compare constant toward the min/max form: C+1 for a
  // greater-than compare, C-1 for a less-than compare. Other predicates
  // cannot form a min/max with an off-by-one constant.
  Constant *AdjustedRHS;
  if (Pred == ICmpInst::ICMP_UGT || Pred == ICmpInst::ICMP_SGT)
    AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC + 1);
  else if (Pred == ICmpInst::ICMP_ULT || Pred == ICmpInst::ICMP_SLT)
    AdjustedRHS = ConstantInt::get(CmpRHS->getType(), *CmpC - 1);
  else
    return false;

  // X > C ? X : C+1 --> X < C+1 ? C+1 : X
  // X < C ? X : C-1 --> X > C-1 ? C-1 : X
  if ((CmpLHS == TrueVal && AdjustedRHS == FalseVal) ||
      (CmpLHS == FalseVal && AdjustedRHS == TrueVal)) {
    ; // Nothing to do here. Values match without any sign/zero extension.
  }
  // Types do not match. Instead of calculating this with mixed types, promote
  // all to the larger type. This enables scalar evolution to analyze this
  // expression.
  else if (CmpRHS->getType()->getScalarSizeInBits() < SelEltTy->getBitWidth()) {
    Constant *SextRHS = ConstantExpr::getSExt(AdjustedRHS, SelTy);

    // X = sext x; x >s c ? X : C+1 --> X = sext x; X <s C+1 ? C+1 : X
    // X = sext x; x <s c ? X : C-1 --> X = sext x; X >s C-1 ? C-1 : X
    // X = sext x; x >u c ? X : C+1 --> X = sext x; X <u C+1 ? C+1 : X
    // X = sext x; x <u c ? X : C-1 --> X = sext x; X >u C-1 ? C-1 : X
    if (match(TrueVal, m_SExt(m_Specific(CmpLHS))) && SextRHS == FalseVal) {
      CmpLHS = TrueVal;
      AdjustedRHS = SextRHS;
    } else if (match(FalseVal, m_SExt(m_Specific(CmpLHS))) &&
               SextRHS == TrueVal) {
      CmpLHS = FalseVal;
      AdjustedRHS = SextRHS;
    } else if (Cmp.isUnsigned()) {
      Constant *ZextRHS = ConstantExpr::getZExt(AdjustedRHS, SelTy);
      // X = zext x; x >u c ? X : C+1 --> X = zext x; X <u C+1 ? C+1 : X
      // X = zext x; x <u c ? X : C-1 --> X = zext x; X >u C-1 ? C-1 : X
      // zext + signed compare cannot be changed:
      //   0xff <s 0x00, but 0x00ff >s 0x0000
      if (match(TrueVal, m_ZExt(m_Specific(CmpLHS))) && ZextRHS == FalseVal) {
        CmpLHS = TrueVal;
        AdjustedRHS = ZextRHS;
      } else if (match(FalseVal, m_ZExt(m_Specific(CmpLHS))) &&
                 ZextRHS == TrueVal) {
        CmpLHS = FalseVal;
        AdjustedRHS = ZextRHS;
      } else {
        return false;
      }
    } else {
      return false;
    }
  } else {
    return false;
  }

  // Rewrite in place: swap the predicate, install the adjusted constant, and
  // swap the select arms so the overall semantics are preserved.
  Pred = ICmpInst::getSwappedPredicate(Pred);
  CmpRHS = AdjustedRHS;
  std::swap(FalseVal, TrueVal);
  Cmp.setPredicate(Pred);
  Cmp.setOperand(0, CmpLHS);
  Cmp.setOperand(1, CmpRHS);
  Sel.setOperand(1, TrueVal);
  Sel.setOperand(2, FalseVal);
  Sel.swapProfMetadata();

  // Move the compare instruction right before the select instruction. Otherwise
  // the sext/zext value may be defined after the compare instruction uses it.
  Cmp.moveBefore(&Sel);

  return true;
}
1009
1010 /// If this is an integer min/max (icmp + select) with a constant operand,
1011 /// create the canonical icmp for the min/max operation and canonicalize the
1012 /// constant to the 'false' operand of the select:
1013 /// select (icmp Pred X, C1), C2, X --> select (icmp Pred' X, C2), X, C2
1014 /// Note: if C1 != C2, this will change the icmp constant to the existing
1015 /// constant operand of the select.
static Instruction *canonicalizeMinMaxWithConstant(SelectInst &Sel,
                                                   ICmpInst &Cmp,
                                                   InstCombinerImpl &IC) {
  // We rewrite the compare, so it must have no other users, and the RHS must
  // be a constant for the min/max-with-constant canonicalization to apply.
  if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
    return nullptr;

  // Canonicalize the compare predicate based on whether we have min or max.
  Value *LHS, *RHS;
  SelectPatternResult SPR = matchSelectPattern(&Sel, LHS, RHS);
  if (!SelectPatternResult::isMinOrMax(SPR.Flavor))
    return nullptr;

  // Is this already canonical?
  ICmpInst::Predicate CanonicalPred = getMinMaxPred(SPR.Flavor);
  if (Cmp.getOperand(0) == LHS && Cmp.getOperand(1) == RHS &&
      Cmp.getPredicate() == CanonicalPred)
    return nullptr;

  // Bail out on unsimplified X-0 operand (due to some worklist management bug),
  // as this may cause an infinite combine loop. Let the sub be folded first.
  if (match(LHS, m_Sub(m_Value(), m_Zero())) ||
      match(RHS, m_Sub(m_Value(), m_Zero())))
    return nullptr;

  // Create the canonical compare and plug it into the select.
  IC.replaceOperand(Sel, 0, IC.Builder.CreateICmp(CanonicalPred, LHS, RHS));

  // If the select operands did not change, we're done.
  if (Sel.getTrueValue() == LHS && Sel.getFalseValue() == RHS)
    return &Sel;

  // If we are swapping the select operands, swap the metadata too.
  assert(Sel.getTrueValue() == RHS && Sel.getFalseValue() == LHS &&
         "Unexpected results from matchSelectPattern");
  Sel.swapValues();
  Sel.swapProfMetadata();
  return &Sel;
}
1054
canonicalizeAbsNabs(SelectInst & Sel,ICmpInst & Cmp,InstCombinerImpl & IC)1055 static Instruction *canonicalizeAbsNabs(SelectInst &Sel, ICmpInst &Cmp,
1056 InstCombinerImpl &IC) {
1057 if (!Cmp.hasOneUse() || !isa<Constant>(Cmp.getOperand(1)))
1058 return nullptr;
1059
1060 Value *LHS, *RHS;
1061 SelectPatternFlavor SPF = matchSelectPattern(&Sel, LHS, RHS).Flavor;
1062 if (SPF != SelectPatternFlavor::SPF_ABS &&
1063 SPF != SelectPatternFlavor::SPF_NABS)
1064 return nullptr;
1065
1066 // Note that NSW flag can only be propagated for normal, non-negated abs!
1067 bool IntMinIsPoison = SPF == SelectPatternFlavor::SPF_ABS &&
1068 match(RHS, m_NSWNeg(m_Specific(LHS)));
1069 Constant *IntMinIsPoisonC =
1070 ConstantInt::get(Type::getInt1Ty(Sel.getContext()), IntMinIsPoison);
1071 Instruction *Abs =
1072 IC.Builder.CreateBinaryIntrinsic(Intrinsic::abs, LHS, IntMinIsPoisonC);
1073
1074 if (SPF == SelectPatternFlavor::SPF_NABS)
1075 return BinaryOperator::CreateNeg(Abs); // Always without NSW flag!
1076
1077 return IC.replaceInstUsesWith(Sel, Abs);
1078 }
1079
1080 /// If we have a select with an equality comparison, then we know the value in
1081 /// one of the arms of the select. See if substituting this value into an arm
1082 /// and simplifying the result yields the same value as the other arm.
1083 ///
1084 /// To make this transform safe, we must drop poison-generating flags
1085 /// (nsw, etc) if we simplified to a binop because the select may be guarding
1086 /// that poison from propagating. If the existing binop already had no
1087 /// poison-generating flags, then this transform can be done by instsimplify.
1088 ///
1089 /// Consider:
1090 /// %cmp = icmp eq i32 %x, 2147483647
1091 /// %add = add nsw i32 %x, 1
1092 /// %sel = select i1 %cmp, i32 -2147483648, i32 %add
1093 ///
1094 /// We can't replace %sel with %add unless we strip away the flags.
1095 /// TODO: Wrapping flags could be preserved in some cases with better analysis.
Instruction *InstCombinerImpl::foldSelectValueEquivalence(SelectInst &Sel,
                                                          ICmpInst &Cmp) {
  // Value equivalence substitution requires an all-or-nothing replacement.
  // It does not make sense for a vector compare where each lane is chosen
  // independently.
  if (!Cmp.isEquality() || Cmp.getType()->isVectorTy())
    return nullptr;

  // Canonicalize the pattern to ICMP_EQ by swapping the select operands.
  Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
  bool Swapped = false;
  if (Cmp.getPredicate() == ICmpInst::ICMP_NE) {
    std::swap(TrueVal, FalseVal);
    Swapped = true;
  }

  // In X == Y ? f(X) : Z, try to evaluate f(Y) and replace the operand.
  // Make sure Y cannot be undef though, as we might pick different values for
  // undef in the icmp and in f(Y). Additionally, take care to avoid replacing
  // X == Y ? X : Z with X == Y ? Y : Z, as that would lead to an infinite
  // replacement cycle.
  Value *CmpLHS = Cmp.getOperand(0), *CmpRHS = Cmp.getOperand(1);
  if (TrueVal != CmpLHS &&
      isGuaranteedNotToBeUndefOrPoison(CmpRHS, SQ.AC, &Sel, &DT)) {
    if (Value *V = SimplifyWithOpReplaced(TrueVal, CmpLHS, CmpRHS, SQ,
                                          /* AllowRefinement */ true))
      return replaceOperand(Sel, Swapped ? 2 : 1, V);

    // Even if TrueVal does not simplify, we can directly replace a use of
    // CmpLHS with CmpRHS, as long as the instruction is not used anywhere
    // else and is safe to speculatively execute (we may end up executing it
    // with different operands, which should not cause side-effects or trigger
    // undefined behavior). Only do this if CmpRHS is a constant, as
    // profitability is not clear for other cases.
    // FIXME: The replacement could be performed recursively.
    if (match(CmpRHS, m_ImmConstant()) && !match(CmpLHS, m_ImmConstant()))
      if (auto *I = dyn_cast<Instruction>(TrueVal))
        if (I->hasOneUse() && isSafeToSpeculativelyExecute(I))
          for (Use &U : I->operands())
            if (U == CmpLHS) {
              replaceUse(U, CmpRHS);
              return &Sel;
            }
  }
  // Try the symmetric substitution: replace CmpRHS with CmpLHS in TrueVal.
  if (TrueVal != CmpRHS &&
      isGuaranteedNotToBeUndefOrPoison(CmpLHS, SQ.AC, &Sel, &DT))
    if (Value *V = SimplifyWithOpReplaced(TrueVal, CmpRHS, CmpLHS, SQ,
                                          /* AllowRefinement */ true))
      return replaceOperand(Sel, Swapped ? 2 : 1, V);

  auto *FalseInst = dyn_cast<Instruction>(FalseVal);
  if (!FalseInst)
    return nullptr;

  // InstSimplify already performed this fold if it was possible subject to
  // current poison-generating flags. Try the transform again with
  // poison-generating flags temporarily dropped. Remember which flags were
  // present so we can restore them if the transform fails.
  bool WasNUW = false, WasNSW = false, WasExact = false, WasInBounds = false;
  if (auto *OBO = dyn_cast<OverflowingBinaryOperator>(FalseVal)) {
    WasNUW = OBO->hasNoUnsignedWrap();
    WasNSW = OBO->hasNoSignedWrap();
    FalseInst->setHasNoUnsignedWrap(false);
    FalseInst->setHasNoSignedWrap(false);
  }
  if (auto *PEO = dyn_cast<PossiblyExactOperator>(FalseVal)) {
    WasExact = PEO->isExact();
    FalseInst->setIsExact(false);
  }
  if (auto *GEP = dyn_cast<GetElementPtrInst>(FalseVal)) {
    WasInBounds = GEP->isInBounds();
    GEP->setIsInBounds(false);
  }

  // Try each equivalence substitution possibility.
  // We have an 'EQ' comparison, so the select's false value will propagate.
  // Example:
  // (X == 42) ? 43 : (X + 1) --> (X == 42) ? (X + 1) : (X + 1) --> X + 1
  if (SimplifyWithOpReplaced(FalseVal, CmpLHS, CmpRHS, SQ,
                             /* AllowRefinement */ false) == TrueVal ||
      SimplifyWithOpReplaced(FalseVal, CmpRHS, CmpLHS, SQ,
                             /* AllowRefinement */ false) == TrueVal) {
    return replaceInstUsesWith(Sel, FalseVal);
  }

  // Restore poison-generating flags if the transform did not apply.
  if (WasNUW)
    FalseInst->setHasNoUnsignedWrap();
  if (WasNSW)
    FalseInst->setHasNoSignedWrap();
  if (WasExact)
    FalseInst->setIsExact();
  if (WasInBounds)
    cast<GetElementPtrInst>(FalseInst)->setIsInBounds();

  return nullptr;
}
1192
1193 // See if this is a pattern like:
1194 // %old_cmp1 = icmp slt i32 %x, C2
1195 // %old_replacement = select i1 %old_cmp1, i32 %target_low, i32 %target_high
1196 // %old_x_offseted = add i32 %x, C1
1197 // %old_cmp0 = icmp ult i32 %old_x_offseted, C0
1198 // %r = select i1 %old_cmp0, i32 %x, i32 %old_replacement
1199 // This can be rewritten as more canonical pattern:
1200 // %new_cmp1 = icmp slt i32 %x, -C1
1201 // %new_cmp2 = icmp sge i32 %x, C0-C1
1202 // %new_clamped_low = select i1 %new_cmp1, i32 %target_low, i32 %x
1203 // %r = select i1 %new_cmp2, i32 %target_high, i32 %new_clamped_low
1204 // Iff -C1 s<= C2 s<= C0-C1
1205 // Also ULT predicate can also be UGT iff C0 != -1 (+invert result)
1206 // SLT predicate can also be SGT iff C2 != INT_MAX (+invert res.)
static Instruction *canonicalizeClampLike(SelectInst &Sel0, ICmpInst &Cmp0,
                                          InstCombiner::BuilderTy &Builder) {
  Value *X = Sel0.getTrueValue();
  Value *Sel1 = Sel0.getFalseValue();

  // First match the condition of the outermost select.
  // Said condition must be one-use.
  if (!Cmp0.hasOneUse())
    return nullptr;
  Value *Cmp00 = Cmp0.getOperand(0);
  Constant *C0;
  if (!match(Cmp0.getOperand(1),
             m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))
    return nullptr;
  // Canonicalize Cmp0 into the form we expect.
  // FIXME: we shouldn't care about lanes that are 'undef' in the end?
  switch (Cmp0.getPredicate()) {
  case ICmpInst::Predicate::ICMP_ULT:
    break; // Great!
  case ICmpInst::Predicate::ICMP_ULE:
    // We'd have to increment C0 by one, and for that it must not have all-ones
    // element, but then it would have been canonicalized to 'ult' before
    // we get here. So we can't do anything useful with 'ule'.
    return nullptr;
  case ICmpInst::Predicate::ICMP_UGT:
    // We want to canonicalize it to 'ult', so we'll need to increment C0,
    // which again means it must not have any all-ones elements.
    if (!match(C0,
               m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
                                  APInt::getAllOnesValue(
                                      C0->getType()->getScalarSizeInBits()))))
      return nullptr; // Can't do, have all-ones element[s].
    C0 = InstCombiner::AddOne(C0);
    // 'ugt' selects the arms the opposite way around from 'ult'.
    std::swap(X, Sel1);
    break;
  case ICmpInst::Predicate::ICMP_UGE:
    // The only way we'd get this predicate if this `icmp` has extra uses,
    // but then we won't be able to do this fold.
    return nullptr;
  default:
    return nullptr; // Unknown predicate.
  }

  // Now that we've canonicalized the ICmp, we know the X we expect;
  // the select in other hand should be one-use.
  if (!Sel1->hasOneUse())
    return nullptr;

  // We now can finish matching the condition of the outermost select:
  // it should either be the X itself, or an addition of some constant to X.
  Constant *C1;
  if (Cmp00 == X)
    C1 = ConstantInt::getNullValue(Sel0.getType());
  else if (!match(Cmp00,
                  m_Add(m_Specific(X),
                        m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C1)))))
    return nullptr;

  // The false arm must itself be a select on an icmp of X against a constant
  // (the inner clamp of the original pattern).
  Value *Cmp1;
  ICmpInst::Predicate Pred1;
  Constant *C2;
  Value *ReplacementLow, *ReplacementHigh;
  if (!match(Sel1, m_Select(m_Value(Cmp1), m_Value(ReplacementLow),
                            m_Value(ReplacementHigh))) ||
      !match(Cmp1,
             m_ICmp(Pred1, m_Specific(X),
                    m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C2)))))
    return nullptr;

  if (!Cmp1->hasOneUse() && (Cmp00 == X || !Cmp00->hasOneUse()))
    return nullptr; // Not enough one-use instructions for the fold.
  // FIXME: this restriction could be relaxed if Cmp1 can be reused as one of
  // two comparisons we'll need to build.

  // Canonicalize Cmp1 into the form we expect.
  // FIXME: we shouldn't care about lanes that are 'undef' in the end?
  switch (Pred1) {
  case ICmpInst::Predicate::ICMP_SLT:
    break;
  case ICmpInst::Predicate::ICMP_SLE:
    // We'd have to increment C2 by one, and for that it must not have signed
    // max element, but then it would have been canonicalized to 'slt' before
    // we get here. So we can't do anything useful with 'sle'.
    return nullptr;
  case ICmpInst::Predicate::ICMP_SGT:
    // We want to canonicalize it to 'slt', so we'll need to increment C2,
    // which again means it must not have any signed max elements.
    if (!match(C2,
               m_SpecificInt_ICMP(ICmpInst::Predicate::ICMP_NE,
                                  APInt::getSignedMaxValue(
                                      C2->getType()->getScalarSizeInBits()))))
      return nullptr; // Can't do, have signed max element[s].
    C2 = InstCombiner::AddOne(C2);
    LLVM_FALLTHROUGH;
  case ICmpInst::Predicate::ICMP_SGE:
    // Also non-canonical, but here we don't need to change C2,
    // so we don't have any restrictions on C2, so we can just handle it.
    std::swap(ReplacementLow, ReplacementHigh);
    break;
  default:
    return nullptr; // Unknown predicate.
  }

  // The thresholds of this clamp-like pattern.
  auto *ThresholdLowIncl = ConstantExpr::getNeg(C1);
  auto *ThresholdHighExcl = ConstantExpr::getSub(C0, C1);

  // The fold has a precondition 1: C2 s>= ThresholdLow
  auto *Precond1 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SGE, C2,
                                         ThresholdLowIncl);
  if (!match(Precond1, m_One()))
    return nullptr;
  // The fold has a precondition 2: C2 s<= ThresholdHigh
  auto *Precond2 = ConstantExpr::getICmp(ICmpInst::Predicate::ICMP_SLE, C2,
                                         ThresholdHighExcl);
  if (!match(Precond2, m_One()))
    return nullptr;

  // All good, finally emit the new pattern:
  //   %new_clamped_low = select (X s< ThresholdLowIncl), Low, X
  //   %r = select (X s>= ThresholdHighExcl), High, %new_clamped_low
  Value *ShouldReplaceLow = Builder.CreateICmpSLT(X, ThresholdLowIncl);
  Value *ShouldReplaceHigh = Builder.CreateICmpSGE(X, ThresholdHighExcl);
  Value *MaybeReplacedLow =
      Builder.CreateSelect(ShouldReplaceLow, ReplacementLow, X);
  Instruction *MaybeReplacedHigh =
      SelectInst::Create(ShouldReplaceHigh, ReplacementHigh, MaybeReplacedLow);

  return MaybeReplacedHigh;
}
1335
1336 // If we have
1337 // %cmp = icmp [canonical predicate] i32 %x, C0
1338 // %r = select i1 %cmp, i32 %y, i32 C1
1339 // Where C0 != C1 and %x may be different from %y, see if the constant that we
1340 // will have if we flip the strictness of the predicate (i.e. without changing
1341 // the result) is identical to the C1 in select. If it matches we can change
1342 // original comparison to one with swapped predicate, reuse the constant,
1343 // and swap the hands of select.
1344 static Instruction *
tryToReuseConstantFromSelectInComparison(SelectInst & Sel,ICmpInst & Cmp,InstCombinerImpl & IC)1345 tryToReuseConstantFromSelectInComparison(SelectInst &Sel, ICmpInst &Cmp,
1346 InstCombinerImpl &IC) {
1347 ICmpInst::Predicate Pred;
1348 Value *X;
1349 Constant *C0;
1350 if (!match(&Cmp, m_OneUse(m_ICmp(
1351 Pred, m_Value(X),
1352 m_CombineAnd(m_AnyIntegralConstant(), m_Constant(C0))))))
1353 return nullptr;
1354
1355 // If comparison predicate is non-relational, we won't be able to do anything.
1356 if (ICmpInst::isEquality(Pred))
1357 return nullptr;
1358
1359 // If comparison predicate is non-canonical, then we certainly won't be able
1360 // to make it canonical; canonicalizeCmpWithConstant() already tried.
1361 if (!InstCombiner::isCanonicalPredicate(Pred))
1362 return nullptr;
1363
1364 // If the [input] type of comparison and select type are different, lets abort
1365 // for now. We could try to compare constants with trunc/[zs]ext though.
1366 if (C0->getType() != Sel.getType())
1367 return nullptr;
1368
1369 // FIXME: are there any magic icmp predicate+constant pairs we must not touch?
1370
1371 Value *SelVal0, *SelVal1; // We do not care which one is from where.
1372 match(&Sel, m_Select(m_Value(), m_Value(SelVal0), m_Value(SelVal1)));
1373 // At least one of these values we are selecting between must be a constant
1374 // else we'll never succeed.
1375 if (!match(SelVal0, m_AnyIntegralConstant()) &&
1376 !match(SelVal1, m_AnyIntegralConstant()))
1377 return nullptr;
1378
1379 // Does this constant C match any of the `select` values?
1380 auto MatchesSelectValue = [SelVal0, SelVal1](Constant *C) {
1381 return C->isElementWiseEqual(SelVal0) || C->isElementWiseEqual(SelVal1);
1382 };
1383
1384 // If C0 *already* matches true/false value of select, we are done.
1385 if (MatchesSelectValue(C0))
1386 return nullptr;
1387
1388 // Check the constant we'd have with flipped-strictness predicate.
1389 auto FlippedStrictness =
1390 InstCombiner::getFlippedStrictnessPredicateAndConstant(Pred, C0);
1391 if (!FlippedStrictness)
1392 return nullptr;
1393
1394 // If said constant doesn't match either, then there is no hope,
1395 if (!MatchesSelectValue(FlippedStrictness->second))
1396 return nullptr;
1397
1398 // It matched! Lets insert the new comparison just before select.
1399 InstCombiner::BuilderTy::InsertPointGuard Guard(IC.Builder);
1400 IC.Builder.SetInsertPoint(&Sel);
1401
1402 Pred = ICmpInst::getSwappedPredicate(Pred); // Yes, swapped.
1403 Value *NewCmp = IC.Builder.CreateICmp(Pred, X, FlippedStrictness->second,
1404 Cmp.getName() + ".inv");
1405 IC.replaceOperand(Sel, 0, NewCmp);
1406 Sel.swapValues();
1407 Sel.swapProfMetadata();
1408
1409 return &Sel;
1410 }
1411
1412 /// Visit a SelectInst that has an ICmpInst as its first operand.
Instruction *InstCombinerImpl::foldSelectInstWithICmp(SelectInst &SI,
                                                      ICmpInst *ICI) {
  // First run the canonicalizations that may replace or rewrite the select
  // outright; their order matters because each assumes earlier forms are gone.
  if (Instruction *NewSel = foldSelectValueEquivalence(SI, *ICI))
    return NewSel;

  if (Instruction *NewSel = canonicalizeMinMaxWithConstant(SI, *ICI, *this))
    return NewSel;

  if (Instruction *NewAbs = canonicalizeAbsNabs(SI, *ICI, *this))
    return NewAbs;

  if (Instruction *NewAbs = canonicalizeClampLike(SI, *ICI, Builder))
    return NewAbs;

  if (Instruction *NewSel =
          tryToReuseConstantFromSelectInComparison(SI, *ICI, *this))
    return NewSel;

  // adjustMinMax mutates the IR in place; remember that so we can report a
  // change even if no later fold fires.
  bool Changed = adjustMinMax(SI, *ICI);

  if (Value *V = foldSelectICmpAnd(SI, ICI, Builder))
    return replaceInstUsesWith(SI, V);

  // NOTE: if we wanted to, this is where to detect integer MIN/MAX
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();
  ICmpInst::Predicate Pred = ICI->getPredicate();
  Value *CmpLHS = ICI->getOperand(0);
  Value *CmpRHS = ICI->getOperand(1);
  if (CmpRHS != CmpLHS && isa<Constant>(CmpRHS)) {
    if (CmpLHS == TrueVal && Pred == ICmpInst::ICMP_EQ) {
      // Transform (X == C) ? X : Y -> (X == C) ? C : Y
      SI.setOperand(1, CmpRHS);
      Changed = true;
    } else if (CmpLHS == FalseVal && Pred == ICmpInst::ICMP_NE) {
      // Transform (X != C) ? Y : X -> (X != C) ? Y : C
      SI.setOperand(2, CmpRHS);
      Changed = true;
    }
  }

  // FIXME: This code is nearly duplicated in InstSimplify. Using/refactoring
  // decomposeBitTestICmp() might help.
  {
    unsigned BitWidth =
        DL.getTypeSizeInBits(TrueVal->getType()->getScalarType());
    APInt MinSignedValue = APInt::getSignedMinValue(BitWidth);
    Value *X;
    const APInt *Y, *C;
    bool TrueWhenUnset;
    bool IsBitTest = false;
    // Recognize the three forms of a single-bit test:
    //   (X & pow2) ==/!= 0, X s< 0 (sign-bit set), X s> -1 (sign-bit clear).
    if (ICmpInst::isEquality(Pred) &&
        match(CmpLHS, m_And(m_Value(X), m_Power2(Y))) &&
        match(CmpRHS, m_Zero())) {
      IsBitTest = true;
      TrueWhenUnset = Pred == ICmpInst::ICMP_EQ;
    } else if (Pred == ICmpInst::ICMP_SLT && match(CmpRHS, m_Zero())) {
      X = CmpLHS;
      Y = &MinSignedValue;
      IsBitTest = true;
      TrueWhenUnset = false;
    } else if (Pred == ICmpInst::ICMP_SGT && match(CmpRHS, m_AllOnes())) {
      X = CmpLHS;
      Y = &MinSignedValue;
      IsBitTest = true;
      TrueWhenUnset = true;
    }
    if (IsBitTest) {
      Value *V = nullptr;
      // (X & Y) == 0 ? X : X ^ Y  --> X & ~Y
      if (TrueWhenUnset && TrueVal == X &&
          match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
        V = Builder.CreateAnd(X, ~(*Y));
      // (X & Y) != 0 ? X ^ Y : X  --> X & ~Y
      else if (!TrueWhenUnset && FalseVal == X &&
               match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
        V = Builder.CreateAnd(X, ~(*Y));
      // (X & Y) == 0 ? X ^ Y : X  --> X | Y
      else if (TrueWhenUnset && FalseVal == X &&
               match(TrueVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
        V = Builder.CreateOr(X, *Y);
      // (X & Y) != 0 ? X : X ^ Y  --> X | Y
      else if (!TrueWhenUnset && TrueVal == X &&
               match(FalseVal, m_Xor(m_Specific(X), m_APInt(C))) && *Y == *C)
        V = Builder.CreateOr(X, *Y);

      if (V)
        return replaceInstUsesWith(SI, V);
    }
  }

  // Remaining folds that produce a fresh value for the whole select.
  if (Instruction *V =
          foldSelectICmpAndAnd(SI.getType(), ICI, TrueVal, FalseVal, Builder))
    return V;

  if (Instruction *V = foldSelectCtlzToCttz(ICI, TrueVal, FalseVal, Builder))
    return V;

  if (Value *V = foldSelectICmpAndOr(ICI, TrueVal, FalseVal, Builder))
    return replaceInstUsesWith(SI, V);

  if (Value *V = foldSelectICmpLshrAshr(ICI, TrueVal, FalseVal, Builder))
    return replaceInstUsesWith(SI, V);

  if (Value *V = foldSelectCttzCtlz(ICI, TrueVal, FalseVal, Builder))
    return replaceInstUsesWith(SI, V);

  if (Value *V = canonicalizeSaturatedSubtract(ICI, TrueVal, FalseVal, Builder))
    return replaceInstUsesWith(SI, V);

  if (Value *V = canonicalizeSaturatedAdd(ICI, TrueVal, FalseVal, Builder))
    return replaceInstUsesWith(SI, V);

  return Changed ? &SI : nullptr;
}
1528
1529 /// SI is a select whose condition is a PHI node (but the two may be in
1530 /// different blocks). See if the true/false values (V) are live in all of the
1531 /// predecessor blocks of the PHI. For example, cases like this can't be mapped:
1532 ///
1533 /// X = phi [ C1, BB1], [C2, BB2]
1534 /// Y = add
1535 /// Z = select X, Y, 0
1536 ///
1537 /// because Y is not live in BB1/BB2.
canSelectOperandBeMappingIntoPredBlock(const Value * V,const SelectInst & SI)1538 static bool canSelectOperandBeMappingIntoPredBlock(const Value *V,
1539 const SelectInst &SI) {
1540 // If the value is a non-instruction value like a constant or argument, it
1541 // can always be mapped.
1542 const Instruction *I = dyn_cast<Instruction>(V);
1543 if (!I) return true;
1544
1545 // If V is a PHI node defined in the same block as the condition PHI, we can
1546 // map the arguments.
1547 const PHINode *CondPHI = cast<PHINode>(SI.getCondition());
1548
1549 if (const PHINode *VP = dyn_cast<PHINode>(I))
1550 if (VP->getParent() == CondPHI->getParent())
1551 return true;
1552
1553 // Otherwise, if the PHI and select are defined in the same block and if V is
1554 // defined in a different block, then we can transform it.
1555 if (SI.getParent() == CondPHI->getParent() &&
1556 I->getParent() != CondPHI->getParent())
1557 return true;
1558
1559 // Otherwise we have a 'hard' case and we can't tell without doing more
1560 // detailed dominator based analysis, punt.
1561 return false;
1562 }
1563
/// We have an SPF (e.g. a min or max) of an SPF of the form:
///   SPF2(SPF1(A, B), C)
/// Try to simplify the nested min/max/abs pattern. Returns a replacement for
/// \p Outer, or nullptr if no fold applies.
Instruction *InstCombinerImpl::foldSPFofSPF(Instruction *Inner,
                                            SelectPatternFlavor SPF1, Value *A,
                                            Value *B, Instruction &Outer,
                                            SelectPatternFlavor SPF2,
                                            Value *C) {
  // All folds below assume the inner and outer ops produce the same type.
  if (Outer.getType() != Inner->getType())
    return nullptr;

  if (C == A || C == B) {
    // MAX(MAX(A, B), B) -> MAX(A, B)
    // MIN(MIN(a, b), a) -> MIN(a, b)
    // TODO: This could be done in instsimplify.
    if (SPF1 == SPF2 && SelectPatternResult::isMinOrMax(SPF1))
      return replaceInstUsesWith(Outer, Inner);

    // MAX(MIN(a, b), a) -> a
    // MIN(MAX(a, b), a) -> a
    // TODO: This could be done in instsimplify.
    if ((SPF1 == SPF_SMIN && SPF2 == SPF_SMAX) ||
        (SPF1 == SPF_SMAX && SPF2 == SPF_SMIN) ||
        (SPF1 == SPF_UMIN && SPF2 == SPF_UMAX) ||
        (SPF1 == SPF_UMAX && SPF2 == SPF_UMIN))
      return replaceInstUsesWith(Outer, C);
  }

  if (SPF1 == SPF2) {
    const APInt *CB, *CC;
    if (match(B, m_APInt(CB)) && match(C, m_APInt(CC))) {
      // MIN(MIN(A, 23), 97) -> MIN(A, 23)
      // MAX(MAX(A, 97), 23) -> MAX(A, 97)
      // TODO: This could be done in instsimplify.
      if ((SPF1 == SPF_UMIN && CB->ule(*CC)) ||
          (SPF1 == SPF_SMIN && CB->sle(*CC)) ||
          (SPF1 == SPF_UMAX && CB->uge(*CC)) ||
          (SPF1 == SPF_SMAX && CB->sge(*CC)))
        return replaceInstUsesWith(Outer, Inner);

      // MIN(MIN(A, 97), 23) -> MIN(A, 23)
      // MAX(MAX(A, 23), 97) -> MAX(A, 97)
      // The inner constant is redundant: bypass the inner op by pointing the
      // outer op directly at A and keep only the outer constant.
      if ((SPF1 == SPF_UMIN && CB->ugt(*CC)) ||
          (SPF1 == SPF_SMIN && CB->sgt(*CC)) ||
          (SPF1 == SPF_UMAX && CB->ult(*CC)) ||
          (SPF1 == SPF_SMAX && CB->slt(*CC))) {
        Outer.replaceUsesOfWith(Inner, A);
        return &Outer;
      }
    }
  }

  // max(max(A, B), min(A, B)) --> max(A, B)
  // min(min(A, B), max(A, B)) --> min(A, B)
  // TODO: This could be done in instsimplify.
  if (SPF1 == SPF2 &&
      ((SPF1 == SPF_UMIN && match(C, m_c_UMax(m_Specific(A), m_Specific(B)))) ||
       (SPF1 == SPF_SMIN && match(C, m_c_SMax(m_Specific(A), m_Specific(B)))) ||
       (SPF1 == SPF_UMAX && match(C, m_c_UMin(m_Specific(A), m_Specific(B)))) ||
       (SPF1 == SPF_SMAX && match(C, m_c_SMin(m_Specific(A), m_Specific(B))))))
    return replaceInstUsesWith(Outer, Inner);

  // ABS(ABS(X)) -> ABS(X)
  // NABS(NABS(X)) -> NABS(X)
  // TODO: This could be done in instsimplify.
  if (SPF1 == SPF2 && (SPF1 == SPF_ABS || SPF1 == SPF_NABS)) {
    return replaceInstUsesWith(Outer, Inner);
  }

  // ABS(NABS(X)) -> ABS(X)
  // NABS(ABS(X)) -> NABS(X)
  // Swapping the arms of the inner select inverts its sign-selection, turning
  // an abs into a nabs and vice versa.
  if ((SPF1 == SPF_ABS && SPF2 == SPF_NABS) ||
      (SPF1 == SPF_NABS && SPF2 == SPF_ABS)) {
    SelectInst *SI = cast<SelectInst>(Inner);
    Value *NewSI =
        Builder.CreateSelect(SI->getCondition(), SI->getFalseValue(),
                             SI->getTrueValue(), SI->getName(), SI);
    return replaceInstUsesWith(Outer, NewSI);
  }

  // Returns true if V is invertible at no cost: either it is already a 'not'
  // (NotV receives the existing inverted value), or isFreeToInvert accepts it
  // (NotV is set to nullptr so the caller knows to materialize the inversion).
  // ElidesXor accumulates whether at least one existing xor would die.
  auto IsFreeOrProfitableToInvert =
      [&](Value *V, Value *&NotV, bool &ElidesXor) {
        if (match(V, m_Not(m_Value(NotV)))) {
          // If V has at most 2 uses then we can get rid of the xor operation
          // entirely.
          ElidesXor |= !V->hasNUsesOrMore(3);
          return true;
        }

        if (isFreeToInvert(V, !V->hasNUsesOrMore(3))) {
          NotV = nullptr;
          return true;
        }

        return false;
      };

  Value *NotA, *NotB, *NotC;
  bool ElidesXor = false;

  // MIN(MIN(~A, ~B), ~C) == ~MAX(MAX(A, B), C)
  // MIN(MAX(~A, ~B), ~C) == ~MAX(MIN(A, B), C)
  // MAX(MIN(~A, ~B), ~C) == ~MIN(MAX(A, B), C)
  // MAX(MAX(~A, ~B), ~C) == ~MIN(MIN(A, B), C)
  //
  // This transform is performance neutral if we can elide at least one xor from
  // the set of three operands, since we'll be tacking on an xor at the very
  // end.
  if (SelectPatternResult::isMinOrMax(SPF1) &&
      SelectPatternResult::isMinOrMax(SPF2) &&
      IsFreeOrProfitableToInvert(A, NotA, ElidesXor) &&
      IsFreeOrProfitableToInvert(B, NotB, ElidesXor) &&
      IsFreeOrProfitableToInvert(C, NotC, ElidesXor) && ElidesXor) {
    // Materialize any inversions that did not already exist.
    if (!NotA)
      NotA = Builder.CreateNot(A);
    if (!NotB)
      NotB = Builder.CreateNot(B);
    if (!NotC)
      NotC = Builder.CreateNot(C);

    Value *NewInner = createMinMax(Builder, getInverseMinMaxFlavor(SPF1), NotA,
                                   NotB);
    Value *NewOuter = Builder.CreateNot(
        createMinMax(Builder, getInverseMinMaxFlavor(SPF2), NewInner, NotC));
    return replaceInstUsesWith(Outer, NewOuter);
  }

  return nullptr;
}
1692
1693 /// Turn select C, (X + Y), (X - Y) --> (X + (select C, Y, (-Y))).
1694 /// This is even legal for FP.
foldAddSubSelect(SelectInst & SI,InstCombiner::BuilderTy & Builder)1695 static Instruction *foldAddSubSelect(SelectInst &SI,
1696 InstCombiner::BuilderTy &Builder) {
1697 Value *CondVal = SI.getCondition();
1698 Value *TrueVal = SI.getTrueValue();
1699 Value *FalseVal = SI.getFalseValue();
1700 auto *TI = dyn_cast<Instruction>(TrueVal);
1701 auto *FI = dyn_cast<Instruction>(FalseVal);
1702 if (!TI || !FI || !TI->hasOneUse() || !FI->hasOneUse())
1703 return nullptr;
1704
1705 Instruction *AddOp = nullptr, *SubOp = nullptr;
1706 if ((TI->getOpcode() == Instruction::Sub &&
1707 FI->getOpcode() == Instruction::Add) ||
1708 (TI->getOpcode() == Instruction::FSub &&
1709 FI->getOpcode() == Instruction::FAdd)) {
1710 AddOp = FI;
1711 SubOp = TI;
1712 } else if ((FI->getOpcode() == Instruction::Sub &&
1713 TI->getOpcode() == Instruction::Add) ||
1714 (FI->getOpcode() == Instruction::FSub &&
1715 TI->getOpcode() == Instruction::FAdd)) {
1716 AddOp = TI;
1717 SubOp = FI;
1718 }
1719
1720 if (AddOp) {
1721 Value *OtherAddOp = nullptr;
1722 if (SubOp->getOperand(0) == AddOp->getOperand(0)) {
1723 OtherAddOp = AddOp->getOperand(1);
1724 } else if (SubOp->getOperand(0) == AddOp->getOperand(1)) {
1725 OtherAddOp = AddOp->getOperand(0);
1726 }
1727
1728 if (OtherAddOp) {
1729 // So at this point we know we have (Y -> OtherAddOp):
1730 // select C, (add X, Y), (sub X, Z)
1731 Value *NegVal; // Compute -Z
1732 if (SI.getType()->isFPOrFPVectorTy()) {
1733 NegVal = Builder.CreateFNeg(SubOp->getOperand(1));
1734 if (Instruction *NegInst = dyn_cast<Instruction>(NegVal)) {
1735 FastMathFlags Flags = AddOp->getFastMathFlags();
1736 Flags &= SubOp->getFastMathFlags();
1737 NegInst->setFastMathFlags(Flags);
1738 }
1739 } else {
1740 NegVal = Builder.CreateNeg(SubOp->getOperand(1));
1741 }
1742
1743 Value *NewTrueOp = OtherAddOp;
1744 Value *NewFalseOp = NegVal;
1745 if (AddOp != TI)
1746 std::swap(NewTrueOp, NewFalseOp);
1747 Value *NewSel = Builder.CreateSelect(CondVal, NewTrueOp, NewFalseOp,
1748 SI.getName() + ".p", &SI);
1749
1750 if (SI.getType()->isFPOrFPVectorTy()) {
1751 Instruction *RI =
1752 BinaryOperator::CreateFAdd(SubOp->getOperand(0), NewSel);
1753
1754 FastMathFlags Flags = AddOp->getFastMathFlags();
1755 Flags &= SubOp->getFastMathFlags();
1756 RI->setFastMathFlags(Flags);
1757 return RI;
1758 } else
1759 return BinaryOperator::CreateAdd(SubOp->getOperand(0), NewSel);
1760 }
1761 }
1762 return nullptr;
1763 }
1764
/// Turn X + Y overflows ? -1 : X + Y -> uadd_sat X, Y
/// And X - Y overflows ? 0 : X - Y -> usub_sat X, Y
/// Along with a number of patterns similar to:
/// X + Y overflows ? (X < 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
/// X - Y overflows ? (X > 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
static Instruction *
foldOverflowingAddSubSelect(SelectInst &SI, InstCombiner::BuilderTy &Builder) {
  Value *CondVal = SI.getCondition();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();

  // The condition must be the overflow flag (element 1) of a *.with.overflow
  // intrinsic, and the false arm must be the math result (element 0) of that
  // same intrinsic call.
  WithOverflowInst *II;
  if (!match(CondVal, m_ExtractValue<1>(m_WithOverflowInst(II))) ||
      !match(FalseVal, m_ExtractValue<0>(m_Specific(II))))
    return nullptr;

  Value *X = II->getLHS();
  Value *Y = II->getRHS();

  // Returns true if Limit is a select of INT_MIN/INT_MAX keyed off the sign
  // of X or Y — i.e. exactly the clamp value a signed saturating intrinsic
  // would produce on overflow.
  auto IsSignedSaturateLimit = [&](Value *Limit, bool IsAdd) {
    Type *Ty = Limit->getType();

    ICmpInst::Predicate Pred;
    Value *TrueVal, *FalseVal, *Op;
    const APInt *C;
    if (!match(Limit, m_Select(m_ICmp(Pred, m_Value(Op), m_APInt(C)),
                               m_Value(TrueVal), m_Value(FalseVal))))
      return false;

    // Helper for the sign tests: 'X <s 0' and 'X <s 1' (resp. '>s 0'/'>s -1')
    // are equivalent sign checks, so each pattern below accepts a constant of
    // 0 or 1 after an optional offset.
    auto IsZeroOrOne = [](const APInt &C) {
      return C.isNullValue() || C.isOneValue();
    };
    auto IsMinMax = [&](Value *Min, Value *Max) {
      APInt MinVal = APInt::getSignedMinValue(Ty->getScalarSizeInBits());
      APInt MaxVal = APInt::getSignedMaxValue(Ty->getScalarSizeInBits());
      return match(Min, m_SpecificInt(MinVal)) &&
             match(Max, m_SpecificInt(MaxVal));
    };

    // The compared value must be one of the intrinsic's operands.
    if (Op != X && Op != Y)
      return false;

    if (IsAdd) {
      // X + Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
      // X + Y overflows ? (X <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
      // X + Y overflows ? (Y <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
      // X + Y overflows ? (Y <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
      if (Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C) &&
          IsMinMax(TrueVal, FalseVal))
        return true;
      // X + Y overflows ? (X >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
      // X + Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
      // X + Y overflows ? (Y >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
      // X + Y overflows ? (Y >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
      if (Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 1) &&
          IsMinMax(FalseVal, TrueVal))
        return true;
    } else {
      // X - Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
      // X - Y overflows ? (X <s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
      if (Op == X && Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C + 1) &&
          IsMinMax(TrueVal, FalseVal))
        return true;
      // X - Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
      // X - Y overflows ? (X >s -2 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
      if (Op == X && Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 2) &&
          IsMinMax(FalseVal, TrueVal))
        return true;
      // X - Y overflows ? (Y <s 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
      // X - Y overflows ? (Y <s 1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
      if (Op == Y && Pred == ICmpInst::ICMP_SLT && IsZeroOrOne(*C) &&
          IsMinMax(FalseVal, TrueVal))
        return true;
      // X - Y overflows ? (Y >s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
      // X - Y overflows ? (Y >s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
      if (Op == Y && Pred == ICmpInst::ICMP_SGT && IsZeroOrOne(*C + 1) &&
          IsMinMax(TrueVal, FalseVal))
        return true;
    }

    return false;
  };

  Intrinsic::ID NewIntrinsicID;
  if (II->getIntrinsicID() == Intrinsic::uadd_with_overflow &&
      match(TrueVal, m_AllOnes()))
    // X + Y overflows ? -1 : X + Y -> uadd_sat X, Y
    NewIntrinsicID = Intrinsic::uadd_sat;
  else if (II->getIntrinsicID() == Intrinsic::usub_with_overflow &&
           match(TrueVal, m_Zero()))
    // X - Y overflows ? 0 : X - Y -> usub_sat X, Y
    NewIntrinsicID = Intrinsic::usub_sat;
  else if (II->getIntrinsicID() == Intrinsic::sadd_with_overflow &&
           IsSignedSaturateLimit(TrueVal, /*IsAdd=*/true))
    // X + Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (X <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (X >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (Y <s 0 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (Y <s 1 ? INTMIN : INTMAX) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (Y >s 0 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
    // X + Y overflows ? (Y >s -1 ? INTMAX : INTMIN) : X + Y --> sadd_sat X, Y
    NewIntrinsicID = Intrinsic::sadd_sat;
  else if (II->getIntrinsicID() == Intrinsic::ssub_with_overflow &&
           IsSignedSaturateLimit(TrueVal, /*IsAdd=*/false))
    // X - Y overflows ? (X <s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (X <s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (X >s -1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (X >s -2 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (Y <s 0 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (Y <s 1 ? INTMAX : INTMIN) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (Y >s 0 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
    // X - Y overflows ? (Y >s -1 ? INTMIN : INTMAX) : X - Y --> ssub_sat X, Y
    NewIntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

  Function *F =
      Intrinsic::getDeclaration(SI.getModule(), NewIntrinsicID, SI.getType());
  return CallInst::Create(F, {X, Y});
}
1886
/// Fold a select where one arm is a zext/sext instruction and the other is a
/// constant: either narrow the select to operate on the pre-extension type,
/// or, when the extended value is the select's own condition, replace that arm
/// with the known bool value (extended).
Instruction *InstCombinerImpl::foldSelectExtConst(SelectInst &Sel) {
  // One arm must be a constant...
  Constant *C;
  if (!match(Sel.getTrueValue(), m_Constant(C)) &&
      !match(Sel.getFalseValue(), m_Constant(C)))
    return nullptr;

  // ...and one arm must be an instruction (the candidate extend).
  Instruction *ExtInst;
  if (!match(Sel.getTrueValue(), m_Instruction(ExtInst)) &&
      !match(Sel.getFalseValue(), m_Instruction(ExtInst)))
    return nullptr;

  auto ExtOpcode = ExtInst->getOpcode();
  if (ExtOpcode != Instruction::ZExt && ExtOpcode != Instruction::SExt)
    return nullptr;

  // If we are extending from a boolean type or if we can create a select that
  // has the same size operands as its condition, try to narrow the select.
  Value *X = ExtInst->getOperand(0);
  Type *SmallType = X->getType();
  Value *Cond = Sel.getCondition();
  auto *Cmp = dyn_cast<CmpInst>(Cond);
  if (!SmallType->isIntOrIntVectorTy(1) &&
      (!Cmp || Cmp->getOperand(0)->getType() != SmallType))
    return nullptr;

  // If the constant is the same after truncation to the smaller type and
  // extension to the original type, we can narrow the select.
  Type *SelType = Sel.getType();
  Constant *TruncC = ConstantExpr::getTrunc(C, SmallType);
  Constant *ExtC = ConstantExpr::getCast(ExtOpcode, TruncC, SelType);
  if (ExtC == C && ExtInst->hasOneUse()) {
    Value *TruncCVal = cast<Value>(TruncC);
    // Keep the operand order matching the original arms.
    if (ExtInst == Sel.getFalseValue())
      std::swap(X, TruncCVal);

    // select Cond, (ext X), C --> ext(select Cond, X, C')
    // select Cond, C, (ext X) --> ext(select Cond, C', X)
    Value *NewSel = Builder.CreateSelect(Cond, X, TruncCVal, "narrow", &Sel);
    return CastInst::Create(Instruction::CastOps(ExtOpcode), NewSel, SelType);
  }

  // If one arm of the select is the extend of the condition, replace that arm
  // with the extension of the appropriate known bool value.
  if (Cond == X) {
    if (ExtInst == Sel.getTrueValue()) {
      // That arm is only reached when the condition is true, so X is i1 'true':
      // select X, (sext X), C --> select X, -1, C
      // select X, (zext X), C --> select X, 1, C
      Constant *One = ConstantInt::getTrue(SmallType);
      Constant *AllOnesOrOne = ConstantExpr::getCast(ExtOpcode, One, SelType);
      return SelectInst::Create(Cond, AllOnesOrOne, C, "", nullptr, &Sel);
    } else {
      // The false arm is only reached when X is i1 'false', so either extend
      // of it is zero:
      // select X, C, (sext X) --> select X, C, 0
      // select X, C, (zext X) --> select X, C, 0
      Constant *Zero = ConstantInt::getNullValue(SelType);
      return SelectInst::Create(Cond, C, Zero, "", nullptr, &Sel);
    }
  }

  return nullptr;
}
1947
1948 /// Try to transform a vector select with a constant condition vector into a
1949 /// shuffle for easier combining with other shuffles and insert/extract.
canonicalizeSelectToShuffle(SelectInst & SI)1950 static Instruction *canonicalizeSelectToShuffle(SelectInst &SI) {
1951 Value *CondVal = SI.getCondition();
1952 Constant *CondC;
1953 auto *CondValTy = dyn_cast<FixedVectorType>(CondVal->getType());
1954 if (!CondValTy || !match(CondVal, m_Constant(CondC)))
1955 return nullptr;
1956
1957 unsigned NumElts = CondValTy->getNumElements();
1958 SmallVector<int, 16> Mask;
1959 Mask.reserve(NumElts);
1960 for (unsigned i = 0; i != NumElts; ++i) {
1961 Constant *Elt = CondC->getAggregateElement(i);
1962 if (!Elt)
1963 return nullptr;
1964
1965 if (Elt->isOneValue()) {
1966 // If the select condition element is true, choose from the 1st vector.
1967 Mask.push_back(i);
1968 } else if (Elt->isNullValue()) {
1969 // If the select condition element is false, choose from the 2nd vector.
1970 Mask.push_back(i + NumElts);
1971 } else if (isa<UndefValue>(Elt)) {
1972 // Undef in a select condition (choose one of the operands) does not mean
1973 // the same thing as undef in a shuffle mask (any value is acceptable), so
1974 // give up.
1975 return nullptr;
1976 } else {
1977 // Bail out on a constant expression.
1978 return nullptr;
1979 }
1980 }
1981
1982 return new ShuffleVectorInst(SI.getTrueValue(), SI.getFalseValue(), Mask);
1983 }
1984
1985 /// If we have a select of vectors with a scalar condition, try to convert that
1986 /// to a vector select by splatting the condition. A splat may get folded with
1987 /// other operations in IR and having all operands of a select be vector types
1988 /// is likely better for vector codegen.
canonicalizeScalarSelectOfVecs(SelectInst & Sel,InstCombinerImpl & IC)1989 static Instruction *canonicalizeScalarSelectOfVecs(SelectInst &Sel,
1990 InstCombinerImpl &IC) {
1991 auto *Ty = dyn_cast<VectorType>(Sel.getType());
1992 if (!Ty)
1993 return nullptr;
1994
1995 // We can replace a single-use extract with constant index.
1996 Value *Cond = Sel.getCondition();
1997 if (!match(Cond, m_OneUse(m_ExtractElt(m_Value(), m_ConstantInt()))))
1998 return nullptr;
1999
2000 // select (extelt V, Index), T, F --> select (splat V, Index), T, F
2001 // Splatting the extracted condition reduces code (we could directly create a
2002 // splat shuffle of the source vector to eliminate the intermediate step).
2003 return IC.replaceOperand(
2004 Sel, 0, IC.Builder.CreateVectorSplat(Ty->getElementCount(), Cond));
2005 }
2006
2007 /// Reuse bitcasted operands between a compare and select:
2008 /// select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
2009 /// bitcast (select (cmp (bitcast C), (bitcast D)), (bitcast C), (bitcast D))
foldSelectCmpBitcasts(SelectInst & Sel,InstCombiner::BuilderTy & Builder)2010 static Instruction *foldSelectCmpBitcasts(SelectInst &Sel,
2011 InstCombiner::BuilderTy &Builder) {
2012 Value *Cond = Sel.getCondition();
2013 Value *TVal = Sel.getTrueValue();
2014 Value *FVal = Sel.getFalseValue();
2015
2016 CmpInst::Predicate Pred;
2017 Value *A, *B;
2018 if (!match(Cond, m_Cmp(Pred, m_Value(A), m_Value(B))))
2019 return nullptr;
2020
2021 // The select condition is a compare instruction. If the select's true/false
2022 // values are already the same as the compare operands, there's nothing to do.
2023 if (TVal == A || TVal == B || FVal == A || FVal == B)
2024 return nullptr;
2025
2026 Value *C, *D;
2027 if (!match(A, m_BitCast(m_Value(C))) || !match(B, m_BitCast(m_Value(D))))
2028 return nullptr;
2029
2030 // select (cmp (bitcast C), (bitcast D)), (bitcast TSrc), (bitcast FSrc)
2031 Value *TSrc, *FSrc;
2032 if (!match(TVal, m_BitCast(m_Value(TSrc))) ||
2033 !match(FVal, m_BitCast(m_Value(FSrc))))
2034 return nullptr;
2035
2036 // If the select true/false values are *different bitcasts* of the same source
2037 // operands, make the select operands the same as the compare operands and
2038 // cast the result. This is the canonical select form for min/max.
2039 Value *NewSel;
2040 if (TSrc == C && FSrc == D) {
2041 // select (cmp (bitcast C), (bitcast D)), (bitcast' C), (bitcast' D) -->
2042 // bitcast (select (cmp A, B), A, B)
2043 NewSel = Builder.CreateSelect(Cond, A, B, "", &Sel);
2044 } else if (TSrc == D && FSrc == C) {
2045 // select (cmp (bitcast C), (bitcast D)), (bitcast' D), (bitcast' C) -->
2046 // bitcast (select (cmp A, B), B, A)
2047 NewSel = Builder.CreateSelect(Cond, B, A, "", &Sel);
2048 } else {
2049 return nullptr;
2050 }
2051 return CastInst::CreateBitOrPointerCast(NewSel, Sel.getType());
2052 }
2053
2054 /// Try to eliminate select instructions that test the returned flag of cmpxchg
2055 /// instructions.
2056 ///
2057 /// If a select instruction tests the returned flag of a cmpxchg instruction and
2058 /// selects between the returned value of the cmpxchg instruction its compare
2059 /// operand, the result of the select will always be equal to its false value.
2060 /// For example:
2061 ///
2062 /// %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
2063 /// %1 = extractvalue { i64, i1 } %0, 1
2064 /// %2 = extractvalue { i64, i1 } %0, 0
2065 /// %3 = select i1 %1, i64 %compare, i64 %2
2066 /// ret i64 %3
2067 ///
2068 /// The returned value of the cmpxchg instruction (%2) is the original value
2069 /// located at %ptr prior to any update. If the cmpxchg operation succeeds, %2
2070 /// must have been equal to %compare. Thus, the result of the select is always
2071 /// equal to %2, and the code can be simplified to:
2072 ///
2073 /// %0 = cmpxchg i64* %ptr, i64 %compare, i64 %new_value seq_cst seq_cst
2074 /// %1 = extractvalue { i64, i1 } %0, 0
2075 /// ret i64 %1
2076 ///
foldSelectCmpXchg(SelectInst & SI)2077 static Value *foldSelectCmpXchg(SelectInst &SI) {
2078 // A helper that determines if V is an extractvalue instruction whose
2079 // aggregate operand is a cmpxchg instruction and whose single index is equal
2080 // to I. If such conditions are true, the helper returns the cmpxchg
2081 // instruction; otherwise, a nullptr is returned.
2082 auto isExtractFromCmpXchg = [](Value *V, unsigned I) -> AtomicCmpXchgInst * {
2083 auto *Extract = dyn_cast<ExtractValueInst>(V);
2084 if (!Extract)
2085 return nullptr;
2086 if (Extract->getIndices()[0] != I)
2087 return nullptr;
2088 return dyn_cast<AtomicCmpXchgInst>(Extract->getAggregateOperand());
2089 };
2090
2091 // If the select has a single user, and this user is a select instruction that
2092 // we can simplify, skip the cmpxchg simplification for now.
2093 if (SI.hasOneUse())
2094 if (auto *Select = dyn_cast<SelectInst>(SI.user_back()))
2095 if (Select->getCondition() == SI.getCondition())
2096 if (Select->getFalseValue() == SI.getTrueValue() ||
2097 Select->getTrueValue() == SI.getFalseValue())
2098 return nullptr;
2099
2100 // Ensure the select condition is the returned flag of a cmpxchg instruction.
2101 auto *CmpXchg = isExtractFromCmpXchg(SI.getCondition(), 1);
2102 if (!CmpXchg)
2103 return nullptr;
2104
2105 // Check the true value case: The true value of the select is the returned
2106 // value of the same cmpxchg used by the condition, and the false value is the
2107 // cmpxchg instruction's compare operand.
2108 if (auto *X = isExtractFromCmpXchg(SI.getTrueValue(), 0))
2109 if (X == CmpXchg && X->getCompareOperand() == SI.getFalseValue())
2110 return SI.getFalseValue();
2111
2112 // Check the false value case: The false value of the select is the returned
2113 // value of the same cmpxchg used by the condition, and the true value is the
2114 // cmpxchg instruction's compare operand.
2115 if (auto *X = isExtractFromCmpXchg(SI.getFalseValue(), 0))
2116 if (X == CmpXchg && X->getCompareOperand() == SI.getTrueValue())
2117 return SI.getFalseValue();
2118
2119 return nullptr;
2120 }
2121
/// Reassociate a min/max whose first operand is a no-wrap add of a constant:
///   umin (add nuw A, C1), C2 --> add nuw (umin A, C2 - C1), C1
/// (and similarly for umax/smin/smax). The no-wrap flag justifies moving the
/// add past the min/max; X must have exactly 2 uses (the pattern's cmp and
/// select) so the original add becomes dead.
static Instruction *moveAddAfterMinMax(SelectPatternFlavor SPF, Value *X,
                                       Value *Y,
                                       InstCombiner::BuilderTy &Builder) {
  assert(SelectPatternResult::isMinOrMax(SPF) && "Expected min/max pattern");
  bool IsUnsigned = SPF == SelectPatternFlavor::SPF_UMIN ||
                    SPF == SelectPatternFlavor::SPF_UMAX;
  // TODO: If InstSimplify could fold all cases where C2 <= C1, we could change
  // the constant value check to an assert.
  Value *A;
  const APInt *C1, *C2;
  if (IsUnsigned && match(X, m_NUWAdd(m_Value(A), m_APInt(C1))) &&
      match(Y, m_APInt(C2)) && C2->uge(*C1) && X->hasNUses(2)) {
    // umin (add nuw A, C1), C2 --> add nuw (umin A, C2 - C1), C1
    // umax (add nuw A, C1), C2 --> add nuw (umax A, C2 - C1), C1
    Value *NewMinMax = createMinMax(Builder, SPF, A,
                                    ConstantInt::get(X->getType(), *C2 - *C1));
    return BinaryOperator::CreateNUW(BinaryOperator::Add, NewMinMax,
                                     ConstantInt::get(X->getType(), *C1));
  }

  if (!IsUnsigned && match(X, m_NSWAdd(m_Value(A), m_APInt(C1))) &&
      match(Y, m_APInt(C2)) && X->hasNUses(2)) {
    bool Overflow;
    // C2 - C1 must be representable without signed overflow for the
    // reassociation to be valid.
    APInt Diff = C2->ssub_ov(*C1, Overflow);
    if (!Overflow) {
      // smin (add nsw A, C1), C2 --> add nsw (smin A, C2 - C1), C1
      // smax (add nsw A, C1), C2 --> add nsw (smax A, C2 - C1), C1
      Value *NewMinMax = createMinMax(Builder, SPF, A,
                                      ConstantInt::get(X->getType(), Diff));
      return BinaryOperator::CreateNSW(BinaryOperator::Add, NewMinMax,
                                       ConstantInt::get(X->getType(), *C1));
    }
  }

  return nullptr;
}
2158
/// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
Instruction *InstCombinerImpl::matchSAddSubSat(SelectInst &MinMax1) {
  Type *Ty = MinMax1.getType();

  // We are looking for a tree of:
  // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
  // Where the min and max could be reversed
  Instruction *MinMax2;
  BinaryOperator *AddSub;
  const APInt *MinValue, *MaxValue;
  if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
    if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
      return nullptr;
  } else if (match(&MinMax1,
                   m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
    if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
      return nullptr;
  } else
    return nullptr;

  // Check that the constants clamp a saturate (i.e. they are a symmetric
  // power-of-2 range like [-2^k, 2^k - 1]), and that the new type would be
  // sensible to convert to.
  if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
    return nullptr;
  // In what bitwidth can this be treated as saturating arithmetics?
  unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
  // FIXME: This isn't quite right for vectors, but using the scalar type is a
  // good first approximation for what should be done there.
  if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
    return nullptr;

  // Also make sure that the number of uses is as expected. The 3s account for
  // the two uses from each min/max pattern (its compare and its select).
  if (MinMax2->hasNUsesOrMore(3) || AddSub->hasNUsesOrMore(3))
    return nullptr;

  // Create the new type (which can be a vector type)
  Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);
  // Match the two extends from the add/sub
  Value *A, *B;
  if(!match(AddSub, m_BinOp(m_SExt(m_Value(A)), m_SExt(m_Value(B)))))
    return nullptr;
  // And check the incoming values are of a type smaller than or equal to the
  // size of the saturation. Otherwise the higher bits can cause different
  // results.
  if (A->getType()->getScalarSizeInBits() > NewBitWidth ||
      B->getType()->getScalarSizeInBits() > NewBitWidth)
    return nullptr;

  Intrinsic::ID IntrinsicID;
  if (AddSub->getOpcode() == Instruction::Add)
    IntrinsicID = Intrinsic::sadd_sat;
  else if (AddSub->getOpcode() == Instruction::Sub)
    IntrinsicID = Intrinsic::ssub_sat;
  else
    return nullptr;

  // Finally create and return the sat intrinsic, truncated to the new type
  Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
  Value *AT = Builder.CreateSExt(A, NewTy);
  Value *BT = Builder.CreateSExt(B, NewTy);
  Value *Sat = Builder.CreateCall(F, {AT, BT});
  return CastInst::Create(Instruction::SExt, Sat, Ty);
}
2223
/// Reduce a sequence of min/max with a common operand.
static Instruction *factorizeMinMaxTree(SelectPatternFlavor SPF, Value *LHS,
                                        Value *RHS,
                                        InstCombiner::BuilderTy &Builder) {
  assert(SelectPatternResult::isMinOrMax(SPF) && "Expected a min/max");
  // TODO: Allow FP min/max with nnan/nsz.
  if (!LHS->getType()->isIntOrIntVectorTy())
    return nullptr;

  // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
  Value *A, *B, *C, *D;
  SelectPatternResult L = matchSelectPattern(LHS, A, B);
  SelectPatternResult R = matchSelectPattern(RHS, C, D);
  if (SPF != L.Flavor || L.Flavor != R.Flavor)
    return nullptr;

  // Look for a common operand. The use checks are different than usual because
  // a min/max pattern typically has 2 uses of each op: 1 by the cmp and 1 by
  // the select.
  Value *MinMaxOp = nullptr;
  Value *ThirdOp = nullptr;
  if (!LHS->hasNUsesOrMore(3) && RHS->hasNUsesOrMore(3)) {
    // If the LHS is only used in this chain and the RHS is used outside of it,
    // reuse the RHS min/max because that will eliminate the LHS.
    if (D == A || C == A) {
      // min(min(a, b), min(c, a)) --> min(min(c, a), b)
      // min(min(a, b), min(a, d)) --> min(min(a, d), b)
      MinMaxOp = RHS;
      ThirdOp = B;
    } else if (D == B || C == B) {
      // min(min(a, b), min(c, b)) --> min(min(c, b), a)
      // min(min(a, b), min(b, d)) --> min(min(b, d), a)
      MinMaxOp = RHS;
      ThirdOp = A;
    }
  } else if (!RHS->hasNUsesOrMore(3)) {
    // Reuse the LHS. This will eliminate the RHS.
    if (D == A || D == B) {
      // min(min(a, b), min(c, a)) --> min(min(a, b), c)
      // min(min(a, b), min(c, b)) --> min(min(a, b), c)
      MinMaxOp = LHS;
      ThirdOp = C;
    } else if (C == A || C == B) {
      // min(min(a, b), min(a, d)) --> min(min(a, b), d)
      // min(min(a, b), min(b, d)) --> min(min(a, b), d)
      MinMaxOp = LHS;
      ThirdOp = D;
    }
  }
  if (!MinMaxOp || !ThirdOp)
    return nullptr;

  // Rebuild the outer op as min/max(MinMaxOp, ThirdOp) in canonical
  // cmp+select form.
  CmpInst::Predicate P = getMinMaxPred(SPF);
  Value *CmpABC = Builder.CreateICmp(P, MinMaxOp, ThirdOp);
  return SelectInst::Create(CmpABC, MinMaxOp, ThirdOp);
}
2280
/// Try to reduce a funnel/rotate pattern that includes a compare and select
/// into a funnel shift intrinsic. Example:
/// rotl32(a, b) --> (b == 0 ? a : ((a >> (32 - b)) | (a << b)))
///              --> call llvm.fshl.i32(a, a, b)
/// fshl32(a, b, c) --> (c == 0 ? a : ((b >> (32 - c)) | (a << c)))
///                 --> call llvm.fshl.i32(a, b, c)
/// fshr32(a, b, c) --> (c == 0 ? b : ((a >> (32 - c)) | (b << c)))
///                 --> call llvm.fshr.i32(a, b, c)
static Instruction *foldSelectFunnelShift(SelectInst &Sel,
                                          InstCombiner::BuilderTy &Builder) {
  // This must be a power-of-2 type for a bitmasking transform to be valid.
  unsigned Width = Sel.getType()->getScalarSizeInBits();
  if (!isPowerOf2_32(Width))
    return nullptr;

  // The false arm must be an 'or' of two shifts (the manually expanded
  // funnel-shift body).
  BinaryOperator *Or0, *Or1;
  if (!match(Sel.getFalseValue(), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  // One shift must be left, the other right (opcode inequality check below).
  Value *SV0, *SV1, *SA0, *SA1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(SV0),
                                          m_ZExtOrSelf(m_Value(SA0))))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(SV1),
                                          m_ZExtOrSelf(m_Value(SA1))))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(SV0, SA0), lshr(SV1, SA1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(SV0, SV1);
    std::swap(SA0, SA1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Check the shift amounts to see if they are an opposite pair
  // (one is Width minus the other).
  Value *ShAmt;
  if (match(SA1, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA0)))))
    ShAmt = SA0;
  else if (match(SA0, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(SA1)))))
    ShAmt = SA1;
  else
    return nullptr;

  // We should now have this pattern:
  // select ?, TVal, (or (shl SV0, SA0), (lshr SV1, SA1))
  // The false value of the select must be a funnel-shift of the true value:
  // IsFShl -> TVal must be SV0 else TVal must be SV1.
  bool IsFshl = (ShAmt == SA0);
  Value *TVal = Sel.getTrueValue();
  if ((IsFshl && TVal != SV0) || (!IsFshl && TVal != SV1))
    return nullptr;

  // Finally, see if the select is filtering out a shift-by-zero.
  Value *Cond = Sel.getCondition();
  ICmpInst::Predicate Pred;
  if (!match(Cond, m_OneUse(m_ICmp(Pred, m_Specific(ShAmt), m_ZeroInt()))) ||
      Pred != ICmpInst::ICMP_EQ)
    return nullptr;

  // If this is not a rotate then the select was blocking poison from the
  // 'shift-by-zero' non-TVal, but a funnel shift won't - so freeze it.
  if (SV0 != SV1) {
    if (IsFshl && !llvm::isGuaranteedNotToBePoison(SV1))
      SV1 = Builder.CreateFreeze(SV1);
    else if (!IsFshl && !llvm::isGuaranteedNotToBePoison(SV0))
      SV0 = Builder.CreateFreeze(SV0);
  }

  // This is a funnel/rotate that avoids shift-by-bitwidth UB in a suboptimal way.
  // Convert to funnel shift intrinsic.
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Sel.getModule(), IID, Sel.getType());
  ShAmt = Builder.CreateZExt(ShAmt, Sel.getType());
  return IntrinsicInst::Create(F, { SV0, SV1, ShAmt });
}
2359
/// Fold a select of two FP constants that differ only in sign, guarded by a
/// sign-bit test of a bitcasted value, into a call to llvm.copysign.
static Instruction *foldSelectToCopysign(SelectInst &Sel,
                                         InstCombiner::BuilderTy &Builder) {
  Value *Cond = Sel.getCondition();
  Value *TVal = Sel.getTrueValue();
  Value *FVal = Sel.getFalseValue();
  Type *SelType = Sel.getType();

  // Match select ?, TC, FC where the constants are equal but negated.
  // TODO: Generalize to handle a negated variable operand?
  const APFloat *TC, *FC;
  if (!match(TVal, m_APFloat(TC)) || !match(FVal, m_APFloat(FC)) ||
      !abs(*TC).bitwiseIsEqual(abs(*FC)))
    return nullptr;

  // Identical arms would already have been simplified away before we get here.
  assert(TC != FC && "Expected equal select arms to simplify");

  // The condition must be a one-use sign-bit check (icmp of a bitcast of X
  // against a constant), where X has the same type as the select.
  Value *X;
  const APInt *C;
  bool IsTrueIfSignSet;
  ICmpInst::Predicate Pred;
  if (!match(Cond, m_OneUse(m_ICmp(Pred, m_BitCast(m_Value(X)), m_APInt(C)))) ||
      !InstCombiner::isSignBitCheck(Pred, *C, IsTrueIfSignSet) ||
      X->getType() != SelType)
    return nullptr;

  // If needed, negate the value that will be the sign argument of the copysign:
  // (bitcast X) <  0 ? -TC :  TC --> copysign(TC,  X)
  // (bitcast X) <  0 ?  TC : -TC --> copysign(TC, -X)
  // (bitcast X) >= 0 ? -TC :  TC --> copysign(TC, -X)
  // (bitcast X) >= 0 ?  TC : -TC --> copysign(TC,  X)
  if (IsTrueIfSignSet ^ TC->isNegative())
    X = Builder.CreateFNegFMF(X, &Sel);

  // Canonicalize the magnitude argument as the positive constant since we do
  // not care about its sign.
  Value *MagArg = TC->isNegative() ? FVal : TVal;
  Function *F = Intrinsic::getDeclaration(Sel.getModule(), Intrinsic::copysign,
                                          Sel.getType());
  Instruction *CopySign = IntrinsicInst::Create(F, { MagArg, X });
  // Propagate the select's fast-math flags onto the new intrinsic call.
  CopySign->setFastMathFlags(Sel.getFastMathFlags());
  return CopySign;
}
2402
Instruction *InstCombinerImpl::foldVectorSelect(SelectInst &Sel) {
  // Only fixed-width vector selects are handled here.
  auto *VecTy = dyn_cast<FixedVectorType>(Sel.getType());
  if (!VecTy)
    return nullptr;

  // First try to simplify based on which vector elements are actually demanded.
  unsigned NumElts = VecTy->getNumElements();
  APInt UndefElts(NumElts, 0);
  APInt AllOnesEltMask(APInt::getAllOnesValue(NumElts));
  if (Value *V = SimplifyDemandedVectorElts(&Sel, AllOnesEltMask, UndefElts)) {
    if (V != &Sel)
      return replaceInstUsesWith(Sel, V);
    // The select itself was modified in place; re-queue it.
    return &Sel;
  }

  // A select of a "select shuffle" with a common operand can be rearranged
  // to select followed by "select shuffle". Because of poison, this only works
  // in the case of a shuffle with no undefined mask elements.
  Value *Cond = Sel.getCondition();
  Value *TVal = Sel.getTrueValue();
  Value *FVal = Sel.getFalseValue();
  Value *X, *Y;
  ArrayRef<int> Mask;
  if (match(TVal, m_OneUse(m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask)))) &&
      !is_contained(Mask, UndefMaskElem) &&
      cast<ShuffleVectorInst>(TVal)->isSelect()) {
    if (X == FVal) {
      // select Cond, (shuf_sel X, Y), X --> shuf_sel X, (select Cond, Y, X)
      Value *NewSel = Builder.CreateSelect(Cond, Y, X, "sel", &Sel);
      return new ShuffleVectorInst(X, NewSel, Mask);
    }
    if (Y == FVal) {
      // select Cond, (shuf_sel X, Y), Y --> shuf_sel (select Cond, X, Y), Y
      Value *NewSel = Builder.CreateSelect(Cond, X, Y, "sel", &Sel);
      return new ShuffleVectorInst(NewSel, Y, Mask);
    }
  }
  // Same transform with the shuffle on the false arm instead.
  if (match(FVal, m_OneUse(m_Shuffle(m_Value(X), m_Value(Y), m_Mask(Mask)))) &&
      !is_contained(Mask, UndefMaskElem) &&
      cast<ShuffleVectorInst>(FVal)->isSelect()) {
    if (X == TVal) {
      // select Cond, X, (shuf_sel X, Y) --> shuf_sel X, (select Cond, X, Y)
      Value *NewSel = Builder.CreateSelect(Cond, X, Y, "sel", &Sel);
      return new ShuffleVectorInst(X, NewSel, Mask);
    }
    if (Y == TVal) {
      // select Cond, Y, (shuf_sel X, Y) --> shuf_sel (select Cond, Y, X), Y
      Value *NewSel = Builder.CreateSelect(Cond, Y, X, "sel", &Sel);
      return new ShuffleVectorInst(NewSel, Y, Mask);
    }
  }

  return nullptr;
}
2456
/// Try to replace the select in block \p BB with a phi, using the conditional
/// branch in BB's immediate dominator to decide which arm flows in from each
/// predecessor edge.
static Instruction *foldSelectToPhiImpl(SelectInst &Sel, BasicBlock *BB,
                                        const DominatorTree &DT,
                                        InstCombiner::BuilderTy &Builder) {
  // Find the block's immediate dominator that ends with a conditional branch
  // that matches select's condition (maybe inverted).
  auto *IDomNode = DT[BB]->getIDom();
  if (!IDomNode)
    return nullptr;
  BasicBlock *IDom = IDomNode->getBlock();

  Value *Cond = Sel.getCondition();
  Value *IfTrue, *IfFalse;
  BasicBlock *TrueSucc, *FalseSucc;
  if (match(IDom->getTerminator(),
            m_Br(m_Specific(Cond), m_BasicBlock(TrueSucc),
                 m_BasicBlock(FalseSucc)))) {
    IfTrue = Sel.getTrueValue();
    IfFalse = Sel.getFalseValue();
  } else if (match(IDom->getTerminator(),
                   m_Br(m_Not(m_Specific(Cond)), m_BasicBlock(TrueSucc),
                        m_BasicBlock(FalseSucc)))) {
    // Branch on the inverted condition: the select arms swap roles.
    IfTrue = Sel.getFalseValue();
    IfFalse = Sel.getTrueValue();
  } else
    return nullptr;

  // Make sure the branches are actually different.
  if (TrueSucc == FalseSucc)
    return nullptr;

  // We want to replace select %cond, %a, %b with a phi that takes value %a
  // for all incoming edges that are dominated by condition `%cond == true`,
  // and value %b for edges dominated by condition `%cond == false`. If %a
  // or %b are also phis from the same basic block, we can go further and take
  // their incoming values from the corresponding blocks.
  BasicBlockEdge TrueEdge(IDom, TrueSucc);
  BasicBlockEdge FalseEdge(IDom, FalseSucc);
  DenseMap<BasicBlock *, Value *> Inputs;
  for (auto *Pred : predecessors(BB)) {
    // Check implication: every incoming edge must be dominated by one of the
    // two branch edges, or we cannot tell which arm it should carry.
    BasicBlockEdge Incoming(Pred, BB);
    if (DT.dominates(TrueEdge, Incoming))
      Inputs[Pred] = IfTrue->DoPHITranslation(BB, Pred);
    else if (DT.dominates(FalseEdge, Incoming))
      Inputs[Pred] = IfFalse->DoPHITranslation(BB, Pred);
    else
      return nullptr;
    // Check availability: the chosen value must dominate the end of the
    // predecessor so it is usable as a phi input on that edge.
    if (auto *Insn = dyn_cast<Instruction>(Inputs[Pred]))
      if (!DT.dominates(Insn, Pred->getTerminator()))
        return nullptr;
  }

  // Build the phi at the top of BB with one input per predecessor.
  Builder.SetInsertPoint(&*BB->begin());
  auto *PN = Builder.CreatePHI(Sel.getType(), Inputs.size());
  for (auto *Pred : predecessors(BB))
    PN->addIncoming(Inputs[Pred], Pred);
  PN->takeName(&Sel);
  return PN;
}
2517
foldSelectToPhi(SelectInst & Sel,const DominatorTree & DT,InstCombiner::BuilderTy & Builder)2518 static Instruction *foldSelectToPhi(SelectInst &Sel, const DominatorTree &DT,
2519 InstCombiner::BuilderTy &Builder) {
2520 // Try to replace this select with Phi in one of these blocks.
2521 SmallSetVector<BasicBlock *, 4> CandidateBlocks;
2522 CandidateBlocks.insert(Sel.getParent());
2523 for (Value *V : Sel.operands())
2524 if (auto *I = dyn_cast<Instruction>(V))
2525 CandidateBlocks.insert(I->getParent());
2526
2527 for (BasicBlock *BB : CandidateBlocks)
2528 if (auto *PN = foldSelectToPhiImpl(Sel, BB, DT, Builder))
2529 return PN;
2530 return nullptr;
2531 }
2532
foldSelectWithFrozenICmp(SelectInst & Sel,InstCombiner::BuilderTy & Builder)2533 static Value *foldSelectWithFrozenICmp(SelectInst &Sel, InstCombiner::BuilderTy &Builder) {
2534 FreezeInst *FI = dyn_cast<FreezeInst>(Sel.getCondition());
2535 if (!FI)
2536 return nullptr;
2537
2538 Value *Cond = FI->getOperand(0);
2539 Value *TrueVal = Sel.getTrueValue(), *FalseVal = Sel.getFalseValue();
2540
2541 // select (freeze(x == y)), x, y --> y
2542 // select (freeze(x != y)), x, y --> x
2543 // The freeze should be only used by this select. Otherwise, remaining uses of
2544 // the freeze can observe a contradictory value.
2545 // c = freeze(x == y) ; Let's assume that y = poison & x = 42; c is 0 or 1
2546 // a = select c, x, y ;
2547 // f(a, c) ; f(poison, 1) cannot happen, but if a is folded
2548 // ; to y, this can happen.
2549 CmpInst::Predicate Pred;
2550 if (FI->hasOneUse() &&
2551 match(Cond, m_c_ICmp(Pred, m_Specific(TrueVal), m_Specific(FalseVal))) &&
2552 (Pred == ICmpInst::ICMP_EQ || Pred == ICmpInst::ICMP_NE)) {
2553 return Pred == ICmpInst::ICMP_EQ ? FalseVal : TrueVal;
2554 }
2555
2556 return nullptr;
2557 }
2558
Instruction *InstCombinerImpl::visitSelectInst(SelectInst &SI) {
  Value *CondVal = SI.getCondition();
  Value *TrueVal = SI.getTrueValue();
  Value *FalseVal = SI.getFalseValue();
  Type *SelType = SI.getType();

  // FIXME: Remove this workaround when freeze related patches are done.
  // For select with undef operand which feeds into an equality comparison,
  // don't simplify it so loop unswitch can know the equality comparison
  // may have an undef operand. This is a workaround for PR31652 caused by
  // discrepancy about branch on undef between LoopUnswitch and GVN.
  if (isa<UndefValue>(TrueVal) || isa<UndefValue>(FalseVal)) {
    if (llvm::any_of(SI.users(), [&](User *U) {
          ICmpInst *CI = dyn_cast<ICmpInst>(U);
          if (CI && CI->isEquality())
            return true;
          return false;
        })) {
      return nullptr;
    }
  }

  // If the select simplifies completely, replace it.
  if (Value *V = SimplifySelectInst(CondVal, TrueVal, FalseVal,
                                    SQ.getWithInstruction(&SI)))
    return replaceInstUsesWith(SI, V);

  if (Instruction *I = canonicalizeSelectToShuffle(SI))
    return I;

  if (Instruction *I = canonicalizeScalarSelectOfVecs(SI, *this))
    return I;

  CmpInst::Predicate Pred;

  // Folds for i1 (or vector-of-i1) selects whose arms share the condition's
  // type, i.e. boolean logic written as a select.
  if (SelType->isIntOrIntVectorTy(1) &&
      TrueVal->getType() == CondVal->getType()) {
    if (match(TrueVal, m_One()) &&
        (EnableUnsafeSelectTransform || impliesPoison(FalseVal, CondVal))) {
      // Change: A = select B, true, C --> A = or B, C
      return BinaryOperator::CreateOr(CondVal, FalseVal);
    }
    if (match(FalseVal, m_Zero()) &&
        (EnableUnsafeSelectTransform || impliesPoison(TrueVal, CondVal))) {
      // Change: A = select B, C, false --> A = and B, C
      return BinaryOperator::CreateAnd(CondVal, TrueVal);
    }

    // select a, false, b -> select !a, b, false
    if (match(TrueVal, m_Zero())) {
      Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
      return SelectInst::Create(NotCond, FalseVal,
                                ConstantInt::getFalse(SelType));
    }
    // select a, b, true -> select !a, true, b
    if (match(FalseVal, m_One())) {
      Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
      return SelectInst::Create(NotCond, ConstantInt::getTrue(SelType),
                                TrueVal);
    }

    // select a, a, b -> select a, true, b
    if (CondVal == TrueVal)
      return replaceOperand(SI, 1, ConstantInt::getTrue(SelType));
    // select a, b, a -> select a, b, false
    if (CondVal == FalseVal)
      return replaceOperand(SI, 2, ConstantInt::getFalse(SelType));

    // select a, !a, b -> select !a, b, false
    if (match(TrueVal, m_Not(m_Specific(CondVal))))
      return SelectInst::Create(TrueVal, FalseVal,
                                ConstantInt::getFalse(SelType));
    // select a, b, !a -> select !a, true, b
    if (match(FalseVal, m_Not(m_Specific(CondVal))))
      return SelectInst::Create(FalseVal, ConstantInt::getTrue(SelType),
                                TrueVal);
  }

  // Selecting between two integer or vector splat integer constants?
  //
  // Note that we don't handle a scalar select of vectors:
  // select i1 %c, <2 x i8> <1, 1>, <2 x i8> <0, 0>
  // because that may need 3 instructions to splat the condition value:
  // extend, insertelement, shufflevector.
  //
  // Do not handle i1 TrueVal and FalseVal otherwise would result in
  // zext/sext i1 to i1.
  if (SelType->isIntOrIntVectorTy() && !SelType->isIntOrIntVectorTy(1) &&
      CondVal->getType()->isVectorTy() == SelType->isVectorTy()) {
    // select C, 1, 0 -> zext C to int
    if (match(TrueVal, m_One()) && match(FalseVal, m_Zero()))
      return new ZExtInst(CondVal, SelType);

    // select C, -1, 0 -> sext C to int
    if (match(TrueVal, m_AllOnes()) && match(FalseVal, m_Zero()))
      return new SExtInst(CondVal, SelType);

    // select C, 0, 1 -> zext !C to int
    if (match(TrueVal, m_Zero()) && match(FalseVal, m_One())) {
      Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
      return new ZExtInst(NotCond, SelType);
    }

    // select C, 0, -1 -> sext !C to int
    if (match(TrueVal, m_Zero()) && match(FalseVal, m_AllOnes())) {
      Value *NotCond = Builder.CreateNot(CondVal, "not." + CondVal->getName());
      return new SExtInst(NotCond, SelType);
    }
  }

  // See if we are selecting two values based on a comparison of the two values.
  if (FCmpInst *FCI = dyn_cast<FCmpInst>(CondVal)) {
    Value *Cmp0 = FCI->getOperand(0), *Cmp1 = FCI->getOperand(1);
    if ((Cmp0 == TrueVal && Cmp1 == FalseVal) ||
        (Cmp0 == FalseVal && Cmp1 == TrueVal)) {
      // Canonicalize to use ordered comparisons by swapping the select
      // operands.
      //
      // e.g.
      // (X ugt Y) ? X : Y -> (X ole Y) ? Y : X
      if (FCI->hasOneUse() && FCmpInst::isUnordered(FCI->getPredicate())) {
        FCmpInst::Predicate InvPred = FCI->getInversePredicate();
        IRBuilder<>::FastMathFlagGuard FMFG(Builder);
        // FIXME: The FMF should propagate from the select, not the fcmp.
        Builder.setFastMathFlags(FCI->getFastMathFlags());
        Value *NewCond = Builder.CreateFCmp(InvPred, Cmp0, Cmp1,
                                            FCI->getName() + ".inv");
        Value *NewSel = Builder.CreateSelect(NewCond, FalseVal, TrueVal);
        return replaceInstUsesWith(SI, NewSel);
      }

      // NOTE: if we wanted to, this is where to detect MIN/MAX
    }
  }

  // Canonicalize select with fcmp to fabs(). -0.0 makes this tricky. We need
  // fast-math-flags (nsz) or fsub with +0.0 (not fneg) for this to work. We
  // also require nnan because we do not want to unintentionally change the
  // sign of a NaN value.
  // FIXME: These folds should test/propagate FMF from the select, not the
  // fsub or fneg.
  // (X <= +/-0.0) ? (0.0 - X) : X --> fabs(X)
  Instruction *FSub;
  if (match(CondVal, m_FCmp(Pred, m_Specific(FalseVal), m_AnyZeroFP())) &&
      match(TrueVal, m_FSub(m_PosZeroFP(), m_Specific(FalseVal))) &&
      match(TrueVal, m_Instruction(FSub)) && FSub->hasNoNaNs() &&
      (Pred == FCmpInst::FCMP_OLE || Pred == FCmpInst::FCMP_ULE)) {
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FalseVal, FSub);
    return replaceInstUsesWith(SI, Fabs);
  }
  // (X > +/-0.0) ? X : (0.0 - X) --> fabs(X)
  if (match(CondVal, m_FCmp(Pred, m_Specific(TrueVal), m_AnyZeroFP())) &&
      match(FalseVal, m_FSub(m_PosZeroFP(), m_Specific(TrueVal))) &&
      match(FalseVal, m_Instruction(FSub)) && FSub->hasNoNaNs() &&
      (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_UGT)) {
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, TrueVal, FSub);
    return replaceInstUsesWith(SI, Fabs);
  }
  // With nnan and nsz:
  // (X < +/-0.0) ? -X : X --> fabs(X)
  // (X <= +/-0.0) ? -X : X --> fabs(X)
  Instruction *FNeg;
  if (match(CondVal, m_FCmp(Pred, m_Specific(FalseVal), m_AnyZeroFP())) &&
      match(TrueVal, m_FNeg(m_Specific(FalseVal))) &&
      match(TrueVal, m_Instruction(FNeg)) &&
      FNeg->hasNoNaNs() && FNeg->hasNoSignedZeros() &&
      (Pred == FCmpInst::FCMP_OLT || Pred == FCmpInst::FCMP_OLE ||
       Pred == FCmpInst::FCMP_ULT || Pred == FCmpInst::FCMP_ULE)) {
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, FalseVal, FNeg);
    return replaceInstUsesWith(SI, Fabs);
  }
  // With nnan and nsz:
  // (X > +/-0.0) ? X : -X --> fabs(X)
  // (X >= +/-0.0) ? X : -X --> fabs(X)
  if (match(CondVal, m_FCmp(Pred, m_Specific(TrueVal), m_AnyZeroFP())) &&
      match(FalseVal, m_FNeg(m_Specific(TrueVal))) &&
      match(FalseVal, m_Instruction(FNeg)) &&
      FNeg->hasNoNaNs() && FNeg->hasNoSignedZeros() &&
      (Pred == FCmpInst::FCMP_OGT || Pred == FCmpInst::FCMP_OGE ||
       Pred == FCmpInst::FCMP_UGT || Pred == FCmpInst::FCMP_UGE)) {
    Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, TrueVal, FNeg);
    return replaceInstUsesWith(SI, Fabs);
  }

  // See if we are selecting two values based on a comparison of the two values.
  if (ICmpInst *ICI = dyn_cast<ICmpInst>(CondVal))
    if (Instruction *Result = foldSelectInstWithICmp(SI, ICI))
      return Result;

  if (Instruction *Add = foldAddSubSelect(SI, Builder))
    return Add;
  if (Instruction *Add = foldOverflowingAddSubSelect(SI, Builder))
    return Add;
  if (Instruction *Or = foldSetClearBits(SI, Builder))
    return Or;

  // Turn (select C, (op X, Y), (op X, Z)) -> (op X, (select C, Y, Z))
  auto *TI = dyn_cast<Instruction>(TrueVal);
  auto *FI = dyn_cast<Instruction>(FalseVal);
  if (TI && FI && TI->getOpcode() == FI->getOpcode())
    if (Instruction *IV = foldSelectOpOp(SI, TI, FI))
      return IV;

  if (Instruction *I = foldSelectExtConst(SI))
    return I;

  // See if we can fold the select into one of our operands.
  if (SelType->isIntOrIntVectorTy() || SelType->isFPOrFPVectorTy()) {
    if (Instruction *FoldI = foldSelectIntoOp(SI, TrueVal, FalseVal))
      return FoldI;

    // Min/max and abs patterns: first try to fold nested select-pattern
    // flavors, then canonicalize casts/clamps around min/max.
    Value *LHS, *RHS;
    Instruction::CastOps CastOp;
    SelectPatternResult SPR = matchSelectPattern(&SI, LHS, RHS, &CastOp);
    auto SPF = SPR.Flavor;
    if (SPF) {
      Value *LHS2, *RHS2;
      if (SelectPatternFlavor SPF2 = matchSelectPattern(LHS, LHS2, RHS2).Flavor)
        if (Instruction *R = foldSPFofSPF(cast<Instruction>(LHS), SPF2, LHS2,
                                          RHS2, SI, SPF, RHS))
          return R;
      if (SelectPatternFlavor SPF2 = matchSelectPattern(RHS, LHS2, RHS2).Flavor)
        if (Instruction *R = foldSPFofSPF(cast<Instruction>(RHS), SPF2, LHS2,
                                          RHS2, SI, SPF, LHS))
          return R;
      // TODO.
      // ABS(-X) -> ABS(X)
    }

    if (SelectPatternResult::isMinOrMax(SPF)) {
      // Canonicalize so that
      // - type casts are outside select patterns.
      // - float clamp is transformed to min/max pattern

      bool IsCastNeeded = LHS->getType() != SelType;
      Value *CmpLHS = cast<CmpInst>(CondVal)->getOperand(0);
      Value *CmpRHS = cast<CmpInst>(CondVal)->getOperand(1);
      if (IsCastNeeded ||
          (LHS->getType()->isFPOrFPVectorTy() &&
           ((CmpLHS != LHS && CmpLHS != RHS) ||
            (CmpRHS != LHS && CmpRHS != RHS)))) {
        CmpInst::Predicate MinMaxPred = getMinMaxPred(SPF, SPR.Ordered);

        Value *Cmp;
        if (CmpInst::isIntPredicate(MinMaxPred)) {
          Cmp = Builder.CreateICmp(MinMaxPred, LHS, RHS);
        } else {
          // FP compare: preserve the fast-math flags of the original compare.
          IRBuilder<>::FastMathFlagGuard FMFG(Builder);
          auto FMF =
              cast<FPMathOperator>(SI.getCondition())->getFastMathFlags();
          Builder.setFastMathFlags(FMF);
          Cmp = Builder.CreateFCmp(MinMaxPred, LHS, RHS);
        }

        Value *NewSI = Builder.CreateSelect(Cmp, LHS, RHS, SI.getName(), &SI);
        if (!IsCastNeeded)
          return replaceInstUsesWith(SI, NewSI);

        Value *NewCast = Builder.CreateCast(CastOp, NewSI, SelType);
        return replaceInstUsesWith(SI, NewCast);
      }

      // MAX(~a, ~b) -> ~MIN(a, b)
      // MAX(~a, C) -> ~MIN(a, ~C)
      // MIN(~a, ~b) -> ~MAX(a, b)
      // MIN(~a, C) -> ~MAX(a, ~C)
      auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
        Value *A;
        if (match(X, m_Not(m_Value(A))) && !X->hasNUsesOrMore(3) &&
            !isFreeToInvert(A, A->hasOneUse()) &&
            // Passing false to only consider m_Not and constants.
            isFreeToInvert(Y, false)) {
          Value *B = Builder.CreateNot(Y);
          Value *NewMinMax = createMinMax(Builder, getInverseMinMaxFlavor(SPF),
                                          A, B);
          // Copy the profile metadata.
          if (MDNode *MD = SI.getMetadata(LLVMContext::MD_prof)) {
            cast<SelectInst>(NewMinMax)->setMetadata(LLVMContext::MD_prof, MD);
            // Swap the metadata if the operands are swapped.
            if (X == SI.getFalseValue() && Y == SI.getTrueValue())
              cast<SelectInst>(NewMinMax)->swapProfMetadata();
          }

          return BinaryOperator::CreateNot(NewMinMax);
        }

        return nullptr;
      };

      if (Instruction *I = moveNotAfterMinMax(LHS, RHS))
        return I;
      if (Instruction *I = moveNotAfterMinMax(RHS, LHS))
        return I;

      if (Instruction *I = moveAddAfterMinMax(SPF, LHS, RHS, Builder))
        return I;

      if (Instruction *I = factorizeMinMaxTree(SPF, LHS, RHS, Builder))
        return I;
      if (Instruction *I = matchSAddSubSat(SI))
        return I;
    }
  }

  // Canonicalize select of FP values where NaN and -0.0 are not valid as
  // minnum/maxnum intrinsics.
  if (isa<FPMathOperator>(SI) && SI.hasNoNaNs() && SI.hasNoSignedZeros()) {
    Value *X, *Y;
    if (match(&SI, m_OrdFMax(m_Value(X), m_Value(Y))))
      return replaceInstUsesWith(
          SI, Builder.CreateBinaryIntrinsic(Intrinsic::maxnum, X, Y, &SI));

    if (match(&SI, m_OrdFMin(m_Value(X), m_Value(Y))))
      return replaceInstUsesWith(
          SI, Builder.CreateBinaryIntrinsic(Intrinsic::minnum, X, Y, &SI));
  }

  // See if we can fold the select into a phi node if the condition is a select.
  if (auto *PN = dyn_cast<PHINode>(SI.getCondition()))
    // The true/false values have to be live in the PHI predecessor's blocks.
    if (canSelectOperandBeMappingIntoPredBlock(TrueVal, SI) &&
        canSelectOperandBeMappingIntoPredBlock(FalseVal, SI))
      if (Instruction *NV = foldOpIntoPhi(SI, PN))
        return NV;

  if (SelectInst *TrueSI = dyn_cast<SelectInst>(TrueVal)) {
    if (TrueSI->getCondition()->getType() == CondVal->getType()) {
      // select(C, select(C, a, b), c) -> select(C, a, c)
      if (TrueSI->getCondition() == CondVal) {
        if (SI.getTrueValue() == TrueSI->getTrueValue())
          return nullptr;
        return replaceOperand(SI, 1, TrueSI->getTrueValue());
      }
      // select(C0, select(C1, a, b), b) -> select(C0&C1, a, b)
      // We choose this as normal form to enable folding on the And and
      // shortening paths for the values (this helps getUnderlyingObjects() for
      // example).
      if (TrueSI->getFalseValue() == FalseVal && TrueSI->hasOneUse()) {
        Value *And = Builder.CreateAnd(CondVal, TrueSI->getCondition());
        replaceOperand(SI, 0, And);
        replaceOperand(SI, 1, TrueSI->getTrueValue());
        return &SI;
      }
    }
  }
  if (SelectInst *FalseSI = dyn_cast<SelectInst>(FalseVal)) {
    if (FalseSI->getCondition()->getType() == CondVal->getType()) {
      // select(C, a, select(C, b, c)) -> select(C, a, c)
      if (FalseSI->getCondition() == CondVal) {
        if (SI.getFalseValue() == FalseSI->getFalseValue())
          return nullptr;
        return replaceOperand(SI, 2, FalseSI->getFalseValue());
      }
      // select(C0, a, select(C1, a, b)) -> select(C0|C1, a, b)
      if (FalseSI->getTrueValue() == TrueVal && FalseSI->hasOneUse()) {
        Value *Or = Builder.CreateOr(CondVal, FalseSI->getCondition());
        replaceOperand(SI, 0, Or);
        replaceOperand(SI, 2, FalseSI->getFalseValue());
        return &SI;
      }
    }
  }

  auto canMergeSelectThroughBinop = [](BinaryOperator *BO) {
    // The select might be preventing a division by 0.
    switch (BO->getOpcode()) {
    default:
      return true;
    case Instruction::SRem:
    case Instruction::URem:
    case Instruction::SDiv:
    case Instruction::UDiv:
      return false;
    }
  };

  // Try to simplify a binop sandwiched between 2 selects with the same
  // condition.
  // select(C, binop(select(C, X, Y), W), Z) -> select(C, binop(X, W), Z)
  BinaryOperator *TrueBO;
  if (match(TrueVal, m_OneUse(m_BinOp(TrueBO))) &&
      canMergeSelectThroughBinop(TrueBO)) {
    if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(0))) {
      if (TrueBOSI->getCondition() == CondVal) {
        replaceOperand(*TrueBO, 0, TrueBOSI->getTrueValue());
        Worklist.push(TrueBO);
        return &SI;
      }
    }
    if (auto *TrueBOSI = dyn_cast<SelectInst>(TrueBO->getOperand(1))) {
      if (TrueBOSI->getCondition() == CondVal) {
        replaceOperand(*TrueBO, 1, TrueBOSI->getTrueValue());
        Worklist.push(TrueBO);
        return &SI;
      }
    }
  }

  // select(C, Z, binop(select(C, X, Y), W)) -> select(C, Z, binop(Y, W))
  BinaryOperator *FalseBO;
  if (match(FalseVal, m_OneUse(m_BinOp(FalseBO))) &&
      canMergeSelectThroughBinop(FalseBO)) {
    if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(0))) {
      if (FalseBOSI->getCondition() == CondVal) {
        replaceOperand(*FalseBO, 0, FalseBOSI->getFalseValue());
        Worklist.push(FalseBO);
        return &SI;
      }
    }
    if (auto *FalseBOSI = dyn_cast<SelectInst>(FalseBO->getOperand(1))) {
      if (FalseBOSI->getCondition() == CondVal) {
        replaceOperand(*FalseBO, 1, FalseBOSI->getFalseValue());
        Worklist.push(FalseBO);
        return &SI;
      }
    }
  }

  // select(!C, a, b) -> select(C, b, a) - absorb the 'not' into the select.
  Value *NotCond;
  if (match(CondVal, m_Not(m_Value(NotCond))) &&
      !InstCombiner::shouldAvoidAbsorbingNotIntoSelect(SI)) {
    replaceOperand(SI, 0, NotCond);
    SI.swapValues();
    SI.swapProfMetadata();
    return &SI;
  }

  if (Instruction *I = foldVectorSelect(SI))
    return I;

  // If we can compute the condition, there's no need for a select.
  // Like the above fold, we are attempting to reduce compile-time cost by
  // putting this fold here with limitations rather than in InstSimplify.
  // The motivation for this call into value tracking is to take advantage of
  // the assumption cache, so make sure that is populated.
  if (!CondVal->getType()->isVectorTy() && !AC.assumptions().empty()) {
    KnownBits Known(1);
    computeKnownBits(CondVal, Known, 0, &SI);
    if (Known.One.isOneValue())
      return replaceInstUsesWith(SI, TrueVal);
    if (Known.Zero.isOneValue())
      return replaceInstUsesWith(SI, FalseVal);
  }

  if (Instruction *BitCastSel = foldSelectCmpBitcasts(SI, Builder))
    return BitCastSel;

  // Simplify selects that test the returned flag of cmpxchg instructions.
  if (Value *V = foldSelectCmpXchg(SI))
    return replaceInstUsesWith(SI, V);

  if (Instruction *Select = foldSelectBinOpIdentity(SI, TLI, *this))
    return Select;

  if (Instruction *Funnel = foldSelectFunnelShift(SI, Builder))
    return Funnel;

  if (Instruction *Copysign = foldSelectToCopysign(SI, Builder))
    return Copysign;

  if (Instruction *PN = foldSelectToPhi(SI, DT, Builder))
    return replaceInstUsesWith(SI, PN);

  if (Value *Fr = foldSelectWithFrozenICmp(SI, Builder))
    return replaceInstUsesWith(SI, Fr);

  return nullptr;
}
3026