//===- InstCombineAndOrXor.cpp --------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visitAnd, visitOr, and visitXor functions.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/Analysis/CmpInstAnalysis.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/ConstantRange.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include "llvm/Transforms/Utils/Local.h"

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"
/// This is the complement of getICmpCode: it turns a predicate code and two
/// operands into either a constant true or false, or a new ICmp instruction.
/// The sign is passed in to determine which kind of predicate to use in the
/// new icmp instruction.
static Value *getNewICmpValue(unsigned Code, bool Sign, Value *LHS, Value *RHS,
                              InstCombiner::BuilderTy &Builder) {
  ICmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForICmpCode(Code, Sign, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateICmp(NewPred, LHS, RHS);
}

/// This is the complement of getFCmpCode: it turns a predicate code and two
/// operands into either an FCmp instruction or a true/false constant.
static Value *getFCmpValue(unsigned Code, Value *LHS, Value *RHS,
                           InstCombiner::BuilderTy &Builder) {
  FCmpInst::Predicate NewPred;
  if (Constant *TorF = getPredForFCmpCode(Code, LHS->getType(), NewPred))
    return TorF;
  return Builder.CreateFCmp(NewPred, LHS, RHS);
}
/// Emit a computation of: (V >= Lo && V < Hi) if Inside is true, otherwise
/// (V < Lo || V >= Hi). This method expects that Lo < Hi. IsSigned indicates
/// whether to treat V, Lo, and Hi as signed or not.
Value *InstCombinerImpl::insertRangeTest(Value *V, const APInt &Lo,
                                         const APInt &Hi, bool isSigned,
                                         bool Inside) {
  assert((isSigned ? Lo.slt(Hi) : Lo.ult(Hi)) &&
         "Lo is not < Hi in range emission code!");

  Type *Ty = V->getType();

  // V >= Min && V < Hi --> V < Hi
  // V < Min || V >= Hi --> V >= Hi
  ICmpInst::Predicate Pred = Inside ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE;
  if (isSigned ? Lo.isMinSignedValue() : Lo.isMinValue()) {
    Pred = isSigned ? ICmpInst::getSignedPredicate(Pred) : Pred;
    return Builder.CreateICmp(Pred, V, ConstantInt::get(Ty, Hi));
  }

  // V >= Lo && V < Hi --> V - Lo u< Hi - Lo
  // V < Lo || V >= Hi --> V - Lo u>= Hi - Lo
  Value *VMinusLo =
      Builder.CreateSub(V, ConstantInt::get(Ty, Lo), V->getName() + ".off");
  Constant *HiMinusLo = ConstantInt::get(Ty, Hi - Lo);
  return Builder.CreateICmp(Pred, VMinusLo, HiMinusLo);
}
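
// Illustrative sketch of the emitted IR (invented value names): calling
// insertRangeTest(%x, Lo=5, Hi=10, isSigned=false, Inside=true) produces
//   %x.off = sub i32 %x, 5
//   %r = icmp ult i32 %x.off, 5   ; true iff 5 u<= %x u< 10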

/// Classify (icmp eq (A & B), C) and (icmp ne (A & B), C) as matching patterns
/// that can be simplified.
/// One of A and B is considered the mask. The other is the value. This is
/// described as the "AMask" or "BMask" part of the enum. If the enum contains
/// only "Mask", then both A and B can be considered masks. If A is the mask,
/// then it was proven that (A & C) == C. This is trivial if C == A or C == 0.
/// If both A and C are constants, this proof is also easy.
/// For the following explanations, we assume that A is the mask.
///
/// "AllOnes" declares that the comparison is true only if (A & B) == A or all
/// bits of A are set in B.
/// Example: (icmp eq (A & 3), 3) -> AMask_AllOnes
///
/// "AllZeros" declares that the comparison is true only if (A & B) == 0 or all
/// bits of A are cleared in B.
/// Example: (icmp eq (A & 3), 0) -> Mask_AllZeros
///
/// "Mixed" declares that (A & B) == C, where C may contain any combination of
/// one and zero bits.
/// Example: (icmp eq (A & 3), 1) -> AMask_Mixed
///
/// "Not" means that in the above descriptions "==" should be replaced by "!=".
/// Example: (icmp ne (A & 3), 3) -> AMask_NotAllOnes
///
/// If the mask A contains a single bit, then the following is equivalent:
/// (icmp eq (A & B), A) equals (icmp ne (A & B), 0)
/// (icmp ne (A & B), A) equals (icmp eq (A & B), 0)
enum MaskedICmpType {
  AMask_AllOnes = 1,
  AMask_NotAllOnes = 2,
  BMask_AllOnes = 4,
  BMask_NotAllOnes = 8,
  Mask_AllZeros = 16,
  Mask_NotAllZeros = 32,
  AMask_Mixed = 64,
  AMask_NotMixed = 128,
  BMask_Mixed = 256,
  BMask_NotMixed = 512
};

/// Return the set of patterns (from MaskedICmpType) that (icmp SCC (A & B), C)
/// satisfies.
static unsigned getMaskedICmpType(Value *A, Value *B, Value *C,
                                  ICmpInst::Predicate Pred) {
  const APInt *ConstA = nullptr, *ConstB = nullptr, *ConstC = nullptr;
  match(A, m_APInt(ConstA));
  match(B, m_APInt(ConstB));
  match(C, m_APInt(ConstC));
  bool IsEq = (Pred == ICmpInst::ICMP_EQ);
  bool IsAPow2 = ConstA && ConstA->isPowerOf2();
  bool IsBPow2 = ConstB && ConstB->isPowerOf2();
  unsigned MaskVal = 0;
  if (ConstC && ConstC->isZero()) {
    // if C is zero, then both A and B qualify as mask
    MaskVal |= (IsEq ? (Mask_AllZeros | AMask_Mixed | BMask_Mixed)
                     : (Mask_NotAllZeros | AMask_NotMixed | BMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (AMask_NotAllOnes | AMask_NotMixed)
                       : (AMask_AllOnes | AMask_Mixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (BMask_NotAllOnes | BMask_NotMixed)
                       : (BMask_AllOnes | BMask_Mixed));
    return MaskVal;
  }

  if (A == C) {
    MaskVal |= (IsEq ? (AMask_AllOnes | AMask_Mixed)
                     : (AMask_NotAllOnes | AMask_NotMixed));
    if (IsAPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | AMask_NotMixed)
                       : (Mask_AllZeros | AMask_Mixed));
  } else if (ConstA && ConstC && ConstC->isSubsetOf(*ConstA)) {
    MaskVal |= (IsEq ? AMask_Mixed : AMask_NotMixed);
  }

  if (B == C) {
    MaskVal |= (IsEq ? (BMask_AllOnes | BMask_Mixed)
                     : (BMask_NotAllOnes | BMask_NotMixed));
    if (IsBPow2)
      MaskVal |= (IsEq ? (Mask_NotAllZeros | BMask_NotMixed)
                       : (Mask_AllZeros | BMask_Mixed));
  } else if (ConstB && ConstC && ConstC->isSubsetOf(*ConstB)) {
    MaskVal |= (IsEq ? BMask_Mixed : BMask_NotMixed);
  }

  return MaskVal;
}
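
// Worked example (illustrative): for (icmp eq (12 & X), 8), i.e. A = 12,
// B = X, C = 8, the constant C is a non-zero subset of the constant A
// (8 & 12 == 8), so the returned set is AMask_Mixed.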

/// Convert an analysis of a masked ICmp into its equivalent if all boolean
/// operations had the opposite sense. Since each "NotXXX" flag (recording !=)
/// is adjacent to the corresponding normal flag (recording ==), this just
/// involves swapping those bits over.
static unsigned conjugateICmpMask(unsigned Mask) {
  unsigned NewMask;
  NewMask = (Mask & (AMask_AllOnes | BMask_AllOnes | Mask_AllZeros |
                     AMask_Mixed | BMask_Mixed))
            << 1;

  NewMask |= (Mask & (AMask_NotAllOnes | BMask_NotAllOnes | Mask_NotAllZeros |
                      AMask_NotMixed | BMask_NotMixed))
             >> 1;

  return NewMask;
}
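
// For example, conjugateICmpMask(AMask_AllOnes | Mask_NotAllZeros) yields
// AMask_NotAllOnes | Mask_AllZeros: each "==" flag moves up one bit and each
// "!=" flag moves down one bit.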

// Adapts the external decomposeBitTestICmp for local use.
static bool decomposeBitTestICmp(Value *LHS, Value *RHS,
                                 CmpInst::Predicate &Pred, Value *&X,
                                 Value *&Y, Value *&Z) {
  APInt Mask;
  if (!llvm::decomposeBitTestICmp(LHS, RHS, Pred, X, Mask))
    return false;

  Y = ConstantInt::get(X->getType(), Mask);
  Z = ConstantInt::get(X->getType(), 0);
  return true;
}
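
// For instance (a representative case), (icmp ult %x, 8) decomposes into
// Pred = ICMP_EQ, X = %x, Y = -8 (the mask of bits that must be clear), and
// Z = 0, representing (icmp eq (%x & -8), 0).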

/// Handle (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E).
/// Return the pattern classes (from MaskedICmpType) for the left hand side and
/// the right hand side as a pair.
/// LHS and RHS are the left hand side and the right hand side ICmps and PredL
/// and PredR are their predicates, respectively.
static std::optional<std::pair<unsigned, unsigned>> getMaskedTypeForICmpPair(
    Value *&A, Value *&B, Value *&C, Value *&D, Value *&E, ICmpInst *LHS,
    ICmpInst *RHS, ICmpInst::Predicate &PredL, ICmpInst::Predicate &PredR) {
  // Don't allow pointers. Splat vectors are fine.
  if (!LHS->getOperand(0)->getType()->isIntOrIntVectorTy() ||
      !RHS->getOperand(0)->getType()->isIntOrIntVectorTy())
    return std::nullopt;

  // Here comes the tricky part:
  // LHS might be of the form L11 & L12 == X, X == L21 & L22,
  // and L11 & L12 == L21 & L22. The same goes for RHS.
  // Now we must find those components L** and R** that are equal, so
  // that we can extract the parameters A, B, C, D, and E for the canonical
  // form above.
  Value *L1 = LHS->getOperand(0);
  Value *L2 = LHS->getOperand(1);
  Value *L11, *L12, *L21, *L22;
  // Check whether the icmp can be decomposed into a bit test.
  if (decomposeBitTestICmp(L1, L2, PredL, L11, L12, L2)) {
    L21 = L22 = L1 = nullptr;
  } else {
    // Look for ANDs in the LHS icmp.
    if (!match(L1, m_And(m_Value(L11), m_Value(L12)))) {
      // Any icmp can be viewed as being trivially masked; if it allows us to
      // remove one, it's worth it.
      L11 = L1;
      L12 = Constant::getAllOnesValue(L1->getType());
    }

    if (!match(L2, m_And(m_Value(L21), m_Value(L22)))) {
      L21 = L2;
      L22 = Constant::getAllOnesValue(L2->getType());
    }
  }

  // Bail if LHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredL))
    return std::nullopt;

  Value *R1 = RHS->getOperand(0);
  Value *R2 = RHS->getOperand(1);
  Value *R11, *R12;
  bool Ok = false;
  if (decomposeBitTestICmp(R1, R2, PredR, R11, R12, R2)) {
    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
    } else {
      return std::nullopt;
    }
    E = R2;
    R1 = nullptr;
    Ok = true;
  } else {
    if (!match(R1, m_And(m_Value(R11), m_Value(R12)))) {
      // As before, model no mask as a trivial mask if it'll let us do an
      // optimization.
      R11 = R1;
      R12 = Constant::getAllOnesValue(R1->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R2;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R2;
      Ok = true;
    }
  }

  // Bail if RHS was an icmp that can't be decomposed into an equality.
  if (!ICmpInst::isEquality(PredR))
    return std::nullopt;

  // Look for ANDs on the right side of the RHS icmp.
  if (!Ok) {
    if (!match(R2, m_And(m_Value(R11), m_Value(R12)))) {
      R11 = R2;
      R12 = Constant::getAllOnesValue(R2->getType());
    }

    if (R11 == L11 || R11 == L12 || R11 == L21 || R11 == L22) {
      A = R11;
      D = R12;
      E = R1;
      Ok = true;
    } else if (R12 == L11 || R12 == L12 || R12 == L21 || R12 == L22) {
      A = R12;
      D = R11;
      E = R1;
      Ok = true;
    } else {
      return std::nullopt;
    }

    assert(Ok && "Failed to find AND on the right side of the RHS icmp.");
  }

  if (L11 == A) {
    B = L12;
    C = L2;
  } else if (L12 == A) {
    B = L11;
    C = L2;
  } else if (L21 == A) {
    B = L22;
    C = L1;
  } else if (L22 == A) {
    B = L21;
    C = L1;
  }

  unsigned LeftType = getMaskedICmpType(A, B, C, PredL);
  unsigned RightType = getMaskedICmpType(A, D, E, PredR);
  return std::optional<std::pair<unsigned, unsigned>>(
      std::make_pair(LeftType, RightType));
}
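
// Worked example (illustrative): for LHS = (icmp eq (X & 12), 8) and
// RHS = (icmp ne (X & 15), 0), the shared operand gives A = X, B = 12, C = 8,
// D = 15, E = 0; the left side classifies as BMask_Mixed and the right side
// includes Mask_NotAllZeros, the asymmetric shape that
// foldLogOpOfMaskedICmpsAsymmetric below looks for.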

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side is of type Mask_NotAllZeros
/// and the right hand side is of type BMask_Mixed. For example,
/// (icmp (A & 12) != 0) & (icmp (A & 15) == 8) -> (icmp (A & 15) == 8).
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    InstCombiner::BuilderTy &Builder) {
  // We are given the canonical form:
  //   (icmp ne (A & B), 0) & (icmp eq (A & D), E).
  // where D & E == E.
  //
  // If IsAnd is false, we get it in negated form:
  //   (icmp eq (A & B), 0) | (icmp ne (A & D), E) ->
  //     !((icmp ne (A & B), 0) & (icmp eq (A & D), E)).
  //
  // We currently handle only the case where B, C, D, and E are constants.
  const APInt *BCst, *CCst, *DCst, *OrigECst;
  if (!match(B, m_APInt(BCst)) || !match(C, m_APInt(CCst)) ||
      !match(D, m_APInt(DCst)) || !match(E, m_APInt(OrigECst)))
    return nullptr;

  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;

  // Update E to the canonical form when D is a power of two and RHS is
  // canonicalized as,
  //   (icmp ne (A & D), 0) -> (icmp eq (A & D), D) or
  //   (icmp ne (A & D), D) -> (icmp eq (A & D), 0).
  APInt ECst = *OrigECst;
  if (PredR != NewCC)
    ECst ^= *DCst;

  // If B or D is zero, skip: LHS or RHS can be trivially folded by other
  // folding rules, and this pattern won't apply any more.
  if (*BCst == 0 || *DCst == 0)
    return nullptr;

  // If B and D don't intersect, i.e. (B & D) == 0, there is no folding
  // because we can't deduce anything from it.
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 3), 1) -> no folding.
  if ((*BCst & *DCst) == 0)
    return nullptr;

  // If the following two conditions are met:
  //
  // 1. mask B covers only a single bit that's not covered by mask D, that is,
  //    (B & (B ^ D)) is a power of 2 (in other words, B minus the intersection
  //    of B and D has only one bit set) and,
  //
  // 2. RHS (and E) indicates that the rest of B's bits are zero (in other
  //    words, the intersection of B and D is zero), that is, ((B & D) & E) == 0
  //
  // then that single bit in B must be one and thus the whole expression can be
  // folded to
  //   (A & (B | D)) == (B & (B ^ D)) | E.
  //
  // For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 7), 1) -> (icmp eq (A & 15), 9)
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 7), 0) -> (icmp eq (A & 15), 8)
  if ((((*BCst & *DCst) & ECst) == 0) &&
      (*BCst & (*BCst ^ *DCst)).isPowerOf2()) {
    APInt BorD = *BCst | *DCst;
    APInt BandBxorDorE = (*BCst & (*BCst ^ *DCst)) | ECst;
    Value *NewMask = ConstantInt::get(A->getType(), BorD);
    Value *NewMaskedValue = ConstantInt::get(A->getType(), BandBxorDorE);
    Value *NewAnd = Builder.CreateAnd(A, NewMask);
    return Builder.CreateICmp(NewCC, NewAnd, NewMaskedValue);
  }

  auto IsSubSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C1;
  };
  auto IsSuperSetOrEqual = [](const APInt *C1, const APInt *C2) {
    return (*C1 & *C2) == *C2;
  };

  // In the following, we consider only the cases where B is a superset of D,
  // B is a subset of D, or B == D, because otherwise there's at least one bit
  // covered by B but not D, in which case we can't deduce much from it, so
  // no folding (aside from the single must-be-one bit case right above).
  // For example,
  //   (icmp ne (A & 14), 0) & (icmp eq (A & 3), 1) -> no folding.
  if (!IsSubSetOrEqual(BCst, DCst) && !IsSuperSetOrEqual(BCst, DCst))
    return nullptr;

  // At this point, either B is a superset of D, B is a subset of D, or B == D.

  // If E is zero and B is a subset of (or equal to) D, LHS and RHS contradict
  // and the whole expression becomes false (or true if negated); otherwise,
  // no folding.
  // For example,
  //   (icmp ne (A & 3), 0) & (icmp eq (A & 7), 0) -> false.
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 3), 0) -> no folding.
  if (ECst.isZero()) {
    if (IsSubSetOrEqual(BCst, DCst))
      return ConstantInt::get(LHS->getType(), !IsAnd);
    return nullptr;
  }

  // At this point, B, D, and E aren't zero and (B & D) == B, (B & D) == D, or
  // B == D. If B is a superset of (or equal to) D, since E is not zero, LHS
  // is subsumed by RHS (RHS implies LHS), so the whole expression becomes RHS.
  // For example,
  //   (icmp ne (A & 255), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  //   (icmp ne (A & 15), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  if (IsSuperSetOrEqual(BCst, DCst))
    return RHS;
  // Otherwise, B is a subset of D. If B and E have a common bit set,
  // i.e. (B & E) != 0, then LHS is subsumed by RHS. For example,
  //   (icmp ne (A & 12), 0) & (icmp eq (A & 15), 8) -> (icmp eq (A & 15), 8).
  assert(IsSubSetOrEqual(BCst, DCst) && "Precondition due to above code");
  if ((*BCst & ECst) != 0)
    return RHS;
  // Otherwise, LHS and RHS contradict and the whole expression becomes false
  // (or true if negated). For example,
  //   (icmp ne (A & 7), 0) & (icmp eq (A & 15), 8) -> false.
  //   (icmp ne (A & 6), 0) & (icmp eq (A & 15), 8) -> false.
  return ConstantInt::get(LHS->getType(), !IsAnd);
}

/// Try to fold (icmp(A & B) ==/!= 0) &/| (icmp(A & D) ==/!= E) into a single
/// (icmp(A & X) ==/!= Y), where the left-hand side and the right hand side
/// aren't of the common mask pattern type.
/// Also used for logical and/or, must be poison safe.
static Value *foldLogOpOfMaskedICmpsAsymmetric(
    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd, Value *A, Value *B, Value *C,
    Value *D, Value *E, ICmpInst::Predicate PredL, ICmpInst::Predicate PredR,
    unsigned LHSMask, unsigned RHSMask, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  // Handle Mask_NotAllZeros-BMask_Mixed cases.
  // (icmp ne/eq (A & B), C) &/| (icmp eq/ne (A & D), E), or
  // (icmp eq/ne (A & B), C) &/| (icmp ne/eq (A & D), E)
  // which gets swapped to
  // (icmp ne/eq (A & D), E) &/| (icmp eq/ne (A & B), C).
  if (!IsAnd) {
    LHSMask = conjugateICmpMask(LHSMask);
    RHSMask = conjugateICmpMask(RHSMask);
  }
  if ((LHSMask & Mask_NotAllZeros) && (RHSMask & BMask_Mixed)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, Builder)) {
      return V;
    }
  } else if ((LHSMask & BMask_Mixed) && (RHSMask & Mask_NotAllZeros)) {
    if (Value *V = foldLogOpOfMaskedICmps_NotAllZeros_BMask_Mixed(
            RHS, LHS, IsAnd, A, D, E, B, C, PredR, PredL, Builder)) {
      return V;
    }
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) ==/!= C) &/| (icmp(A & D) ==/!= E)
/// into a single (icmp(A & X) ==/!= Y).
static Value *foldLogOpOfMaskedICmps(ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                     bool IsLogical,
                                     InstCombiner::BuilderTy &Builder) {
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, LHS, RHS, PredL, PredR);
  if (!MaskPair)
    return nullptr;
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  unsigned LHSMask = MaskPair->first;
  unsigned RHSMask = MaskPair->second;
  unsigned Mask = LHSMask & RHSMask;
  if (Mask == 0) {
    // Even if the two sides don't share a common pattern, check if folding can
    // still happen.
    if (Value *V = foldLogOpOfMaskedICmpsAsymmetric(
            LHS, RHS, IsAnd, A, B, C, D, E, PredL, PredR, LHSMask, RHSMask,
            Builder))
      return V;
    return nullptr;
  }

  // In full generality:
  //   (icmp (A & B) Op C) | (icmp (A & D) Op E)
  // == ![ (icmp (A & B) !Op C) & (icmp (A & D) !Op E) ]
  //
  // If the latter can be converted into (icmp (A & X) Op Y) then the former is
  // equivalent to (icmp (A & X) !Op Y).
  //
  // Therefore, we can pretend for the rest of this function that we're dealing
  // with the conjunction, provided we flip the sense of any comparisons (both
  // input and output).

  // In most cases we're going to produce an EQ for the "&&" case.
  ICmpInst::Predicate NewCC = IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE;
  if (!IsAnd) {
    // Convert the masking analysis into its equivalent with negated
    // comparisons.
    Mask = conjugateICmpMask(Mask);
  }

  if (Mask & Mask_AllZeros) {
    // (icmp eq (A & B), 0) & (icmp eq (A & D), 0)
    // -> (icmp eq (A & (B|D)), 0)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    // We can't use C as zero because we might actually handle
    //   (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // with B and D having a single bit set.
    Value *Zero = Constant::getNullValue(A->getType());
    return Builder.CreateICmp(NewCC, NewAnd, Zero);
  }
  if (Mask & BMask_AllOnes) {
    // (icmp eq (A & B), B) & (icmp eq (A & D), D)
    // -> (icmp eq (A & (B|D)), (B|D))
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewOr = Builder.CreateOr(B, D);
    Value *NewAnd = Builder.CreateAnd(A, NewOr);
    return Builder.CreateICmp(NewCC, NewAnd, NewOr);
  }
  if (Mask & AMask_AllOnes) {
    // (icmp eq (A & B), A) & (icmp eq (A & D), A)
    // -> (icmp eq (A & (B&D)), A)
    if (IsLogical && !isGuaranteedNotToBeUndefOrPoison(D))
      return nullptr; // TODO: Use freeze?
    Value *NewAnd1 = Builder.CreateAnd(B, D);
    Value *NewAnd2 = Builder.CreateAnd(A, NewAnd1);
    return Builder.CreateICmp(NewCC, NewAnd2, A);
  }

  // Remaining cases assume at least that B and D are constant, and depend on
  // their actual values. This isn't strictly necessary, just a "handle the
  // easy cases for now" decision.
  const APInt *ConstB, *ConstD;
  if (!match(B, m_APInt(ConstB)) || !match(D, m_APInt(ConstD)))
    return nullptr;

  if (Mask & (Mask_NotAllZeros | BMask_NotAllOnes)) {
    // (icmp ne (A & B), 0) & (icmp ne (A & D), 0) and
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), 0) or (icmp ne (A & D), 0)
    // Only valid if one of the masks is a superset of the other (check "B&D"
    // is the same as either B or D).
    APInt NewMask = *ConstB & *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & AMask_NotAllOnes) {
    // (icmp ne (A & B), B) & (icmp ne (A & D), D)
    // -> (icmp ne (A & B), A) or (icmp ne (A & D), A)
    // Only valid if one of the masks is a superset of the other (check "B|D"
    // is the same as either B or D).
    APInt NewMask = *ConstB | *ConstD;
    if (NewMask == *ConstB)
      return LHS;
    else if (NewMask == *ConstD)
      return RHS;
  }

  if (Mask & (BMask_Mixed | BMask_NotMixed)) {
    // Mixed:
    // (icmp eq (A & B), C) & (icmp eq (A & D), E)
    // We already know that B & C == C && D & E == E.
    // If we can prove that (B & D) & (C ^ E) == 0, that is, the bits of
    // C and E, which are shared by both the mask B and the mask D, don't
    // contradict, then we can transform to
    // -> (icmp eq (A & (B|D)), (C|E))
    // Currently, we only handle the case of B, C, D, and E being constant.
    // We can't simply use C and E because we might actually handle
    //   (icmp ne (A & B), B) & (icmp eq (A & D), D)
    // with B and D having a single bit set.

    // NotMixed:
    // (icmp ne (A & B), C) & (icmp ne (A & D), E)
    // -> (icmp ne (A & (B & D)), (C & E))
    // Check the intersection (B & D) for inequality.
    // Assume that (B & D) == B || (B & D) == D, i.e. one of B and D is a
    // subset of the other, and that (B & D) & (C ^ E) == 0, i.e. the bits of
    // C and E that are shared by both B and D don't contradict.
    // Note that we can assume (~B & C) == 0 && (~D & E) == 0: a previous
    // fold should have deleted these icmps if that didn't hold.

    const APInt *OldConstC, *OldConstE;
    if (!match(C, m_APInt(OldConstC)) || !match(E, m_APInt(OldConstE)))
      return nullptr;

    auto FoldBMixed = [&](ICmpInst::Predicate CC, bool IsNot) -> Value * {
      CC = IsNot ? CmpInst::getInversePredicate(CC) : CC;
      const APInt ConstC = PredL != CC ? *ConstB ^ *OldConstC : *OldConstC;
      const APInt ConstE = PredR != CC ? *ConstD ^ *OldConstE : *OldConstE;

      if (((*ConstB & *ConstD) & (ConstC ^ ConstE)).getBoolValue())
        return IsNot ? nullptr : ConstantInt::get(LHS->getType(), !IsAnd);

      if (IsNot && !ConstB->isSubsetOf(*ConstD) && !ConstD->isSubsetOf(*ConstB))
        return nullptr;

      APInt BD, CE;
      if (IsNot) {
        BD = *ConstB & *ConstD;
        CE = ConstC & ConstE;
      } else {
        BD = *ConstB | *ConstD;
        CE = ConstC | ConstE;
      }
      Value *NewAnd = Builder.CreateAnd(A, BD);
      Value *CEVal = ConstantInt::get(A->getType(), CE);
      return Builder.CreateICmp(CC, CEVal, NewAnd);
    };

    if (Mask & BMask_Mixed)
      return FoldBMixed(NewCC, false);
    if (Mask & BMask_NotMixed) // can be else also
      return FoldBMixed(NewCC, true);
  }
  return nullptr;
}

/// Try to fold a signed range check with lower bound 0 to an unsigned icmp.
/// Example: (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
/// If \p Inverted is true then the check is for the inverted range, e.g.
/// (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
Value *InstCombinerImpl::simplifyRangeCheck(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                            bool Inverted) {
  // Check the lower range comparison, e.g. x >= 0
  // InstCombine already ensured that if there is a constant it's on the RHS.
  ConstantInt *RangeStart = dyn_cast<ConstantInt>(Cmp0->getOperand(1));
  if (!RangeStart)
    return nullptr;

  ICmpInst::Predicate Pred0 =
      Inverted ? Cmp0->getInversePredicate() : Cmp0->getPredicate();

  // Accept x > -1 or x >= 0 (after potentially inverting the predicate).
  if (!((Pred0 == ICmpInst::ICMP_SGT && RangeStart->isMinusOne()) ||
        (Pred0 == ICmpInst::ICMP_SGE && RangeStart->isZero())))
    return nullptr;

  ICmpInst::Predicate Pred1 =
      Inverted ? Cmp1->getInversePredicate() : Cmp1->getPredicate();

  Value *Input = Cmp0->getOperand(0);
  Value *RangeEnd;
  if (Cmp1->getOperand(0) == Input) {
    // For the upper range compare we have: icmp x, n
    RangeEnd = Cmp1->getOperand(1);
  } else if (Cmp1->getOperand(1) == Input) {
    // For the upper range compare we have: icmp n, x
    RangeEnd = Cmp1->getOperand(0);
    Pred1 = ICmpInst::getSwappedPredicate(Pred1);
  } else {
    return nullptr;
  }

  // Check the upper range comparison, e.g. x < n
  ICmpInst::Predicate NewPred;
  switch (Pred1) {
  case ICmpInst::ICMP_SLT: NewPred = ICmpInst::ICMP_ULT; break;
  case ICmpInst::ICMP_SLE: NewPred = ICmpInst::ICMP_ULE; break;
  default: return nullptr;
  }

  // This simplification is only valid if the upper range end is not negative.
  KnownBits Known = computeKnownBits(RangeEnd, /*Depth=*/0, Cmp1);
  if (!Known.isNonNegative())
    return nullptr;

  if (Inverted)
    NewPred = ICmpInst::getInversePredicate(NewPred);

  return Builder.CreateICmp(NewPred, Input, RangeEnd);
}
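
// Illustrative IR (invented names), with %n known to be non-negative:
//   %lo = icmp sge i32 %x, 0
//   %hi = icmp slt i32 %x, %n
//   and i1 %lo, %hi   -->   icmp ult i32 %x, %n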

// (or (icmp eq X, 0), (icmp eq X, Pow2OrZero))
//  -> (icmp eq (and X, Pow2OrZero), X)
// (and (icmp ne X, 0), (icmp ne X, Pow2OrZero))
//  -> (icmp ne (and X, Pow2OrZero), X)
static Value *
foldAndOrOfICmpsWithPow2AndWithZero(InstCombiner::BuilderTy &Builder,
                                    ICmpInst *LHS, ICmpInst *RHS, bool IsAnd,
                                    const SimplifyQuery &Q) {
  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  // Make sure we have the right compares for our op.
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  // Make it so we can match LHS against the (icmp eq/ne X, 0) just for
  // simplicity.
  if (match(RHS->getOperand(1), m_Zero()))
    std::swap(LHS, RHS);

  Value *Pow2, *Op;
  // Match the desired pattern:
  //   LHS: (icmp eq/ne X, 0)
  //   RHS: (icmp eq/ne X, Pow2OrZero)
  // Skip if Pow2OrZero is 1. Either way it gets folded to (icmp ugt X, 1) but
  // this form ends up slightly less canonical.
  // We could potentially be more sophisticated than requiring LHS/RHS
  // be one-use. We don't create additional instructions if only one
  // of them is one-use. So cases where one is one-use and the other
  // is two-use might be profitable.
  if (!match(LHS, m_OneUse(m_ICmp(Pred, m_Value(Op), m_Zero()))) ||
      !match(RHS, m_OneUse(m_c_ICmp(Pred, m_Specific(Op), m_Value(Pow2)))) ||
      match(Pow2, m_One()) ||
      !isKnownToBeAPowerOfTwo(Pow2, Q.DL, /*OrZero=*/true, /*Depth=*/0, Q.AC,
                              Q.CxtI, Q.DT))
    return nullptr;

  Value *And = Builder.CreateAnd(Op, Pow2);
  return Builder.CreateICmp(Pred, And, Op);
}
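
// For example (a sketch), with %p known to be a power of two or zero:
//   (icmp eq %x, 0) | (icmp eq %x, %p)  -->  icmp eq (and %x, %p), %x
// The and/eq form is true exactly when %x is 0 or %x equals %p, since the
// only subsets of a single-bit (or zero) mask are 0 and the mask itself.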

// Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
// Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
Value *InstCombinerImpl::foldAndOrOfICmpsOfAndWithPow2(ICmpInst *LHS,
                                                       ICmpInst *RHS,
                                                       Instruction *CxtI,
                                                       bool IsAnd,
                                                       bool IsLogical) {
  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
  if (LHS->getPredicate() != Pred || RHS->getPredicate() != Pred)
    return nullptr;

  if (!match(LHS->getOperand(1), m_Zero()) ||
      !match(RHS->getOperand(1), m_Zero()))
    return nullptr;

  Value *L1, *L2, *R1, *R2;
  if (match(LHS->getOperand(0), m_And(m_Value(L1), m_Value(L2))) &&
      match(RHS->getOperand(0), m_And(m_Value(R1), m_Value(R2)))) {
    if (L1 == R2 || L2 == R2)
      std::swap(R1, R2);
    if (L2 == R1)
      std::swap(L1, L2);

    if (L1 == R1 &&
        isKnownToBeAPowerOfTwo(L2, false, 0, CxtI) &&
        isKnownToBeAPowerOfTwo(R2, false, 0, CxtI)) {
      // If this is a logical and/or, then we must prevent propagation of a
      // poison value from the RHS by inserting freeze.
      if (IsLogical)
        R2 = Builder.CreateFreeze(R2);
      Value *Mask = Builder.CreateOr(L2, R2);
      Value *Masked = Builder.CreateAnd(L1, Mask);
      auto NewPred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
      return Builder.CreateICmp(NewPred, Masked, Mask);
    }
  }

  return nullptr;
}
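
// Worked example: with K1 = 1 and K2 = 2,
//   ((A & 1) == 0) | ((A & 2) == 0)  -->  (A & 3) != 3
// i.e. "some tested bit is clear" becomes "not all tested bits are set".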

/// General pattern:
///   X & Y
///
/// Where Y is checking that all the high bits (covered by a mask 4294967168)
/// are uniform, i.e. %arg & 4294967168 can be either 4294967168 or 0
/// Pattern can be one of:
///   %t = add i32 %arg, 128
///   %r = icmp ult i32 %t, 256
/// Or
///   %t0 = shl i32 %arg, 24
///   %t1 = ashr i32 %t0, 24
///   %r = icmp eq i32 %t1, %arg
/// Or
///   %t0 = trunc i32 %arg to i8
///   %t1 = sext i8 %t0 to i32
///   %r = icmp eq i32 %t1, %arg
/// This pattern is a signed truncation check.
///
/// And X is checking that some bit in that same mask is zero.
/// I.e. can be one of:
///   %r = icmp sgt i32 %arg, -1
/// Or
///   %t = and i32 %arg, 2147483648
///   %r = icmp eq i32 %t, 0
///
/// Since we are checking that all the bits in that mask are the same,
/// and a particular bit is zero, what we are really checking is that all the
/// masked bits are zero.
/// So this should be transformed to:
///   %r = icmp ult i32 %arg, 128
static Value *foldSignedTruncationCheck(ICmpInst *ICmp0, ICmpInst *ICmp1,
                                        Instruction &CxtI,
                                        InstCombiner::BuilderTy &Builder) {
  assert(CxtI.getOpcode() == Instruction::And);

  // Match  icmp ult (add %arg, C01), C1  (C1 == C01 << 1; powers of two)
  auto tryToMatchSignedTruncationCheck = [](ICmpInst *ICmp, Value *&X,
                                            APInt &SignBitMask) -> bool {
    CmpInst::Predicate Pred;
    const APInt *I01, *I1; // powers of two; I1 == I01 << 1
    if (!(match(ICmp,
                m_ICmp(Pred, m_Add(m_Value(X), m_Power2(I01)), m_Power2(I1))) &&
          Pred == ICmpInst::ICMP_ULT && I1->ugt(*I01) && I01->shl(1) == *I1))
      return false;
    // Which bit is the new sign bit as per the 'signed truncation' pattern?
    SignBitMask = *I01;
    return true;
  };

  // One icmp needs to be a 'signed truncation check'.
  // We need to match this first, else we will mismatch commutative cases.
  Value *X1;
  APInt HighestBit;
  ICmpInst *OtherICmp;
  if (tryToMatchSignedTruncationCheck(ICmp1, X1, HighestBit))
    OtherICmp = ICmp0;
  else if (tryToMatchSignedTruncationCheck(ICmp0, X1, HighestBit))
    OtherICmp = ICmp1;
  else
    return nullptr;

  assert(HighestBit.isPowerOf2() && "expected to be power of two (non-zero)");

  // Try to match/decompose into:  icmp eq (X & Mask), 0
  auto tryToDecompose = [](ICmpInst *ICmp, Value *&X,
                           APInt &UnsetBitsMask) -> bool {
    CmpInst::Predicate Pred = ICmp->getPredicate();
    // Can it be decomposed into  icmp eq (X & Mask), 0  ?
    if (llvm::decomposeBitTestICmp(ICmp->getOperand(0), ICmp->getOperand(1),
                                   Pred, X, UnsetBitsMask,
                                   /*LookThroughTrunc=*/false) &&
        Pred == ICmpInst::ICMP_EQ)
      return true;
    // Is it  icmp eq (X & Mask), 0  already?
    const APInt *Mask;
    if (match(ICmp, m_ICmp(Pred, m_And(m_Value(X), m_APInt(Mask)), m_Zero())) &&
        Pred == ICmpInst::ICMP_EQ) {
      UnsetBitsMask = *Mask;
      return true;
    }
    return false;
  };

  // And the other icmp needs to be decomposable into a bit test.
  Value *X0;
  APInt UnsetBitsMask;
  if (!tryToDecompose(OtherICmp, X0, UnsetBitsMask))
    return nullptr;

  assert(!UnsetBitsMask.isZero() && "empty mask makes no sense.");

  // Are they working on the same value?
  Value *X;
  if (X1 == X0) {
    // Ok as is.
    X = X1;
  } else if (match(X0, m_Trunc(m_Specific(X1)))) {
    UnsetBitsMask = UnsetBitsMask.zext(X1->getType()->getScalarSizeInBits());
    X = X1;
  } else
    return nullptr;

  // So which bits should be uniform as per the 'signed truncation check'?
  // (all the bits starting with (i.e. including) HighestBit)
  APInt SignBitsMask = ~(HighestBit - 1U);

  // UnsetBitsMask must have some common bits with SignBitsMask.
  if (!UnsetBitsMask.intersects(SignBitsMask))
    return nullptr;

  // Does UnsetBitsMask contain any bits outside of SignBitsMask?
  if (!UnsetBitsMask.isSubsetOf(SignBitsMask)) {
    APInt OtherHighestBit = (~UnsetBitsMask) + 1U;
    if (!OtherHighestBit.isPowerOf2())
      return nullptr;
    HighestBit = APIntOps::umin(HighestBit, OtherHighestBit);
  }
  // Else, if it does not, then all is ok as-is.

  //   %r = icmp ult %X, SignBit
  return Builder.CreateICmpULT(X, ConstantInt::get(X->getType(), HighestBit),
                               CxtI.getName() + ".simplified");
}

/// Fold (icmp eq ctpop(X) 1) | (icmp eq X 0) into (icmp ult ctpop(X) 2) and
/// fold (icmp ne ctpop(X) 1) & (icmp ne X 0) into (icmp ugt ctpop(X) 1).
/// Also used for logical and/or, must be poison safe.
static Value *foldIsPowerOf2OrZero(ICmpInst *Cmp0, ICmpInst *Cmp1, bool IsAnd,
                                   InstCombiner::BuilderTy &Builder) {
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (!match(Cmp0, m_ICmp(Pred0, m_Intrinsic<Intrinsic::ctpop>(m_Value(X)),
                          m_SpecificInt(1))) ||
      !match(Cmp1, m_ICmp(Pred1, m_Specific(X), m_ZeroInt())))
    return nullptr;

  Value *CtPop = Cmp0->getOperand(0);
  if (IsAnd && Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_NE)
    return Builder.CreateICmpUGT(CtPop, ConstantInt::get(CtPop->getType(), 1));
  if (!IsAnd && Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_EQ)
    return Builder.CreateICmpULT(CtPop, ConstantInt::get(CtPop->getType(), 2));

  return nullptr;
}

/// Reduce a pair of compares that check if a value has exactly 1 bit set.
/// Also used for logical and/or, must be poison safe if range attributes are
/// dropped.
static Value *foldIsPowerOf2(ICmpInst *Cmp0, ICmpInst *Cmp1, bool JoinedByAnd,
                             InstCombiner::BuilderTy &Builder,
                             InstCombinerImpl &IC) {
  // Handle 'and' / 'or' commutation: make the equality check the first
  // operand.
  if (JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_NE)
    std::swap(Cmp0, Cmp1);
  else if (!JoinedByAnd && Cmp1->getPredicate() == ICmpInst::ICMP_EQ)
    std::swap(Cmp0, Cmp1);

  // (X != 0) && (ctpop(X) u< 2) --> ctpop(X) == 1
  CmpInst::Predicate Pred0, Pred1;
  Value *X;
  if (JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(2))) &&
      Pred0 == ICmpInst::ICMP_NE && Pred1 == ICmpInst::ICMP_ULT) {
    auto *CtPop = cast<Instruction>(Cmp1->getOperand(0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpEQ(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  // (X == 0) || (ctpop(X) u> 1) --> ctpop(X) != 1
  if (!JoinedByAnd && match(Cmp0, m_ICmp(Pred0, m_Value(X), m_ZeroInt())) &&
      match(Cmp1, m_ICmp(Pred1, m_Intrinsic<Intrinsic::ctpop>(m_Specific(X)),
                         m_SpecificInt(1))) &&
      Pred0 == ICmpInst::ICMP_EQ && Pred1 == ICmpInst::ICMP_UGT) {
    auto *CtPop = cast<Instruction>(Cmp1->getOperand(0));
    // Drop range attributes and re-infer them in the next iteration.
    CtPop->dropPoisonGeneratingAnnotations();
    IC.addToWorklist(CtPop);
    return Builder.CreateICmpNE(CtPop, ConstantInt::get(CtPop->getType(), 1));
  }
  return nullptr;
}

/// Try to fold (icmp(A & B) == 0) & (icmp(A & D) != E) into (icmp A u< D) iff
/// B is a contiguous set of ones starting from the most significant bit
/// (negative power of 2), D and E are equal, and D is a contiguous set of ones
/// starting at the most significant zero bit in B. Parameter B supports
/// masking using undef/poison in either scalar or vector values.
static Value *foldNegativePower2AndShiftedMask(
    Value *A, Value *B, Value *D, Value *E, ICmpInst::Predicate PredL,
    ICmpInst::Predicate PredR, InstCombiner::BuilderTy &Builder) {
  assert(ICmpInst::isEquality(PredL) && ICmpInst::isEquality(PredR) &&
         "Expected equality predicates for masked type of icmps.");
  if (PredL != ICmpInst::ICMP_EQ || PredR != ICmpInst::ICMP_NE)
    return nullptr;

  if (!match(B, m_NegatedPower2()) || !match(D, m_ShiftedMask()) ||
      !match(E, m_ShiftedMask()))
    return nullptr;

  // Test scalar arguments for conversion. B has been validated earlier to be
  // a negative power of two and thus is guaranteed to have one or more
  // contiguous ones starting from the MSB followed by zero or more contiguous
  // zeros. D has been validated earlier to be a shifted set of one or more
  // contiguous ones. In order to match, B leading ones and D leading zeros
  // should be equal. The predicate that B be a negative power of 2 prevents
  // the condition of there ever being zero leading ones. Thus 0 == 0 cannot
  // occur. The predicate that D always be a shifted mask prevents the
  // condition of D equaling 0. This prevents matching the condition where B
  // contains the maximum number of leading one bits (-1) and D contains the
  // maximum number of leading zero bits (0).
  auto isReducible = [](const Value *B, const Value *D, const Value *E) {
    const APInt *BCst, *DCst, *ECst;
    return match(B, m_APIntAllowPoison(BCst)) && match(D, m_APInt(DCst)) &&
           match(E, m_APInt(ECst)) && *DCst == *ECst &&
           (isa<PoisonValue>(B) ||
            (BCst->countLeadingOnes() == DCst->countLeadingZeros()));
  };

  // Test vector type arguments for conversion.
  if (const auto *BVTy = dyn_cast<VectorType>(B->getType())) {
    const auto *BFVTy = dyn_cast<FixedVectorType>(BVTy);
    const auto *BConst = dyn_cast<Constant>(B);
    const auto *DConst = dyn_cast<Constant>(D);
    const auto *EConst = dyn_cast<Constant>(E);

    if (!BFVTy || !BConst || !DConst || !EConst)
      return nullptr;

    for (unsigned I = 0; I != BFVTy->getNumElements(); ++I) {
      const auto *BElt = BConst->getAggregateElement(I);
      const auto *DElt = DConst->getAggregateElement(I);
      const auto *EElt = EConst->getAggregateElement(I);

      if (!BElt || !DElt || !EElt)
        return nullptr;
      if (!isReducible(BElt, DElt, EElt))
        return nullptr;
    }
  } else {
    // Test scalar type arguments for conversion.
    if (!isReducible(B, D, E))
      return nullptr;
  }
  return Builder.CreateICmp(ICmpInst::ICMP_ULT, A, D);
}
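
// Worked example (illustrative, i8): B = -16, D = E = 15. B has four leading
// ones and D has four leading zeros, so
//   (icmp eq (A & -16), 0) & (icmp ne (A & 15), 15)  -->  icmp ult A, 15
// since the left side means A u< 16 and the right side excludes A == 15.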

/// Try to fold ((icmp X u< P) & (icmp(X & M) != M)) or ((icmp X s> -1) &
/// (icmp(X & M) != M)) into (icmp X u< M). Where P is a power of 2, M < P,
/// and M is a contiguous shifted mask starting at the rightmost significant
/// zero bit in P. SGT is supported as when P is the largest representable
/// power of 2, an earlier optimization converts the expression into
/// (icmp X s> -1). Parameter P supports masking using undef/poison in either
/// scalar or vector values.
static Value *foldPowerOf2AndShiftedMask(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                         bool JoinedByAnd,
                                         InstCombiner::BuilderTy &Builder) {
  if (!JoinedByAnd)
    return nullptr;
  Value *A = nullptr, *B = nullptr, *C = nullptr, *D = nullptr, *E = nullptr;
  ICmpInst::Predicate CmpPred0 = Cmp0->getPredicate(),
                      CmpPred1 = Cmp1->getPredicate();
  // Assuming P is a 2^n, getMaskedTypeForICmpPair will normalize (icmp X u<
  // 2^n) into (icmp (X & ~(2^n-1)) == 0) and (icmp X s> -1) into (icmp (X &
  // SignMask) == 0).
  std::optional<std::pair<unsigned, unsigned>> MaskPair =
      getMaskedTypeForICmpPair(A, B, C, D, E, Cmp0, Cmp1, CmpPred0, CmpPred1);
  if (!MaskPair)
    return nullptr;

  const auto compareBMask = BMask_NotMixed | BMask_NotAllOnes;
  unsigned CmpMask0 = MaskPair->first;
  unsigned CmpMask1 = MaskPair->second;
  if ((CmpMask0 & Mask_AllZeros) && (CmpMask1 == compareBMask)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, B, D, E, CmpPred0,
                                                    CmpPred1, Builder))
      return V;
  } else if ((CmpMask0 == compareBMask) && (CmpMask1 & Mask_AllZeros)) {
    if (Value *V = foldNegativePower2AndShiftedMask(A, D, B, C, CmpPred1,
                                                    CmpPred0, Builder))
      return V;
  }
  return nullptr;
}

/// Commuted variants are assumed to be handled by calling this function again
/// with the parameters swapped.
static Value *foldUnsignedUnderflowCheck(ICmpInst *ZeroICmp,
                                         ICmpInst *UnsignedICmp, bool IsAnd,
                                         const SimplifyQuery &Q,
                                         InstCombiner::BuilderTy &Builder) {
  Value *ZeroCmpOp;
  ICmpInst::Predicate EqPred;
  if (!match(ZeroICmp, m_ICmp(EqPred, m_Value(ZeroCmpOp), m_Zero())) ||
      !ICmpInst::isEquality(EqPred))
    return nullptr;

  ICmpInst::Predicate UnsignedPred;

  Value *A, *B;
  if (match(UnsignedICmp,
            m_c_ICmp(UnsignedPred, m_Specific(ZeroCmpOp), m_Value(A))) &&
      match(ZeroCmpOp, m_c_Add(m_Specific(A), m_Value(B))) &&
      (ZeroICmp->hasOneUse() || UnsignedICmp->hasOneUse())) {
    auto GetKnownNonZeroAndOther = [&](Value *&NonZero, Value *&Other) {
      if (!isKnownNonZero(NonZero, Q))
        std::swap(NonZero, Other);
      return isKnownNonZero(NonZero, Q);
    };

    // Given ZeroCmpOp = (A + B):
    //   ZeroCmpOp <  A && ZeroCmpOp != 0  -->  (0-X) <  Y
    //   ZeroCmpOp >= A || ZeroCmpOp == 0  -->  (0-X) >= Y
    // with X being the value (A/B) that is known to be non-zero,
    // and Y being the remaining value.
    if (UnsignedPred == ICmpInst::ICMP_ULT && EqPred == ICmpInst::ICMP_NE &&
        IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpULT(Builder.CreateNeg(B), A);
    if (UnsignedPred == ICmpInst::ICMP_UGE && EqPred == ICmpInst::ICMP_EQ &&
        !IsAnd && GetKnownNonZeroAndOther(B, A))
      return Builder.CreateICmpUGE(Builder.CreateNeg(B), A);
  }

  return nullptr;
}
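
// Illustrative IR (a sketch, invented names), with %b known to be non-zero:
//   %sum = add i32 %a, %b
//   %c0  = icmp ult i32 %sum, %a
//   %c1  = icmp ne i32 %sum, 0
//   and i1 %c0, %c1   -->   icmp ult i32 (sub i32 0, %b), %a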

struct IntPart {
  Value *From;
  unsigned StartBit;
  unsigned NumBits;
};

/// Match an extraction of bits from an integer.
static std::optional<IntPart> matchIntPart(Value *V) {
  Value *X;
  if (!match(V, m_OneUse(m_Trunc(m_Value(X)))))
    return std::nullopt;

  unsigned NumOriginalBits = X->getType()->getScalarSizeInBits();
  unsigned NumExtractedBits = V->getType()->getScalarSizeInBits();
  Value *Y;
  const APInt *Shift;
  // For a trunc(lshr Y, Shift) pattern, make sure we're only extracting bits
  // from Y, not any shifted-in zeroes.
  if (match(X, m_OneUse(m_LShr(m_Value(Y), m_APInt(Shift)))) &&
      Shift->ule(NumOriginalBits - NumExtractedBits))
    return {{Y, (unsigned)Shift->getZExtValue(), NumExtractedBits}};
  return {{X, 0, NumExtractedBits}};
}
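
// For example, %t = trunc i32 (lshr i32 %y, 8) to i8 matches as
// {From = %y, StartBit = 8, NumBits = 8}, i.e. bits 8..15 of %y (given the
// trunc and lshr are each single-use).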

/// Materialize an extraction of bits from an integer in IR.
static Value *extractIntPart(const IntPart &P, IRBuilderBase &Builder) {
  Value *V = P.From;
  if (P.StartBit)
    V = Builder.CreateLShr(V, P.StartBit);
  Type *TruncTy = V->getType()->getWithNewBitWidth(P.NumBits);
  if (TruncTy != V->getType())
    V = Builder.CreateTrunc(V, TruncTy);
  return V;
}

/// (icmp eq X0, Y0) & (icmp eq X1, Y1) -> icmp eq X01, Y01
/// (icmp ne X0, Y0) | (icmp ne X1, Y1) -> icmp ne X01, Y01
/// where X0, X1 and Y0, Y1 are adjacent parts extracted from an integer.
Value *InstCombinerImpl::foldEqOfParts(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                       bool IsAnd) {
  if (!Cmp0->hasOneUse() || !Cmp1->hasOneUse())
    return nullptr;

  CmpInst::Predicate Pred = IsAnd ? CmpInst::ICMP_EQ : CmpInst::ICMP_NE;
  auto GetMatchPart = [&](ICmpInst *Cmp,
                          unsigned OpNo) -> std::optional<IntPart> {
    if (Pred == Cmp->getPredicate())
      return matchIntPart(Cmp->getOperand(OpNo));

    const APInt *C;
    // (icmp eq (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ult (xor x, y), 1 << C) so also look for that.
    if (Pred == CmpInst::ICMP_EQ && Cmp->getPredicate() == CmpInst::ICMP_ULT) {
      if (!match(Cmp->getOperand(1), m_Power2(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    }

    // (icmp ne (lshr x, C), (lshr y, C)) gets optimized to:
    // (icmp ugt (xor x, y), (1 << C) - 1) so also look for that.
    else if (Pred == CmpInst::ICMP_NE &&
             Cmp->getPredicate() == CmpInst::ICMP_UGT) {
      if (!match(Cmp->getOperand(1), m_LowBitMask(C)) ||
          !match(Cmp->getOperand(0), m_Xor(m_Value(), m_Value())))
        return std::nullopt;
    } else {
      return std::nullopt;
    }

    unsigned From = Pred == CmpInst::ICMP_NE ? C->popcount() : C->countr_zero();
    Instruction *I = cast<Instruction>(Cmp->getOperand(0));
    return {{I->getOperand(OpNo), From, C->getBitWidth() - From}};
  };

  std::optional<IntPart> L0 = GetMatchPart(Cmp0, 0);
  std::optional<IntPart> R0 = GetMatchPart(Cmp0, 1);
  std::optional<IntPart> L1 = GetMatchPart(Cmp1, 0);
  std::optional<IntPart> R1 = GetMatchPart(Cmp1, 1);
  if (!L0 || !R0 || !L1 || !R1)
    return nullptr;

  // Make sure the LHS/RHS compare a part of the same value, possibly after
  // an operand swap.
  if (L0->From != L1->From || R0->From != R1->From) {
    if (L0->From != R1->From || R0->From != L1->From)
      return nullptr;
    std::swap(L1, R1);
  }

  // Make sure the extracted parts are adjacent, canonicalizing to L0/R0 being
  // the low part and L1/R1 being the high part.
  if (L0->StartBit + L0->NumBits != L1->StartBit ||
      R0->StartBit + R0->NumBits != R1->StartBit) {
    if (L1->StartBit + L1->NumBits != L0->StartBit ||
        R1->StartBit + R1->NumBits != R0->StartBit)
      return nullptr;
    std::swap(L0, L1);
    std::swap(R0, R1);
  }

  // We can simplify to a comparison of these larger parts of the integers.
  IntPart L = {L0->From, L0->StartBit, L0->NumBits + L1->NumBits};
  IntPart R = {R0->From, R0->StartBit, R0->NumBits + R1->NumBits};
  Value *LValue = extractIntPart(L, Builder);
  Value *RValue = extractIntPart(R, Builder);
  return Builder.CreateICmp(Pred, LValue, RValue);
}
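
// Illustrative IR (a sketch, invented names): comparing the low byte and the
// second byte of %x and %y separately,
//   %x0 = trunc i32 %x to i8
//   %y0 = trunc i32 %y to i8
//   %x1 = trunc i32 (lshr i32 %x, 8) to i8
//   %y1 = trunc i32 (lshr i32 %y, 8) to i8
//   (icmp eq %x0, %y0) & (icmp eq %x1, %y1)
// becomes a single compare of the combined low 16 bits:
//   icmp eq (trunc i32 %x to i16), (trunc i32 %y to i16)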

/// Reduce logic-of-compares with equality to a constant by substituting a
/// common operand with the constant. Callers are expected to call this with
/// Cmp0/Cmp1 switched to handle logic op commutativity.
static Value *foldAndOrOfICmpsWithConstEq(ICmpInst *Cmp0, ICmpInst *Cmp1,
                                          bool IsAnd, bool IsLogical,
                                          InstCombiner::BuilderTy &Builder,
                                          const SimplifyQuery &Q) {
  // Match an equality compare with a non-poison constant as Cmp0.
  // Also, give up if the compare can be constant-folded to avoid looping.
  ICmpInst::Predicate Pred0;
  Value *X;
  Constant *C;
  if (!match(Cmp0, m_ICmp(Pred0, m_Value(X), m_Constant(C))) ||
      !isGuaranteedNotToBeUndefOrPoison(C) || isa<Constant>(X))
    return nullptr;
  if ((IsAnd && Pred0 != ICmpInst::ICMP_EQ) ||
      (!IsAnd && Pred0 != ICmpInst::ICMP_NE))
    return nullptr;

  // The other compare must include a common operand (X). Canonicalize the
  // common operand as operand 1 (Pred1 is swapped if the common operand was
  // operand 0).
  Value *Y;
  ICmpInst::Predicate Pred1;
  if (!match(Cmp1, m_c_ICmp(Pred1, m_Value(Y), m_Specific(X))))
    return nullptr;

  // Replace variable with constant value equivalence to remove a variable use:
  // (X == C) && (Y Pred1 X) --> (X == C) && (Y Pred1 C)
  // (X != C) || (Y Pred1 X) --> (X != C) || (Y Pred1 C)
  // Can think of the 'or' substitution with the 'and' bool equivalent:
  // A || B --> A || (!A && B)
  Value *SubstituteCmp = simplifyICmpInst(Pred1, Y, C, Q);
  if (!SubstituteCmp) {
    // If we need to create a new instruction, require that the old compare can
    // be removed.
    if (!Cmp1->hasOneUse())
      return nullptr;
    SubstituteCmp = Builder.CreateICmp(Pred1, Y, C);
  }
  if (IsLogical)
    return IsAnd ? Builder.CreateLogicalAnd(Cmp0, SubstituteCmp)
                 : Builder.CreateLogicalOr(Cmp0, SubstituteCmp);
  return Builder.CreateBinOp(IsAnd ? Instruction::And : Instruction::Or, Cmp0,
                             SubstituteCmp);
}

/// Fold (icmp Pred1 V1, C1) & (icmp Pred2 V2, C2)
/// or   (icmp Pred1 V1, C1) | (icmp Pred2 V2, C2)
/// into a single comparison using range-based reasoning.
/// NOTE: This is also used for logical and/or, must be poison-safe!
Value *InstCombinerImpl::foldAndOrOfICmpsUsingRanges(ICmpInst *ICmp1,
                                                     ICmpInst *ICmp2,
                                                     bool IsAnd) {
  ICmpInst::Predicate Pred1, Pred2;
  Value *V1, *V2;
  const APInt *C1, *C2;
  if (!match(ICmp1, m_ICmp(Pred1, m_Value(V1), m_APInt(C1))) ||
      !match(ICmp2, m_ICmp(Pred2, m_Value(V2), m_APInt(C2))))
    return nullptr;

  // Look through add of a constant offset on V1, V2, or both operands. This
  // allows us to interpret the V + C' < C'' range idiom into a proper range.
  const APInt *Offset1 = nullptr, *Offset2 = nullptr;
  if (V1 != V2) {
    Value *X;
    if (match(V1, m_Add(m_Value(X), m_APInt(Offset1))))
      V1 = X;
    if (match(V2, m_Add(m_Value(X), m_APInt(Offset2))))
      V2 = X;
  }

  if (V1 != V2)
    return nullptr;

  ConstantRange CR1 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred1) : Pred1, *C1);
  if (Offset1)
    CR1 = CR1.subtract(*Offset1);

  ConstantRange CR2 = ConstantRange::makeExactICmpRegion(
      IsAnd ? ICmpInst::getInversePredicate(Pred2) : Pred2, *C2);
  if (Offset2)
    CR2 = CR2.subtract(*Offset2);

  Type *Ty = V1->getType();
  Value *NewV = V1;
  std::optional<ConstantRange> CR = CR1.exactUnionWith(CR2);
  if (!CR) {
    if (!(ICmp1->hasOneUse() && ICmp2->hasOneUse()) || CR1.isWrappedSet() ||
        CR2.isWrappedSet())
      return nullptr;

    // Check whether we have equal-size ranges that only differ by one bit.
    // In that case we can apply a mask to map one range onto the other.
    APInt LowerDiff = CR1.getLower() ^ CR2.getLower();
    APInt UpperDiff = (CR1.getUpper() - 1) ^ (CR2.getUpper() - 1);
    APInt CR1Size = CR1.getUpper() - CR1.getLower();
    if (!LowerDiff.isPowerOf2() || LowerDiff != UpperDiff ||
        CR1Size != CR2.getUpper() - CR2.getLower())
      return nullptr;

    CR = CR1.getLower().ult(CR2.getLower()) ? CR1 : CR2;
    NewV = Builder.CreateAnd(NewV, ConstantInt::get(Ty, ~LowerDiff));
  }

  if (IsAnd)
    CR = CR->inverse();

  CmpInst::Predicate NewPred;
  APInt NewC, Offset;
  CR->getEquivalentICmp(NewPred, NewC, Offset);

  if (Offset != 0)
    NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
  return Builder.CreateICmp(NewPred, NewV, ConstantInt::get(Ty, NewC));
}
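
// Worked example: for (icmp sge i32 %x, 0) & (icmp slt i32 %x, 10), the two
// constraints together describe the single range [0, 10), so the fold
// produces icmp ult i32 %x, 10.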

/// Ignore all operations which only change the sign of a value, returning the
/// underlying magnitude value.
static Value *stripSignOnlyFPOps(Value *Val) {
  match(Val, m_FNeg(m_Value(Val)));
  match(Val, m_FAbs(m_Value(Val)));
  match(Val, m_CopySign(m_Value(Val), m_Value()));
  return Val;
}
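
// For example, stripSignOnlyFPOps(fneg(fabs(%x))) returns %x: the fneg is
// peeled first, then the fabs.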

/// Matches the canonical form of a not-NaN check: fcmp ord x, 0.
static bool matchIsNotNaN(FCmpInst::Predicate P, Value *LHS, Value *RHS) {
  return P == FCmpInst::FCMP_ORD && match(RHS, m_AnyZeroFP());
}
1349
1350 /// Matches fcmp u__ x, +/-inf
matchUnorderedInfCompare(FCmpInst::Predicate P,Value * LHS,Value * RHS)1351 static bool matchUnorderedInfCompare(FCmpInst::Predicate P, Value *LHS,
1352 Value *RHS) {
1353 return FCmpInst::isUnordered(P) && match(RHS, m_Inf());
1354 }
1355
1356 /// and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1357 ///
1358 /// Clang emits this pattern for doing an isfinite check in __builtin_isnormal.
1359 static Value *matchIsFiniteTest(InstCombiner::BuilderTy &Builder, FCmpInst *LHS,
1360 FCmpInst *RHS) {
1361 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1362 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1363 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1364
1365 if (!matchIsNotNaN(PredL, LHS0, LHS1) ||
1366 !matchUnorderedInfCompare(PredR, RHS0, RHS1))
1367 return nullptr;
1368
1369 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1370 FastMathFlags FMF = LHS->getFastMathFlags();
1371 FMF &= RHS->getFastMathFlags();
1372 Builder.setFastMathFlags(FMF);
1373
1374 return Builder.CreateFCmp(FCmpInst::getOrderedPredicate(PredR), RHS0, RHS1);
1375 }
1376
1377 Value *InstCombinerImpl::foldLogicOfFCmps(FCmpInst *LHS, FCmpInst *RHS,
1378 bool IsAnd, bool IsLogicalSelect) {
1379 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
1380 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
1381 FCmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
1382
1383 if (LHS0 == RHS1 && RHS0 == LHS1) {
1384 // Swap RHS operands to match LHS.
1385 PredR = FCmpInst::getSwappedPredicate(PredR);
1386 std::swap(RHS0, RHS1);
1387 }
1388
1389 // Simplify (fcmp cc0 x, y) & (fcmp cc1 x, y).
1390 // Suppose the relation between x and y is R, where R is one of
1391 // U(1000), L(0100), G(0010) or E(0001), and CC0 and CC1 are the bitmasks for
1392 // testing the desired relations.
1393 //
1394 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1395 // bool(R & CC0) && bool(R & CC1)
1396 // = bool((R & CC0) & (R & CC1))
1397 // = bool(R & (CC0 & CC1)) <= by re-association, commutation, and idempotency
1398 //
1399 // Since (R & CC0) and (R & CC1) are either R or 0, we actually have this:
1400 // bool(R & CC0) || bool(R & CC1)
1401 // = bool((R & CC0) | (R & CC1))
1402 // = bool(R & (CC0 | CC1)) <= by reversed distribution (contribution? ;)
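// E.g. (fcmp olt x, y) | (fcmp ogt x, y) --> fcmp one x, y, since
// L(0100) | G(0010) = 0110, which is the code for 'one'.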
1403 if (LHS0 == RHS0 && LHS1 == RHS1) {
1404 unsigned FCmpCodeL = getFCmpCode(PredL);
1405 unsigned FCmpCodeR = getFCmpCode(PredR);
1406 unsigned NewPred = IsAnd ? FCmpCodeL & FCmpCodeR : FCmpCodeL | FCmpCodeR;
1407
1408 // Intersect the fast math flags.
1409 // TODO: We can union the fast math flags unless this is a logical select.
1410 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1411 FastMathFlags FMF = LHS->getFastMathFlags();
1412 FMF &= RHS->getFastMathFlags();
1413 Builder.setFastMathFlags(FMF);
1414
1415 return getFCmpValue(NewPred, LHS0, LHS1, Builder);
1416 }
1417
1418 // This transform is not valid for a logical select.
1419 if (!IsLogicalSelect &&
1420 ((PredL == FCmpInst::FCMP_ORD && PredR == FCmpInst::FCMP_ORD && IsAnd) ||
1421 (PredL == FCmpInst::FCMP_UNO && PredR == FCmpInst::FCMP_UNO &&
1422 !IsAnd))) {
1423 if (LHS0->getType() != RHS0->getType())
1424 return nullptr;
1425
1426 // FCmp canonicalization ensures that (fcmp ord/uno X, X) and
1427 // (fcmp ord/uno X, C) will be transformed to (fcmp X, +0.0).
1428 if (match(LHS1, m_PosZeroFP()) && match(RHS1, m_PosZeroFP()))
1429 // Ignore the constants because they are obviously not NANs:
1430 // (fcmp ord x, 0.0) & (fcmp ord y, 0.0) -> (fcmp ord x, y)
1431 // (fcmp uno x, 0.0) | (fcmp uno y, 0.0) -> (fcmp uno x, y)
1432 return Builder.CreateFCmp(PredL, LHS0, RHS0);
1433 }
1434
1435 if (IsAnd && stripSignOnlyFPOps(LHS0) == stripSignOnlyFPOps(RHS0)) {
1436 // and (fcmp ord x, 0), (fcmp u* x, inf) -> fcmp o* x, inf
1437 // and (fcmp ord x, 0), (fcmp u* fabs(x), inf) -> fcmp o* x, inf
1438 if (Value *Left = matchIsFiniteTest(Builder, LHS, RHS))
1439 return Left;
1440 if (Value *Right = matchIsFiniteTest(Builder, RHS, LHS))
1441 return Right;
1442 }
1443
1444 // Turn at least two fcmps with constants into llvm.is.fpclass.
1445 //
1446 // If we can represent a combined value test with one class call, we can
1447 // potentially eliminate 4-6 instructions. If we can represent a test with a
1448 // single fcmp with fneg and fabs, that's likely a better canonical form.
1449 if (LHS->hasOneUse() && RHS->hasOneUse()) {
1450 auto [ClassValRHS, ClassMaskRHS] =
1451 fcmpToClassTest(PredR, *RHS->getFunction(), RHS0, RHS1);
1452 if (ClassValRHS) {
1453 auto [ClassValLHS, ClassMaskLHS] =
1454 fcmpToClassTest(PredL, *LHS->getFunction(), LHS0, LHS1);
1455 if (ClassValLHS == ClassValRHS) {
1456 unsigned CombinedMask = IsAnd ? (ClassMaskLHS & ClassMaskRHS)
1457 : (ClassMaskLHS | ClassMaskRHS);
1458 return Builder.CreateIntrinsic(
1459 Intrinsic::is_fpclass, {ClassValLHS->getType()},
1460 {ClassValLHS, Builder.getInt32(CombinedMask)});
1461 }
1462 }
1463 }
1464
1465 // Canonicalize the range check idiom:
1466 // and (fcmp olt/ole/ult/ule x, C), (fcmp ogt/oge/ugt/uge x, -C)
1467 // --> fabs(x) olt/ole/ult/ule C
1468 // or (fcmp ogt/oge/ugt/uge x, C), (fcmp olt/ole/ult/ule x, -C)
1469 // --> fabs(x) ogt/oge/ugt/uge C
1470 // TODO: Generalize to handle a negated variable operand?
1471 const APFloat *LHSC, *RHSC;
1472 if (LHS0 == RHS0 && LHS->hasOneUse() && RHS->hasOneUse() &&
1473 FCmpInst::getSwappedPredicate(PredL) == PredR &&
1474 match(LHS1, m_APFloatAllowPoison(LHSC)) &&
1475 match(RHS1, m_APFloatAllowPoison(RHSC)) &&
1476 LHSC->bitwiseIsEqual(neg(*RHSC))) {
1477 auto IsLessThanOrLessEqual = [](FCmpInst::Predicate Pred) {
1478 switch (Pred) {
1479 case FCmpInst::FCMP_OLT:
1480 case FCmpInst::FCMP_OLE:
1481 case FCmpInst::FCMP_ULT:
1482 case FCmpInst::FCMP_ULE:
1483 return true;
1484 default:
1485 return false;
1486 }
1487 };
1488 if (IsLessThanOrLessEqual(IsAnd ? PredR : PredL)) {
1489 std::swap(LHSC, RHSC);
1490 std::swap(PredL, PredR);
1491 }
1492 if (IsLessThanOrLessEqual(IsAnd ? PredL : PredR)) {
1493 BuilderTy::FastMathFlagGuard Guard(Builder);
1494 Builder.setFastMathFlags(LHS->getFastMathFlags() |
1495 RHS->getFastMathFlags());
1496
1497 Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, LHS0);
1498 return Builder.CreateFCmp(PredL, FAbs,
1499 ConstantFP::get(LHS0->getType(), *LHSC));
1500 }
1501 }
1502
1503 return nullptr;
1504 }
1505
1506 /// Match an fcmp against a special value that performs a test possible by
1507 /// llvm.is.fpclass.
1508 static bool matchIsFPClassLikeFCmp(Value *Op, Value *&ClassVal,
1509 uint64_t &ClassMask) {
1510 auto *FCmp = dyn_cast<FCmpInst>(Op);
1511 if (!FCmp || !FCmp->hasOneUse())
1512 return false;
1513
1514 std::tie(ClassVal, ClassMask) =
1515 fcmpToClassTest(FCmp->getPredicate(), *FCmp->getParent()->getParent(),
1516 FCmp->getOperand(0), FCmp->getOperand(1));
1517 return ClassVal != nullptr;
1518 }
1519
1520 /// or (is_fpclass x, mask0), (is_fpclass x, mask1)
1521 /// -> is_fpclass x, (mask0 | mask1)
1522 /// and (is_fpclass x, mask0), (is_fpclass x, mask1)
1523 /// -> is_fpclass x, (mask0 & mask1)
1524 /// xor (is_fpclass x, mask0), (is_fpclass x, mask1)
1525 /// -> is_fpclass x, (mask0 ^ mask1)
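/// E.g. (using symbolic names for the is_fpclass test bits):
///   or (is_fpclass x, fcNan), (is_fpclass x, fcInf)
///     -> is_fpclass x, (fcNan | fcInf)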
1526 Instruction *InstCombinerImpl::foldLogicOfIsFPClass(BinaryOperator &BO,
1527 Value *Op0, Value *Op1) {
1528 Value *ClassVal0 = nullptr;
1529 Value *ClassVal1 = nullptr;
1530 uint64_t ClassMask0, ClassMask1;
1531
1532 // Restrict to folding one fcmp into one is.fpclass for now; don't introduce
1533 // a new class.
1534 //
1535 // TODO: Support forming is.fpclass out of 2 separate fcmps when codegen is
1536 // better.
1537
1538 bool IsLHSClass =
1539 match(Op0, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
1540 m_Value(ClassVal0), m_ConstantInt(ClassMask0))));
1541 bool IsRHSClass =
1542 match(Op1, m_OneUse(m_Intrinsic<Intrinsic::is_fpclass>(
1543 m_Value(ClassVal1), m_ConstantInt(ClassMask1))));
1544 if ((((IsLHSClass || matchIsFPClassLikeFCmp(Op0, ClassVal0, ClassMask0)) &&
1545 (IsRHSClass || matchIsFPClassLikeFCmp(Op1, ClassVal1, ClassMask1)))) &&
1546 ClassVal0 == ClassVal1) {
1547 unsigned NewClassMask;
1548 switch (BO.getOpcode()) {
1549 case Instruction::And:
1550 NewClassMask = ClassMask0 & ClassMask1;
1551 break;
1552 case Instruction::Or:
1553 NewClassMask = ClassMask0 | ClassMask1;
1554 break;
1555 case Instruction::Xor:
1556 NewClassMask = ClassMask0 ^ ClassMask1;
1557 break;
1558 default:
1559 llvm_unreachable("not a binary logic operator");
1560 }
1561
1562 if (IsLHSClass) {
1563 auto *II = cast<IntrinsicInst>(Op0);
1564 II->setArgOperand(
1565 1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
1566 return replaceInstUsesWith(BO, II);
1567 }
1568
1569 if (IsRHSClass) {
1570 auto *II = cast<IntrinsicInst>(Op1);
1571 II->setArgOperand(
1572 1, ConstantInt::get(II->getArgOperand(1)->getType(), NewClassMask));
1573 return replaceInstUsesWith(BO, II);
1574 }
1575
1576 CallInst *NewClass =
1577 Builder.CreateIntrinsic(Intrinsic::is_fpclass, {ClassVal0->getType()},
1578 {ClassVal0, Builder.getInt32(NewClassMask)});
1579 return replaceInstUsesWith(BO, NewClass);
1580 }
1581
1582 return nullptr;
1583 }
1584
1585 /// Look for the pattern that conditionally negates a value via math operations:
1586 /// cond.splat = sext i1 cond
1587 /// sub = add cond.splat, x
1588 /// xor = xor sub, cond.splat
1589 /// and rewrite it to do the same, but via logical operations:
1590 /// value.neg = sub 0, value
1591 /// res = select i1 cond, value.neg, value
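/// This is equivalent because when cond is true the splat is -1, so the math
/// form computes (x + -1) ^ -1 == ~(x - 1) == -x, and when cond is false the
/// splat is 0 and it computes (x + 0) ^ 0 == x.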
1592 Instruction *InstCombinerImpl::canonicalizeConditionalNegationViaMathToSelect(
1593 BinaryOperator &I) {
1594 assert(I.getOpcode() == BinaryOperator::Xor && "Only for xor!");
1595 Value *Cond, *X;
1596 // As per complexity ordering, `xor` is not commutative here.
1597 if (!match(&I, m_c_BinOp(m_OneUse(m_Value()), m_Value())) ||
1598 !match(I.getOperand(1), m_SExt(m_Value(Cond))) ||
1599 !Cond->getType()->isIntOrIntVectorTy(1) ||
1600 !match(I.getOperand(0), m_c_Add(m_SExt(m_Specific(Cond)), m_Value(X))))
1601 return nullptr;
1602 return SelectInst::Create(Cond, Builder.CreateNeg(X, X->getName() + ".neg"),
1603 X);
1604 }
1605
1606 /// This is a limited reassociation for a special case (see above) where we are
1607 /// checking if two values are either both NAN (unordered) or not-NAN (ordered).
1608 /// This could be handled more generally in '-reassociate', but it seems like
1609 /// an unlikely pattern for a large number of logic ops and fcmps.
1610 static Instruction *reassociateFCmps(BinaryOperator &BO,
1611 InstCombiner::BuilderTy &Builder) {
1612 Instruction::BinaryOps Opcode = BO.getOpcode();
1613 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1614 "Expecting and/or op for fcmp transform");
1615
1616 // There are 4 commuted variants of the pattern. Canonicalize operands of this
1617 // logic op so an fcmp is operand 0 and a matching logic op is operand 1.
1618 Value *Op0 = BO.getOperand(0), *Op1 = BO.getOperand(1), *X;
1619 FCmpInst::Predicate Pred;
1620 if (match(Op1, m_FCmp(Pred, m_Value(), m_AnyZeroFP())))
1621 std::swap(Op0, Op1);
1622
1623 // Match inner binop and the predicate for combining 2 NAN checks into 1.
1624 Value *BO10, *BO11;
1625 FCmpInst::Predicate NanPred = Opcode == Instruction::And ? FCmpInst::FCMP_ORD
1626 : FCmpInst::FCMP_UNO;
1627 if (!match(Op0, m_FCmp(Pred, m_Value(X), m_AnyZeroFP())) || Pred != NanPred ||
1628 !match(Op1, m_BinOp(Opcode, m_Value(BO10), m_Value(BO11))))
1629 return nullptr;
1630
1631 // The inner logic op must have a matching fcmp operand.
1632 Value *Y;
1633 if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
1634 Pred != NanPred || X->getType() != Y->getType())
1635 std::swap(BO10, BO11);
1636
1637 if (!match(BO10, m_FCmp(Pred, m_Value(Y), m_AnyZeroFP())) ||
1638 Pred != NanPred || X->getType() != Y->getType())
1639 return nullptr;
1640
1641 // and (fcmp ord X, 0), (and (fcmp ord Y, 0), Z) --> and (fcmp ord X, Y), Z
1642 // or (fcmp uno X, 0), (or (fcmp uno Y, 0), Z) --> or (fcmp uno X, Y), Z
1643 Value *NewFCmp = Builder.CreateFCmp(Pred, X, Y);
1644 if (auto *NewFCmpInst = dyn_cast<FCmpInst>(NewFCmp)) {
1645 // Intersect FMF from the 2 source fcmps.
1646 NewFCmpInst->copyIRFlags(Op0);
1647 NewFCmpInst->andIRFlags(BO10);
1648 }
1649 return BinaryOperator::Create(Opcode, NewFCmp, BO11);
1650 }
1651
1652 /// Match variations of De Morgan's Laws:
1653 /// (~A & ~B) == (~(A | B))
1654 /// (~A | ~B) == (~(A & B))
1655 static Instruction *matchDeMorgansLaws(BinaryOperator &I,
1656 InstCombiner &IC) {
1657 const Instruction::BinaryOps Opcode = I.getOpcode();
1658 assert((Opcode == Instruction::And || Opcode == Instruction::Or) &&
1659 "Trying to match De Morgan's Laws with something other than and/or");
1660
1661 // Flip the logic operation.
1662 const Instruction::BinaryOps FlippedOpcode =
1663 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1664
1665 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1666 Value *A, *B;
1667 if (match(Op0, m_OneUse(m_Not(m_Value(A)))) &&
1668 match(Op1, m_OneUse(m_Not(m_Value(B)))) &&
1669 !IC.isFreeToInvert(A, A->hasOneUse()) &&
1670 !IC.isFreeToInvert(B, B->hasOneUse())) {
1671 Value *AndOr =
1672 IC.Builder.CreateBinOp(FlippedOpcode, A, B, I.getName() + ".demorgan");
1673 return BinaryOperator::CreateNot(AndOr);
1674 }
1675
1676 // The 'not' ops may require reassociation.
1677 // (A & ~B) & ~C --> A & ~(B | C)
1678 // (~B & A) & ~C --> A & ~(B | C)
1679 // (A | ~B) | ~C --> A | ~(B & C)
1680 // (~B | A) | ~C --> A | ~(B & C)
1681 Value *C;
1682 if (match(Op0, m_OneUse(m_c_BinOp(Opcode, m_Value(A), m_Not(m_Value(B))))) &&
1683 match(Op1, m_Not(m_Value(C)))) {
1684 Value *FlippedBO = IC.Builder.CreateBinOp(FlippedOpcode, B, C);
1685 return BinaryOperator::Create(Opcode, A, IC.Builder.CreateNot(FlippedBO));
1686 }
1687
1688 return nullptr;
1689 }
1690
1691 bool InstCombinerImpl::shouldOptimizeCast(CastInst *CI) {
1692 Value *CastSrc = CI->getOperand(0);
1693
1694 // Noop casts and casts of constants should be eliminated trivially.
1695 if (CI->getSrcTy() == CI->getDestTy() || isa<Constant>(CastSrc))
1696 return false;
1697
1698 // If this cast is paired with another cast that can be eliminated, we prefer
1699 // to have it eliminated.
1700 if (const auto *PrecedingCI = dyn_cast<CastInst>(CastSrc))
1701 if (isEliminableCastPair(PrecedingCI, CI))
1702 return false;
1703
1704 return true;
1705 }
1706
1707 /// Fold {and,or,xor} (cast X), C.
1708 static Instruction *foldLogicCastConstant(BinaryOperator &Logic, CastInst *Cast,
1709 InstCombinerImpl &IC) {
1710 Constant *C = dyn_cast<Constant>(Logic.getOperand(1));
1711 if (!C)
1712 return nullptr;
1713
1714 auto LogicOpc = Logic.getOpcode();
1715 Type *DestTy = Logic.getType();
1716 Type *SrcTy = Cast->getSrcTy();
1717
1718 // Move the logic operation ahead of a zext or sext if the constant is
1719 // unchanged in the smaller source type. Performing the logic in a smaller
1720 // type may provide more information to later folds, and the smaller logic
1721 // instruction may be cheaper (particularly in the case of vectors).
1722 Value *X;
1723 if (match(Cast, m_OneUse(m_ZExt(m_Value(X))))) {
1724 if (Constant *TruncC = IC.getLosslessUnsignedTrunc(C, SrcTy)) {
1725 // LogicOpc (zext X), C --> zext (LogicOpc X, C)
1726 Value *NewOp = IC.Builder.CreateBinOp(LogicOpc, X, TruncC);
1727 return new ZExtInst(NewOp, DestTy);
1728 }
1729 }
1730
1731 if (match(Cast, m_OneUse(m_SExtLike(m_Value(X))))) {
1732 if (Constant *TruncC = IC.getLosslessSignedTrunc(C, SrcTy)) {
1733 // LogicOpc (sext X), C --> sext (LogicOpc X, C)
1734 Value *NewOp = IC.Builder.CreateBinOp(LogicOpc, X, TruncC);
1735 return new SExtInst(NewOp, DestTy);
1736 }
1737 }
1738
1739 return nullptr;
1740 }
1741
1742 /// Fold {and,or,xor} (cast X), Y.
1743 Instruction *InstCombinerImpl::foldCastedBitwiseLogic(BinaryOperator &I) {
1744 auto LogicOpc = I.getOpcode();
1745 assert(I.isBitwiseLogicOp() && "Unexpected opcode for bitwise logic folding");
1746
1747 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1748
1749 // fold bitwise(A >> (BW - 1), zext(icmp)), where BW is the scalar bit
1750 // width of A's type:
1751 // -> bitwise(zext(A < 0), zext(icmp))
1752 // -> zext(bitwise(A < 0, icmp))
1753 auto FoldBitwiseICmpZeroWithICmp = [&](Value *Op0,
1754 Value *Op1) -> Instruction * {
1755 ICmpInst::Predicate Pred;
1756 Value *A;
1757 bool IsMatched =
1758 match(Op0,
1759 m_OneUse(m_LShr(
1760 m_Value(A),
1761 m_SpecificInt(Op0->getType()->getScalarSizeInBits() - 1)))) &&
1762 match(Op1, m_OneUse(m_ZExt(m_ICmp(Pred, m_Value(), m_Value()))));
1763
1764 if (!IsMatched)
1765 return nullptr;
1766
1767 auto *ICmpL =
1768 Builder.CreateICmpSLT(A, Constant::getNullValue(A->getType()));
1769 auto *ICmpR = cast<ZExtInst>(Op1)->getOperand(0);
1770 auto *BitwiseOp = Builder.CreateBinOp(LogicOpc, ICmpL, ICmpR);
1771
1772 return new ZExtInst(BitwiseOp, Op0->getType());
1773 };
1774
1775 if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op0, Op1))
1776 return Ret;
1777
1778 if (auto *Ret = FoldBitwiseICmpZeroWithICmp(Op1, Op0))
1779 return Ret;
1780
1781 CastInst *Cast0 = dyn_cast<CastInst>(Op0);
1782 if (!Cast0)
1783 return nullptr;
1784
1785 // This must be a cast from an integer or integer vector source type to allow
1786 // transformation of the logic operation to the source type.
1787 Type *DestTy = I.getType();
1788 Type *SrcTy = Cast0->getSrcTy();
1789 if (!SrcTy->isIntOrIntVectorTy())
1790 return nullptr;
1791
1792 if (Instruction *Ret = foldLogicCastConstant(I, Cast0, *this))
1793 return Ret;
1794
1795 CastInst *Cast1 = dyn_cast<CastInst>(Op1);
1796 if (!Cast1)
1797 return nullptr;
1798
1799 // Both operands of the logic operation are casts. The casts must be the
1800 // same kind for reduction.
1801 Instruction::CastOps CastOpcode = Cast0->getOpcode();
1802 if (CastOpcode != Cast1->getOpcode())
1803 return nullptr;
1804
1805 // If the source types do not match, but the casts are matching extends, we
1806 // can still narrow the logic op.
1807 if (SrcTy != Cast1->getSrcTy()) {
1808 Value *X, *Y;
1809 if (match(Cast0, m_OneUse(m_ZExtOrSExt(m_Value(X)))) &&
1810 match(Cast1, m_OneUse(m_ZExtOrSExt(m_Value(Y))))) {
1811 // Cast the narrower source to the wider source type.
1812 unsigned XNumBits = X->getType()->getScalarSizeInBits();
1813 unsigned YNumBits = Y->getType()->getScalarSizeInBits();
1814 if (XNumBits < YNumBits)
1815 X = Builder.CreateCast(CastOpcode, X, Y->getType());
1816 else
1817 Y = Builder.CreateCast(CastOpcode, Y, X->getType());
1818 // Do the logic op in the intermediate width, then widen more.
1819 Value *NarrowLogic = Builder.CreateBinOp(LogicOpc, X, Y);
1820 return CastInst::Create(CastOpcode, NarrowLogic, DestTy);
1821 }
1822
1823 // Give up for other cast opcodes.
1824 return nullptr;
1825 }
1826
1827 Value *Cast0Src = Cast0->getOperand(0);
1828 Value *Cast1Src = Cast1->getOperand(0);
1829
1830 // fold logic(cast(A), cast(B)) -> cast(logic(A, B))
1831 if ((Cast0->hasOneUse() || Cast1->hasOneUse()) &&
1832 shouldOptimizeCast(Cast0) && shouldOptimizeCast(Cast1)) {
1833 Value *NewOp = Builder.CreateBinOp(LogicOpc, Cast0Src, Cast1Src,
1834 I.getName());
1835 return CastInst::Create(CastOpcode, NewOp, DestTy);
1836 }
1837
1838 return nullptr;
1839 }
1840
1841 static Instruction *foldAndToXor(BinaryOperator &I,
1842 InstCombiner::BuilderTy &Builder) {
1843 assert(I.getOpcode() == Instruction::And);
1844 Value *Op0 = I.getOperand(0);
1845 Value *Op1 = I.getOperand(1);
1846 Value *A, *B;
1847
1848 // Operand complexity canonicalization guarantees that the 'or' is Op0.
1849 // (A | B) & ~(A & B) --> A ^ B
1850 // (A | B) & ~(B & A) --> A ^ B
1851 if (match(&I, m_BinOp(m_Or(m_Value(A), m_Value(B)),
1852 m_Not(m_c_And(m_Deferred(A), m_Deferred(B))))))
1853 return BinaryOperator::CreateXor(A, B);
1854
1855 // (A | ~B) & (~A | B) --> ~(A ^ B)
1856 // (A | ~B) & (B | ~A) --> ~(A ^ B)
1857 // (~B | A) & (~A | B) --> ~(A ^ B)
1858 // (~B | A) & (B | ~A) --> ~(A ^ B)
1859 if (Op0->hasOneUse() || Op1->hasOneUse())
1860 if (match(&I, m_BinOp(m_c_Or(m_Value(A), m_Not(m_Value(B))),
1861 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
1862 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1863
1864 return nullptr;
1865 }
1866
1867 static Instruction *foldOrToXor(BinaryOperator &I,
1868 InstCombiner::BuilderTy &Builder) {
1869 assert(I.getOpcode() == Instruction::Or);
1870 Value *Op0 = I.getOperand(0);
1871 Value *Op1 = I.getOperand(1);
1872 Value *A, *B;
1873
1874 // Operand complexity canonicalization guarantees that the 'and' is Op0.
1875 // (A & B) | ~(A | B) --> ~(A ^ B)
1876 // (A & B) | ~(B | A) --> ~(A ^ B)
1877 if (Op0->hasOneUse() || Op1->hasOneUse())
1878 if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
1879 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1880 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
1881
1882 // Operand complexity canonicalization guarantees that the 'xor' is Op0.
1883 // (A ^ B) | ~(A | B) --> ~(A & B)
1884 // (A ^ B) | ~(B | A) --> ~(A & B)
1885 if (Op0->hasOneUse() || Op1->hasOneUse())
1886 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
1887 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B)))))
1888 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
1889
1890 // (A & ~B) | (~A & B) --> A ^ B
1891 // (A & ~B) | (B & ~A) --> A ^ B
1892 // (~B & A) | (~A & B) --> A ^ B
1893 // (~B & A) | (B & ~A) --> A ^ B
1894 if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
1895 match(Op1, m_c_And(m_Not(m_Specific(A)), m_Specific(B))))
1896 return BinaryOperator::CreateXor(A, B);
1897
1898 return nullptr;
1899 }
1900
1901 /// Return true if a constant shift amount is always less than the specified
1902 /// bit-width. If not, the shift could create poison in the narrower type.
1903 static bool canNarrowShiftAmt(Constant *C, unsigned BitWidth) {
1904 APInt Threshold(C->getType()->getScalarSizeInBits(), BitWidth);
1905 return match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold));
1906 }
1907
1908 /// Try to use narrower ops (sink zext ops) for an 'and' with binop operand and
1909 /// a common zext operand: and (binop (zext X), C), (zext X).
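/// E.g. (illustrative widths):
///   and (add (zext i8 X to i32), 44), (zext i8 X to i32)
///     --> zext (and (add X, 44), X) to i32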
1910 Instruction *InstCombinerImpl::narrowMaskedBinOp(BinaryOperator &And) {
1911 // This transform could also apply to {or, and, xor}, but there are better
1912 // folds for those cases, so we don't expect those patterns here. AShr is not
1913 // handled because it should always be transformed to LShr in this sequence.
1914 // The subtract transform is different because it has a constant on the left.
1915 // Add/mul commute the constant to RHS; sub with constant RHS becomes add.
1916 Value *Op0 = And.getOperand(0), *Op1 = And.getOperand(1);
1917 Constant *C;
1918 if (!match(Op0, m_OneUse(m_Add(m_Specific(Op1), m_Constant(C)))) &&
1919 !match(Op0, m_OneUse(m_Mul(m_Specific(Op1), m_Constant(C)))) &&
1920 !match(Op0, m_OneUse(m_LShr(m_Specific(Op1), m_Constant(C)))) &&
1921 !match(Op0, m_OneUse(m_Shl(m_Specific(Op1), m_Constant(C)))) &&
1922 !match(Op0, m_OneUse(m_Sub(m_Constant(C), m_Specific(Op1)))))
1923 return nullptr;
1924
1925 Value *X;
1926 if (!match(Op1, m_ZExt(m_Value(X))) || Op1->hasNUsesOrMore(3))
1927 return nullptr;
1928
1929 Type *Ty = And.getType();
1930 if (!isa<VectorType>(Ty) && !shouldChangeType(Ty, X->getType()))
1931 return nullptr;
1932
1933 // If we're narrowing a shift, the shift amount must be safe (less than the
1934 // width) in the narrower type. If the shift amount is greater, instsimplify
1935 // usually handles that case, but we can't guarantee/assert it.
1936 Instruction::BinaryOps Opc = cast<BinaryOperator>(Op0)->getOpcode();
1937 if (Opc == Instruction::LShr || Opc == Instruction::Shl)
1938 if (!canNarrowShiftAmt(C, X->getType()->getScalarSizeInBits()))
1939 return nullptr;
1940
1941 // and (sub C, (zext X)), (zext X) --> zext (and (sub C', X), X)
1942 // and (binop (zext X), C), (zext X) --> zext (and (binop X, C'), X)
1943 Value *NewC = ConstantExpr::getTrunc(C, X->getType());
1944 Value *NewBO = Opc == Instruction::Sub ? Builder.CreateBinOp(Opc, NewC, X)
1945 : Builder.CreateBinOp(Opc, X, NewC);
1946 return new ZExtInst(Builder.CreateAnd(NewBO, X), Ty);
1947 }
1948
1949 /// Try folding relatively complex patterns for both And and Or operations
1950 /// with all And and Or swapped.
1951 static Instruction *foldComplexAndOrPatterns(BinaryOperator &I,
1952 InstCombiner::BuilderTy &Builder) {
1953 const Instruction::BinaryOps Opcode = I.getOpcode();
1954 assert(Opcode == Instruction::And || Opcode == Instruction::Or);
1955
1956 // Flip the logic operation.
1957 const Instruction::BinaryOps FlippedOpcode =
1958 (Opcode == Instruction::And) ? Instruction::Or : Instruction::And;
1959
1960 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
1961 Value *A, *B, *C, *X, *Y, *Dummy;
1962
1963 // Match following expressions:
1964 // (~(A | B) & C)
1965 // (~(A & B) | C)
1966 // Captures X = ~(A | B) or ~(A & B)
1967 const auto matchNotOrAnd =
1968 [Opcode, FlippedOpcode](Value *Op, auto m_A, auto m_B, auto m_C,
1969 Value *&X, bool CountUses = false) -> bool {
1970 if (CountUses && !Op->hasOneUse())
1971 return false;
1972
1973 if (match(Op, m_c_BinOp(FlippedOpcode,
1974 m_CombineAnd(m_Value(X),
1975 m_Not(m_c_BinOp(Opcode, m_A, m_B))),
1976 m_C)))
1977 return !CountUses || X->hasOneUse();
1978
1979 return false;
1980 };
1981
1982 // (~(A | B) & C) | ... --> ...
1983 // (~(A & B) | C) & ... --> ...
1984 // TODO: The one-use checks are conservative. We only need to check that the
1985 // total number of multiply-used values does not exceed the number of
1986 // operations saved by the fold.
1987 if (matchNotOrAnd(Op0, m_Value(A), m_Value(B), m_Value(C), X)) {
1988 // (~(A | B) & C) | (~(A | C) & B) --> (B ^ C) & ~A
1989 // (~(A & B) | C) & (~(A & C) | B) --> ~((B ^ C) & A)
1990 if (matchNotOrAnd(Op1, m_Specific(A), m_Specific(C), m_Specific(B), Dummy,
1991 true)) {
1992 Value *Xor = Builder.CreateXor(B, C);
1993 return (Opcode == Instruction::Or)
1994 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(A))
1995 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, A));
1996 }
1997
1998 // (~(A | B) & C) | (~(B | C) & A) --> (A ^ C) & ~B
1999 // (~(A & B) | C) & (~(B & C) | A) --> ~((A ^ C) & B)
2000 if (matchNotOrAnd(Op1, m_Specific(B), m_Specific(C), m_Specific(A), Dummy,
2001 true)) {
2002 Value *Xor = Builder.CreateXor(A, C);
2003 return (Opcode == Instruction::Or)
2004 ? BinaryOperator::CreateAnd(Xor, Builder.CreateNot(B))
2005 : BinaryOperator::CreateNot(Builder.CreateAnd(Xor, B));
2006 }
2007
2008 // (~(A | B) & C) | ~(A | C) --> ~((B & C) | A)
2009 // (~(A & B) | C) & ~(A & C) --> ~((B | C) & A)
2010 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2011 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
2012 return BinaryOperator::CreateNot(Builder.CreateBinOp(
2013 Opcode, Builder.CreateBinOp(FlippedOpcode, B, C), A));
2014
2015 // (~(A | B) & C) | ~(B | C) --> ~((A & C) | B)
2016 // (~(A & B) | C) & ~(B & C) --> ~((A | C) & B)
2017 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2018 m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)))))))
2019 return BinaryOperator::CreateNot(Builder.CreateBinOp(
2020 Opcode, Builder.CreateBinOp(FlippedOpcode, A, C), B));
2021
2022 // (~(A | B) & C) | ~(C | (A ^ B)) --> ~((A | B) & (C | (A ^ B)))
2023 // Note, the pattern with swapped and/or is not handled because the
2024 // result would be more poisonous than the source:
2025 // (~(A & B) | C) & ~(C & (A ^ B)) --> (A ^ B ^ C) | ~(A | C) is invalid.
2026 if (Opcode == Instruction::Or && Op0->hasOneUse() &&
2027 match(Op1, m_OneUse(m_Not(m_CombineAnd(
2028 m_Value(Y),
2029 m_c_BinOp(Opcode, m_Specific(C),
2030 m_c_Xor(m_Specific(A), m_Specific(B)))))))) {
2031 // X = ~(A | B)
2032 // Y = (C | (A ^ B))
2033 Value *Or = cast<BinaryOperator>(X)->getOperand(0);
2034 return BinaryOperator::CreateNot(Builder.CreateAnd(Or, Y));
2035 }
2036 }
2037
2038 // (~A & B & C) | ... --> ...
2039 // (~A | B | C) | ... --> ...
2040 // TODO: The one-use checks are conservative. We only need to check that the
2041 // total number of multiply-used values does not exceed the number of
2042 // operations saved by the fold.
2043 if (match(Op0,
2044 m_OneUse(m_c_BinOp(FlippedOpcode,
2045 m_BinOp(FlippedOpcode, m_Value(B), m_Value(C)),
2046 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))))) ||
2047 match(Op0, m_OneUse(m_c_BinOp(
2048 FlippedOpcode,
2049 m_c_BinOp(FlippedOpcode, m_Value(C),
2050 m_CombineAnd(m_Value(X), m_Not(m_Value(A)))),
2051 m_Value(B))))) {
2052 // X = ~A
2053 // (~A & B & C) | ~(A | B | C) --> ~(A | (B ^ C))
2054 // (~A | B | C) & ~(A & B & C) --> (~A | (B ^ C))
2055 if (match(Op1, m_OneUse(m_Not(m_c_BinOp(
2056 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)),
2057 m_Specific(C))))) ||
2058 match(Op1, m_OneUse(m_Not(m_c_BinOp(
2059 Opcode, m_c_BinOp(Opcode, m_Specific(B), m_Specific(C)),
2060 m_Specific(A))))) ||
2061 match(Op1, m_OneUse(m_Not(m_c_BinOp(
2062 Opcode, m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)),
2063 m_Specific(B)))))) {
2064 Value *Xor = Builder.CreateXor(B, C);
2065 return (Opcode == Instruction::Or)
2066 ? BinaryOperator::CreateNot(Builder.CreateOr(Xor, A))
2067 : BinaryOperator::CreateOr(Xor, X);
2068 }
2069
2070 // (~A & B & C) | ~(A | B) --> (C | ~B) & ~A
2071 // (~A | B | C) & ~(A & B) --> (C & ~B) | ~A
2072 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2073 m_c_BinOp(Opcode, m_Specific(A), m_Specific(B)))))))
2074 return BinaryOperator::Create(
2075 FlippedOpcode, Builder.CreateBinOp(Opcode, C, Builder.CreateNot(B)),
2076 X);
2077
2078 // (~A & B & C) | ~(A | C) --> (B | ~C) & ~A
2079 // (~A | B | C) & ~(A & C) --> (B & ~C) | ~A
2080 if (match(Op1, m_OneUse(m_Not(m_OneUse(
2081 m_c_BinOp(Opcode, m_Specific(A), m_Specific(C)))))))
2082 return BinaryOperator::Create(
2083 FlippedOpcode, Builder.CreateBinOp(Opcode, B, Builder.CreateNot(C)),
2084 X);
2085 }
2086
2087 return nullptr;
2088 }
2089
2090 /// Try to reassociate a pair of binops so that values with one use only are
2091 /// part of the same instruction. This may enable folds that are limited with
2092 /// multi-use restrictions and makes it more likely to match other patterns that
2093 /// are looking for a common operand.
2094 static Instruction *reassociateForUses(BinaryOperator &BO,
2095 InstCombinerImpl::BuilderTy &Builder) {
2096 Instruction::BinaryOps Opcode = BO.getOpcode();
2097 Value *X, *Y, *Z;
2098 if (match(&BO,
2099 m_c_BinOp(Opcode, m_OneUse(m_BinOp(Opcode, m_Value(X), m_Value(Y))),
2100 m_OneUse(m_Value(Z))))) {
2101 if (!isa<Constant>(X) && !isa<Constant>(Y) && !isa<Constant>(Z)) {
2102 // (X op Y) op Z --> (Y op Z) op X
2103 if (!X->hasOneUse()) {
2104 Value *YZ = Builder.CreateBinOp(Opcode, Y, Z);
2105 return BinaryOperator::Create(Opcode, YZ, X);
2106 }
2107 // (X op Y) op Z --> (X op Z) op Y
2108 if (!Y->hasOneUse()) {
2109 Value *XZ = Builder.CreateBinOp(Opcode, X, Z);
2110 return BinaryOperator::Create(Opcode, XZ, Y);
2111 }
2112 }
2113 }
2114
2115 return nullptr;
2116 }
2117
2118 // Match
2119 // (X + C2) | C
2120 // (X + C2) ^ C
2121 // (X + C2) & C
2122 // and convert to do the bitwise logic first:
2123 // (X | C) + C2
2124 // (X ^ C) + C2
2125 // (X & C) + C2
2126 // iff bits affected by logic op are lower than last bit affected by math op
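// E.g. ((X + 8) | 1) --> ((X | 1) + 8): adding 8 cannot change bit 0, so the
// 'or' of the low bit commutes past the add.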
2127 static Instruction *canonicalizeLogicFirst(BinaryOperator &I,
2128 InstCombiner::BuilderTy &Builder) {
2129 Type *Ty = I.getType();
2130 Instruction::BinaryOps OpC = I.getOpcode();
2131 Value *Op0 = I.getOperand(0);
2132 Value *Op1 = I.getOperand(1);
2133 Value *X;
2134 const APInt *C, *C2;
2135
2136 if (!(match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C2)))) &&
2137 match(Op1, m_APInt(C))))
2138 return nullptr;
2139
2140 unsigned Width = Ty->getScalarSizeInBits();
2141 unsigned LastOneMath = Width - C2->countr_zero();
2142
2143 switch (OpC) {
2144 case Instruction::And:
2145 if (C->countl_one() < LastOneMath)
2146 return nullptr;
2147 break;
2148 case Instruction::Xor:
2149 case Instruction::Or:
2150 if (C->countl_zero() < LastOneMath)
2151 return nullptr;
2152 break;
2153 default:
2154 llvm_unreachable("Unexpected BinaryOp!");
2155 }
2156
2157 Value *NewBinOp = Builder.CreateBinOp(OpC, X, ConstantInt::get(Ty, *C));
2158 return BinaryOperator::CreateWithCopiedFlags(Instruction::Add, NewBinOp,
2159 ConstantInt::get(Ty, *C2), Op0);
2160 }
2161
2162 // binop(shift(ShiftedC1, ShAmt), shift(ShiftedC2, add(ShAmt, AddC))) ->
2163 // shift(binop(ShiftedC1, shift(ShiftedC2, AddC)), ShAmt)
2164 // where both shifts are the same and AddC is a valid shift amount.
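// E.g. (C1 << X) | (C2 << (X + 4)) --> ((C1 | (C2 << 4)) << X), since for shl
// the displaced shift distributes: C2 << (X + 4) == (C2 << 4) << X.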
2165 Instruction *InstCombinerImpl::foldBinOpOfDisplacedShifts(BinaryOperator &I) {
2166 assert((I.isBitwiseLogicOp() || I.getOpcode() == Instruction::Add) &&
2167 "Unexpected opcode");
2168
2169 Value *ShAmt;
2170 Constant *ShiftedC1, *ShiftedC2, *AddC;
2171 Type *Ty = I.getType();
2172 unsigned BitWidth = Ty->getScalarSizeInBits();
2173 if (!match(&I, m_c_BinOp(m_Shift(m_ImmConstant(ShiftedC1), m_Value(ShAmt)),
2174 m_Shift(m_ImmConstant(ShiftedC2),
2175 m_AddLike(m_Deferred(ShAmt),
2176 m_ImmConstant(AddC))))))
2177 return nullptr;
2178
2179 // Make sure the add constant is a valid shift amount.
2180 if (!match(AddC,
2181 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(BitWidth, BitWidth))))
2182 return nullptr;
2183
2184 // Avoid constant expressions.
2185 auto *Op0Inst = dyn_cast<Instruction>(I.getOperand(0));
2186 auto *Op1Inst = dyn_cast<Instruction>(I.getOperand(1));
2187 if (!Op0Inst || !Op1Inst)
2188 return nullptr;
2189
2190 // Both shifts must be the same.
2191 Instruction::BinaryOps ShiftOp =
2192 static_cast<Instruction::BinaryOps>(Op0Inst->getOpcode());
2193 if (ShiftOp != Op1Inst->getOpcode())
2194 return nullptr;
2195
2196 // For adds, only left shifts are supported.
2197 if (I.getOpcode() == Instruction::Add && ShiftOp != Instruction::Shl)
2198 return nullptr;
2199
2200 Value *NewC = Builder.CreateBinOp(
2201 I.getOpcode(), ShiftedC1, Builder.CreateBinOp(ShiftOp, ShiftedC2, AddC));
2202 return BinaryOperator::Create(ShiftOp, NewC, ShAmt);
2203 }
2204
2205 // Fold and/or/xor with two equal intrinsic IDs:
2206 // bitwise(fshl (A, B, ShAmt), fshl(C, D, ShAmt))
2207 // -> fshl(bitwise(A, C), bitwise(B, D), ShAmt)
2208 // bitwise(fshr (A, B, ShAmt), fshr(C, D, ShAmt))
2209 // -> fshr(bitwise(A, C), bitwise(B, D), ShAmt)
2210 // bitwise(bswap(A), bswap(B)) -> bswap(bitwise(A, B))
2211 // bitwise(bswap(A), C) -> bswap(bitwise(A, bswap(C)))
2212 // bitwise(bitreverse(A), bitreverse(B)) -> bitreverse(bitwise(A, B))
2213 // bitwise(bitreverse(A), C) -> bitreverse(bitwise(A, bitreverse(C)))
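// E.g. (an illustrative i16 case): or (bswap X), 0x00FF
//   -> bswap (or X, 0xFF00), because bswap(0x00FF) == 0xFF00.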
2214 static Instruction *
2215 foldBitwiseLogicWithIntrinsics(BinaryOperator &I,
2216 InstCombiner::BuilderTy &Builder) {
2217 assert(I.isBitwiseLogicOp() && "Expected and/or/xor");
2218 if (!I.getOperand(0)->hasOneUse())
2219 return nullptr;
2220 IntrinsicInst *X = dyn_cast<IntrinsicInst>(I.getOperand(0));
2221 if (!X)
2222 return nullptr;
2223
2224 IntrinsicInst *Y = dyn_cast<IntrinsicInst>(I.getOperand(1));
2225 if (Y && (!Y->hasOneUse() || X->getIntrinsicID() != Y->getIntrinsicID()))
2226 return nullptr;
2227
2228 Intrinsic::ID IID = X->getIntrinsicID();
2229 const APInt *RHSC;
2230 // Try to match constant RHS.
2231 if (!Y && (!(IID == Intrinsic::bswap || IID == Intrinsic::bitreverse) ||
2232 !match(I.getOperand(1), m_APInt(RHSC))))
2233 return nullptr;
2234
2235 switch (IID) {
2236 case Intrinsic::fshl:
2237 case Intrinsic::fshr: {
2238 if (X->getOperand(2) != Y->getOperand(2))
2239 return nullptr;
2240 Value *NewOp0 =
2241 Builder.CreateBinOp(I.getOpcode(), X->getOperand(0), Y->getOperand(0));
2242 Value *NewOp1 =
2243 Builder.CreateBinOp(I.getOpcode(), X->getOperand(1), Y->getOperand(1));
2244 Function *F = Intrinsic::getDeclaration(I.getModule(), IID, I.getType());
2245 return CallInst::Create(F, {NewOp0, NewOp1, X->getOperand(2)});
2246 }
2247 case Intrinsic::bswap:
2248 case Intrinsic::bitreverse: {
2249 Value *NewOp0 = Builder.CreateBinOp(
2250 I.getOpcode(), X->getOperand(0),
2251 Y ? Y->getOperand(0)
2252 : ConstantInt::get(I.getType(), IID == Intrinsic::bswap
2253 ? RHSC->byteSwap()
2254 : RHSC->reverseBits()));
2255 Function *F = Intrinsic::getDeclaration(I.getModule(), IID, I.getType());
2256 return CallInst::Create(F, {NewOp0});
2257 }
2258 default:
2259 return nullptr;
2260 }
2261 }
2262
2263 // Try to simplify V by replacing occurrences of Op with RepOp, but only look
2264 // through bitwise operations. In particular, for X | Y we try to replace Y with
2265 // 0 inside X and for X & Y we try to replace Y with -1 inside X.
2266 // Return the simplified result of X if successful, and nullptr otherwise.
2267 // If SimplifyOnly is true, no new instructions will be created.
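// E.g. in ((A ^ Y) & Y), Y can be replaced by -1 inside the xor, so the whole
// expression simplifies to (~A & Y).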
2268 static Value *simplifyAndOrWithOpReplaced(Value *V, Value *Op, Value *RepOp,
2269 bool SimplifyOnly,
2270 InstCombinerImpl &IC,
2271 unsigned Depth = 0) {
2272 if (Op == RepOp)
2273 return nullptr;
2274
2275 if (V == Op)
2276 return RepOp;
2277
2278 auto *I = dyn_cast<BinaryOperator>(V);
2279 if (!I || !I->isBitwiseLogicOp() || Depth >= 3)
2280 return nullptr;
2281
2282 if (!I->hasOneUse())
2283 SimplifyOnly = true;
2284
2285 Value *NewOp0 = simplifyAndOrWithOpReplaced(I->getOperand(0), Op, RepOp,
2286 SimplifyOnly, IC, Depth + 1);
2287 Value *NewOp1 = simplifyAndOrWithOpReplaced(I->getOperand(1), Op, RepOp,
2288 SimplifyOnly, IC, Depth + 1);
2289 if (!NewOp0 && !NewOp1)
2290 return nullptr;
2291
2292 if (!NewOp0)
2293 NewOp0 = I->getOperand(0);
2294 if (!NewOp1)
2295 NewOp1 = I->getOperand(1);
2296
2297 if (Value *Res = simplifyBinOp(I->getOpcode(), NewOp0, NewOp1,
2298 IC.getSimplifyQuery().getWithInstruction(I)))
2299 return Res;
2300
2301 if (SimplifyOnly)
2302 return nullptr;
2303 return IC.Builder.CreateBinOp(I->getOpcode(), NewOp0, NewOp1);
2304 }
2305
2306 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
2307 // here. We should standardize that construct where it is needed or choose some
2308 // other way to ensure that commutated variants of patterns are not missed.
2309 Instruction *InstCombinerImpl::visitAnd(BinaryOperator &I) {
2310 Type *Ty = I.getType();
2311
2312 if (Value *V = simplifyAndInst(I.getOperand(0), I.getOperand(1),
2313 SQ.getWithInstruction(&I)))
2314 return replaceInstUsesWith(I, V);
2315
2316 if (SimplifyAssociativeOrCommutative(I))
2317 return &I;
2318
2319 if (Instruction *X = foldVectorBinop(I))
2320 return X;
2321
2322 if (Instruction *Phi = foldBinopWithPhiOperands(I))
2323 return Phi;
2324
2325 // See if we can simplify any instructions used by the instruction whose sole
2326 // purpose is to compute bits we don't care about.
2327 if (SimplifyDemandedInstructionBits(I))
2328 return &I;
2329
2330 // Do this before using distributive laws to catch simple and/or/not patterns.
2331 if (Instruction *Xor = foldAndToXor(I, Builder))
2332 return Xor;
2333
2334 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
2335 return X;
2336
2337 // (A|B)&(A|C) -> A|(B&C) etc
2338 if (Value *V = foldUsingDistributiveLaws(I))
2339 return replaceInstUsesWith(I, V);
2340
2341 if (Instruction *R = foldBinOpShiftWithShift(I))
2342 return R;
2343
2344 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
2345
2346 Value *X, *Y;
2347 const APInt *C;
2348 if ((match(Op0, m_OneUse(m_LogicalShift(m_One(), m_Value(X)))) ||
2349 (match(Op0, m_OneUse(m_Shl(m_APInt(C), m_Value(X)))) && (*C)[0])) &&
2350 match(Op1, m_One())) {
2351 // (1 >> X) & 1 --> zext(X == 0)
2352 // (C << X) & 1 --> zext(X == 0), when C is odd
2353 Value *IsZero = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, 0));
2354 return new ZExtInst(IsZero, Ty);
2355 }
2356
2357 // (-(X & 1)) & Y --> (X & 1) == 0 ? 0 : Y
2358 Value *Neg;
2359 if (match(&I,
2360 m_c_And(m_CombineAnd(m_Value(Neg),
2361 m_OneUse(m_Neg(m_And(m_Value(), m_One())))),
2362 m_Value(Y)))) {
2363 Value *Cmp = Builder.CreateIsNull(Neg);
2364 return SelectInst::Create(Cmp, ConstantInt::getNullValue(Ty), Y);
2365 }
2366
2367 // Canonicalize:
2368 // (X +/- Y) & Y --> ~X & Y when Y is a power of 2.
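// E.g. (X + 8) & 8 --> ~X & 8: no carry can enter bit 3 from the zero low
// bits of the constant 8, so the add just flips bit 3 of X.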
2369 if (match(&I, m_c_And(m_Value(Y), m_OneUse(m_CombineOr(
2370 m_c_Add(m_Value(X), m_Deferred(Y)),
2371 m_Sub(m_Value(X), m_Deferred(Y)))))) &&
2372 isKnownToBeAPowerOfTwo(Y, /*OrZero*/ true, /*Depth*/ 0, &I))
2373 return BinaryOperator::CreateAnd(Builder.CreateNot(X), Y);
2374
2375 if (match(Op1, m_APInt(C))) {
2376 const APInt *XorC;
2377 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_APInt(XorC))))) {
2378 // (X ^ C1) & C2 --> (X & C2) ^ (C1&C2)
2379 Constant *NewC = ConstantInt::get(Ty, *C & *XorC);
2380 Value *And = Builder.CreateAnd(X, Op1);
2381 And->takeName(Op0);
2382 return BinaryOperator::CreateXor(And, NewC);
2383 }
2384
2385 const APInt *OrC;
2386 if (match(Op0, m_OneUse(m_Or(m_Value(X), m_APInt(OrC))))) {
2387 // (X | C1) & C2 --> (X & C2^(C1&C2)) | (C1&C2)
2388 // NOTE: This reduces the number of bits set in the & mask, which
2389 // can expose opportunities for store narrowing for scalars.
2390 // NOTE: SimplifyDemandedBits should have already removed bits from C1
2391 // that aren't set in C2. Meaning we can replace (C1&C2) with C1 in
2392 // above, but this feels safer.
2393 APInt Together = *C & *OrC;
2394 Value *And = Builder.CreateAnd(X, ConstantInt::get(Ty, Together ^ *C));
2395 And->takeName(Op0);
2396 return BinaryOperator::CreateOr(And, ConstantInt::get(Ty, Together));
2397 }
2398
2399 unsigned Width = Ty->getScalarSizeInBits();
2400 const APInt *ShiftC;
2401 if (match(Op0, m_OneUse(m_SExt(m_AShr(m_Value(X), m_APInt(ShiftC))))) &&
2402 ShiftC->ult(Width)) {
2403 if (*C == APInt::getLowBitsSet(Width, Width - ShiftC->getZExtValue())) {
2404 // We are clearing high bits that were potentially set by sext+ashr:
2405 // and (sext (ashr X, ShiftC)), C --> lshr (sext X), ShiftC
2406 Value *Sext = Builder.CreateSExt(X, Ty);
2407 Constant *ShAmtC = ConstantInt::get(Ty, ShiftC->zext(Width));
2408 return BinaryOperator::CreateLShr(Sext, ShAmtC);
2409 }
2410 }
2411
2412 // If this 'and' clears the sign-bits added by ashr, replace with lshr:
2413 // and (ashr X, ShiftC), C --> lshr X, ShiftC
2414 if (match(Op0, m_AShr(m_Value(X), m_APInt(ShiftC))) && ShiftC->ult(Width) &&
2415 C->isMask(Width - ShiftC->getZExtValue()))
2416 return BinaryOperator::CreateLShr(X, ConstantInt::get(Ty, *ShiftC));
2417
2418 const APInt *AddC;
2419 if (match(Op0, m_Add(m_Value(X), m_APInt(AddC)))) {
2420 // If we are masking the result of the add down to exactly one bit and
2421 // the constant we are adding has no bits set below that bit, then the
2422 // add is flipping a single bit. Example:
2423 // (X + 4) & 4 --> (X & 4) ^ 4
2424 if (Op0->hasOneUse() && C->isPowerOf2() && (*AddC & (*C - 1)) == 0) {
2425 assert((*C & *AddC) != 0 && "Expected common bit");
2426 Value *NewAnd = Builder.CreateAnd(X, Op1);
2427 return BinaryOperator::CreateXor(NewAnd, Op1);
2428 }
2429 }
2430
2431 // ((C1 OP zext(X)) & C2) -> zext((C1 OP X) & C2) if C2 fits in the
2432 // bitwidth of X and OP behaves well when given trunc(C1) and X.
2433 auto isNarrowableBinOpcode = [](BinaryOperator *B) {
2434 switch (B->getOpcode()) {
2435 case Instruction::Xor:
2436 case Instruction::Or:
2437 case Instruction::Mul:
2438 case Instruction::Add:
2439 case Instruction::Sub:
2440 return true;
2441 default:
2442 return false;
2443 }
2444 };
2445 BinaryOperator *BO;
2446 if (match(Op0, m_OneUse(m_BinOp(BO))) && isNarrowableBinOpcode(BO)) {
2447 Instruction::BinaryOps BOpcode = BO->getOpcode();
2448 Value *X;
2449 const APInt *C1;
2450 // TODO: The one-use restrictions could be relaxed a little if the AND
2451 // is going to be removed.
2452 // Try to narrow the 'and' and a binop with constant operand:
2453 // and (bo (zext X), C1), C --> zext (and (bo X, TruncC1), TruncC)
2454 if (match(BO, m_c_BinOp(m_OneUse(m_ZExt(m_Value(X))), m_APInt(C1))) &&
2455 C->isIntN(X->getType()->getScalarSizeInBits())) {
2456 unsigned XWidth = X->getType()->getScalarSizeInBits();
2457 Constant *TruncC1 = ConstantInt::get(X->getType(), C1->trunc(XWidth));
2458 Value *BinOp = isa<ZExtInst>(BO->getOperand(0))
2459 ? Builder.CreateBinOp(BOpcode, X, TruncC1)
2460 : Builder.CreateBinOp(BOpcode, TruncC1, X);
2461 Constant *TruncC = ConstantInt::get(X->getType(), C->trunc(XWidth));
2462 Value *And = Builder.CreateAnd(BinOp, TruncC);
2463 return new ZExtInst(And, Ty);
2464 }
2465
2466 // Similar to above: if the mask matches the zext input width, then the
2467 // 'and' can be eliminated, so we can truncate the other variable op:
2468 // and (bo (zext X), Y), C --> zext (bo X, (trunc Y))
2469 if (isa<Instruction>(BO->getOperand(0)) &&
2470 match(BO->getOperand(0), m_OneUse(m_ZExt(m_Value(X)))) &&
2471 C->isMask(X->getType()->getScalarSizeInBits())) {
2472 Y = BO->getOperand(1);
2473 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
2474 Value *NewBO =
2475 Builder.CreateBinOp(BOpcode, X, TrY, BO->getName() + ".narrow");
2476 return new ZExtInst(NewBO, Ty);
2477 }
2478 // and (bo Y, (zext X)), C --> zext (bo (trunc Y), X)
2479 if (isa<Instruction>(BO->getOperand(1)) &&
2480 match(BO->getOperand(1), m_OneUse(m_ZExt(m_Value(X)))) &&
2481 C->isMask(X->getType()->getScalarSizeInBits())) {
2482 Y = BO->getOperand(0);
2483 Value *TrY = Builder.CreateTrunc(Y, X->getType(), Y->getName() + ".tr");
2484 Value *NewBO =
2485 Builder.CreateBinOp(BOpcode, TrY, X, BO->getName() + ".narrow");
2486 return new ZExtInst(NewBO, Ty);
2487 }
2488 }
2489
2490 // This is intentionally placed after the narrowing transforms for
2491 // efficiency (transform directly to the narrow logic op if possible).
2492 // If the mask is only needed on one incoming arm, push the 'and' op up.
2493 if (match(Op0, m_OneUse(m_Xor(m_Value(X), m_Value(Y)))) ||
2494 match(Op0, m_OneUse(m_Or(m_Value(X), m_Value(Y))))) {
2495 APInt NotAndMask(~(*C));
2496 BinaryOperator::BinaryOps BinOp = cast<BinaryOperator>(Op0)->getOpcode();
2497 if (MaskedValueIsZero(X, NotAndMask, 0, &I)) {
2498 // Not masking anything out for the LHS, move mask to RHS.
2499 // and ({x}or X, Y), C --> {x}or X, (and Y, C)
2500 Value *NewRHS = Builder.CreateAnd(Y, Op1, Y->getName() + ".masked");
2501 return BinaryOperator::Create(BinOp, X, NewRHS);
2502 }
2503 if (!isa<Constant>(Y) && MaskedValueIsZero(Y, NotAndMask, 0, &I)) {
2504 // Not masking anything out for the RHS, move mask to LHS.
2505 // and ({x}or X, Y), C --> {x}or (and X, C), Y
2506 Value *NewLHS = Builder.CreateAnd(X, Op1, X->getName() + ".masked");
2507 return BinaryOperator::Create(BinOp, NewLHS, Y);
2508 }
2509 }
2510
2511 // When the mask is a power-of-2 constant and op0 is a shifted-power-of-2
2512 // constant, test if the shift amount equals the offset bit index:
2513 // (ShiftC << X) & C --> X == (log2(C) - log2(ShiftC)) ? C : 0
2514 // (ShiftC >> X) & C --> X == (log2(ShiftC) - log2(C)) ? C : 0
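// E.g. (2 << X) & 8 --> X == 2 ? 8 : 0.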
2515 if (C->isPowerOf2() &&
2516 match(Op0, m_OneUse(m_LogicalShift(m_Power2(ShiftC), m_Value(X))))) {
2517 int Log2ShiftC = ShiftC->exactLogBase2();
2518 int Log2C = C->exactLogBase2();
2519 bool IsShiftLeft =
2520 cast<BinaryOperator>(Op0)->getOpcode() == Instruction::Shl;
2521 int BitNum = IsShiftLeft ? Log2C - Log2ShiftC : Log2ShiftC - Log2C;
2522 assert(BitNum >= 0 && "Expected demanded bits to handle impossible mask");
2523 Value *Cmp = Builder.CreateICmpEQ(X, ConstantInt::get(Ty, BitNum));
2524 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C),
2525 ConstantInt::getNullValue(Ty));
2526 }
2527
2528 Constant *C1, *C2;
2529 const APInt *C3 = C;
2530 Value *X;
2531 if (C3->isPowerOf2()) {
2532 Constant *Log2C3 = ConstantInt::get(Ty, C3->countr_zero());
2533 if (match(Op0, m_OneUse(m_LShr(m_Shl(m_ImmConstant(C1), m_Value(X)),
2534 m_ImmConstant(C2)))) &&
2535 match(C1, m_Power2())) {
2536 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
2537 Constant *LshrC = ConstantExpr::getAdd(C2, Log2C3);
2538 KnownBits KnownLShrc = computeKnownBits(LshrC, 0, nullptr);
2539 if (KnownLShrc.getMaxValue().ult(Width)) {
2540 // iff C1,C3 is pow2 and C2 + cttz(C3) < BitWidth:
2541 // ((C1 << X) >> C2) & C3 -> X == (cttz(C3)+C2-cttz(C1)) ? C3 : 0
2542 Constant *CmpC = ConstantExpr::getSub(LshrC, Log2C1);
2543 Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
2544 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
2545 ConstantInt::getNullValue(Ty));
2546 }
2547 }
2548
2549 if (match(Op0, m_OneUse(m_Shl(m_LShr(m_ImmConstant(C1), m_Value(X)),
2550 m_ImmConstant(C2)))) &&
2551 match(C1, m_Power2())) {
2552 Constant *Log2C1 = ConstantExpr::getExactLogBase2(C1);
2553 Constant *Cmp =
2554 ConstantFoldCompareInstOperands(ICmpInst::ICMP_ULT, Log2C3, C2, DL);
2555 if (Cmp && Cmp->isZeroValue()) {
2556 // iff C1,C3 is pow2 and Log2(C3) >= C2:
2557 // ((C1 >> X) << C2) & C3 -> X == (cttz(C1)+C2-cttz(C3)) ? C3 : 0
2558 Constant *ShlC = ConstantExpr::getAdd(C2, Log2C1);
2559 Constant *CmpC = ConstantExpr::getSub(ShlC, Log2C3);
2560 Value *Cmp = Builder.CreateICmpEQ(X, CmpC);
2561 return SelectInst::Create(Cmp, ConstantInt::get(Ty, *C3),
2562 ConstantInt::getNullValue(Ty));
2563 }
2564 }
2565 }
2566 }
2567
2568 // If we are clearing the sign bit of a floating-point value, convert this to
2569 // fabs, then cast back to integer.
2570 //
2571 // This is a generous interpretation for noimplicitfloat, since this is not
2572 // a true floating-point operation.
2573 //
2574 // Assumes any IEEE-represented type has the sign bit in the high bit.
2575 // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
2576 Value *CastOp;
2577 if (match(Op0, m_ElementWiseBitCast(m_Value(CastOp))) &&
2578 match(Op1, m_MaxSignedValue()) &&
2579 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
2580 Attribute::NoImplicitFloat)) {
2581 Type *EltTy = CastOp->getType()->getScalarType();
2582 if (EltTy->isFloatingPointTy() && EltTy->isIEEE()) {
2583 Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
2584 return new BitCastInst(FAbs, I.getType());
2585 }
2586 }
2587
2588 // and(shl(zext(X), Y), SignMask) -> and(sext(X), SignMask)
2589 // where Y is a valid shift amount.
2590 if (match(&I, m_And(m_OneUse(m_Shl(m_ZExt(m_Value(X)), m_Value(Y))),
2591 m_SignMask())) &&
2592 match(Y, m_SpecificInt_ICMP(
2593 ICmpInst::Predicate::ICMP_EQ,
2594 APInt(Ty->getScalarSizeInBits(),
2595 Ty->getScalarSizeInBits() -
2596 X->getType()->getScalarSizeInBits())))) {
2597 auto *SExt = Builder.CreateSExt(X, Ty, X->getName() + ".signext");
2598 return BinaryOperator::CreateAnd(SExt, Op1);
2599 }
2600
2601 if (Instruction *Z = narrowMaskedBinOp(I))
2602 return Z;
2603
2604 if (I.getType()->isIntOrIntVectorTy(1)) {
2605 if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
2606 if (auto *R =
2607 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ true))
2608 return R;
2609 }
2610 if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
2611 if (auto *R =
2612 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ true))
2613 return R;
2614 }
2615 }
2616
2617 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
2618 return FoldedLogic;
2619
2620 if (Instruction *DeMorgan = matchDeMorgansLaws(I, *this))
2621 return DeMorgan;
2622
2623 {
2624 Value *A, *B, *C;
2625 // A & ~(A ^ B) --> A & B
2626 if (match(Op1, m_Not(m_c_Xor(m_Specific(Op0), m_Value(B)))))
2627 return BinaryOperator::CreateAnd(Op0, B);
2628 // ~(A ^ B) & A --> A & B
2629 if (match(Op0, m_Not(m_c_Xor(m_Specific(Op1), m_Value(B)))))
2630 return BinaryOperator::CreateAnd(Op1, B);
2631
2632 // (A ^ B) & ((B ^ C) ^ A) -> (A ^ B) & ~C
2633 if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
2634 match(Op1, m_Xor(m_Xor(m_Specific(B), m_Value(C)), m_Specific(A)))) {
2635 Value *NotC = Op1->hasOneUse()
2636 ? Builder.CreateNot(C)
2637 : getFreelyInverted(C, C->hasOneUse(), &Builder);
2638 if (NotC != nullptr)
2639 return BinaryOperator::CreateAnd(Op0, NotC);
2640 }
2641
2642 // ((A ^ C) ^ B) & (B ^ A) -> (B ^ A) & ~C
2643 if (match(Op0, m_Xor(m_Xor(m_Value(A), m_Value(C)), m_Value(B))) &&
2644 match(Op1, m_Xor(m_Specific(B), m_Specific(A)))) {
2645 Value *NotC = Op0->hasOneUse()
2646 ? Builder.CreateNot(C)
2647 : getFreelyInverted(C, C->hasOneUse(), &Builder);
2648 if (NotC != nullptr)
2649 return BinaryOperator::CreateAnd(Op1, NotC);
2650 }
2651
2652 // (A | B) & (~A ^ B) -> A & B
2653 // (A | B) & (B ^ ~A) -> A & B
2654 // (B | A) & (~A ^ B) -> A & B
2655 // (B | A) & (B ^ ~A) -> A & B
2656 if (match(Op1, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2657 match(Op0, m_c_Or(m_Specific(A), m_Specific(B))))
2658 return BinaryOperator::CreateAnd(A, B);
2659
2660 // (~A ^ B) & (A | B) -> A & B
2661 // (~A ^ B) & (B | A) -> A & B
2662 // (B ^ ~A) & (A | B) -> A & B
2663 // (B ^ ~A) & (B | A) -> A & B
2664 if (match(Op0, m_c_Xor(m_Not(m_Value(A)), m_Value(B))) &&
2665 match(Op1, m_c_Or(m_Specific(A), m_Specific(B))))
2666 return BinaryOperator::CreateAnd(A, B);
2667
2668 // (~A | B) & (A ^ B) -> ~A & B
2669 // (~A | B) & (B ^ A) -> ~A & B
2670 // (B | ~A) & (A ^ B) -> ~A & B
2671 // (B | ~A) & (B ^ A) -> ~A & B
2672 if (match(Op0, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2673 match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
2674 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
2675
2676 // (A ^ B) & (~A | B) -> ~A & B
2677 // (B ^ A) & (~A | B) -> ~A & B
2678 // (A ^ B) & (B | ~A) -> ~A & B
2679 // (B ^ A) & (B | ~A) -> ~A & B
2680 if (match(Op1, m_c_Or(m_Not(m_Value(A)), m_Value(B))) &&
2681 match(Op0, m_c_Xor(m_Specific(A), m_Specific(B))))
2682 return BinaryOperator::CreateAnd(Builder.CreateNot(A), B);
2683 }
2684
2685 {
2686 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
2687 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
2688 if (LHS && RHS)
2689 if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ true))
2690 return replaceInstUsesWith(I, Res);
2691
2692 // TODO: Make this recursive; it's a little tricky because an arbitrary
2693 // number of 'and' instructions might have to be created.
2694 if (LHS && match(Op1, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) {
2695 bool IsLogical = isa<SelectInst>(Op1);
2696 // LHS & (X && Y) --> (LHS && X) && Y
2697 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2698 if (Value *Res =
2699 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true, IsLogical))
2700 return replaceInstUsesWith(I, IsLogical
2701 ? Builder.CreateLogicalAnd(Res, Y)
2702 : Builder.CreateAnd(Res, Y));
2703 // LHS & (X && Y) --> X && (LHS & Y)
2704 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2705 if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ true,
2706 /* IsLogical */ false))
2707 return replaceInstUsesWith(I, IsLogical
2708 ? Builder.CreateLogicalAnd(X, Res)
2709 : Builder.CreateAnd(X, Res));
2710 }
2711 if (RHS && match(Op0, m_OneUse(m_LogicalAnd(m_Value(X), m_Value(Y))))) {
2712 bool IsLogical = isa<SelectInst>(Op0);
2713 // (X && Y) & RHS --> (X && RHS) && Y
2714 if (auto *Cmp = dyn_cast<ICmpInst>(X))
2715 if (Value *Res =
2716 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true, IsLogical))
2717 return replaceInstUsesWith(I, IsLogical
2718 ? Builder.CreateLogicalAnd(Res, Y)
2719 : Builder.CreateAnd(Res, Y));
2720 // (X && Y) & RHS --> X && (Y & RHS)
2721 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
2722 if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ true,
2723 /* IsLogical */ false))
2724 return replaceInstUsesWith(I, IsLogical
2725 ? Builder.CreateLogicalAnd(X, Res)
2726 : Builder.CreateAnd(X, Res));
2727 }
2728 }
2729
2730 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
2731 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
2732 if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ true))
2733 return replaceInstUsesWith(I, Res);
2734
2735 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
2736 return FoldedFCmps;
2737
2738 if (Instruction *CastedAnd = foldCastedBitwiseLogic(I))
2739 return CastedAnd;
2740
2741 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
2742 return Sel;
2743
2744 // and(sext(A), B) / and(B, sext(A)) --> A ? B : 0, where A is i1 or <N x i1>.
2745 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
2746 // with binop identity constant. But creating a select with a non-constant
2747 // arm may not be reversible due to poison semantics. Is that a good
2748 // canonicalization?
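  // Illustrative IR for this fold (a sketch, assuming %a is i1):
  //   %s = sext i1 %a to i32
  //   %r = and i32 %s, %b
  // -->
  //   %r = select i1 %a, i32 %b, i32 0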
2749 Value *A, *B;
2750 if (match(&I, m_c_And(m_SExt(m_Value(A)), m_Value(B))) &&
2751 A->getType()->isIntOrIntVectorTy(1))
2752 return SelectInst::Create(A, B, Constant::getNullValue(Ty));
2753
2754 // Similarly, a 'not' of the bool translates to a swap of the select arms:
2755 // ~sext(A) & B / B & ~sext(A) --> A ? 0 : B
2756 if (match(&I, m_c_And(m_Not(m_SExt(m_Value(A))), m_Value(B))) &&
2757 A->getType()->isIntOrIntVectorTy(1))
2758 return SelectInst::Create(A, Constant::getNullValue(Ty), B);
2759
2760 // and(zext(A), B) -> A ? (B & 1) : 0
2761 if (match(&I, m_c_And(m_OneUse(m_ZExt(m_Value(A))), m_Value(B))) &&
2762 A->getType()->isIntOrIntVectorTy(1))
2763 return SelectInst::Create(A, Builder.CreateAnd(B, ConstantInt::get(Ty, 1)),
2764 Constant::getNullValue(Ty));
2765
2766 // (-1 + A) & B --> A ? 0 : B where A is 0/1.
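  // Reasoning (illustrative): when A == 1, (-1 + 1) & B == 0; when A == 0,
  // (-1 + 0) & B == B. Hence the select A ? 0 : B.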
2767 if (match(&I, m_c_And(m_OneUse(m_Add(m_ZExtOrSelf(m_Value(A)), m_AllOnes())),
2768 m_Value(B)))) {
2769 if (A->getType()->isIntOrIntVectorTy(1))
2770 return SelectInst::Create(A, Constant::getNullValue(Ty), B);
2771 if (computeKnownBits(A, /* Depth */ 0, &I).countMaxActiveBits() <= 1) {
2772 return SelectInst::Create(
2773 Builder.CreateICmpEQ(A, Constant::getNullValue(A->getType())), B,
2774 Constant::getNullValue(Ty));
2775 }
2776 }
2777
2778 // (iN X s>> (N-1)) & Y --> (X s< 0) ? Y : 0 -- with optional sext
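  // Illustrative i8 case: (ashr i8 %x, 7) is -1 when %x is negative and 0
  // otherwise, so the 'and' with Y yields Y or 0 based on the sign of %x.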
2779 if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
2780 m_AShr(m_Value(X), m_APIntAllowPoison(C)))),
2781 m_Value(Y))) &&
2782 *C == X->getType()->getScalarSizeInBits() - 1) {
2783 Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
2784 return SelectInst::Create(IsNeg, Y, ConstantInt::getNullValue(Ty));
2785 }
2786 // If there's a 'not' of the shifted value, swap the select operands:
2787 // ~(iN X s>> (N-1)) & Y --> (X s< 0) ? 0 : Y -- with optional sext
2788 if (match(&I, m_c_And(m_OneUse(m_SExtOrSelf(
2789 m_Not(m_AShr(m_Value(X), m_APIntAllowPoison(C))))),
2790 m_Value(Y))) &&
2791 *C == X->getType()->getScalarSizeInBits() - 1) {
2792 Value *IsNeg = Builder.CreateIsNeg(X, "isneg");
2793 return SelectInst::Create(IsNeg, ConstantInt::getNullValue(Ty), Y);
2794 }
2795
2796 // (~x) & y --> ~(x | (~y)) iff that gets rid of inversions
2797 if (sinkNotIntoOtherHandOfLogicalOp(I))
2798 return &I;
2799
2800 // An 'and' recurrence w/loop-invariant step is equivalent to (and start, step)
2801 PHINode *PN = nullptr;
2802 Value *Start = nullptr, *Step = nullptr;
2803 if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
2804 return replaceInstUsesWith(I, Builder.CreateAnd(Start, Step));
2805
2806 if (Instruction *R = reassociateForUses(I, Builder))
2807 return R;
2808
2809 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
2810 return Canonicalized;
2811
2812 if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
2813 return Folded;
2814
2815 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
2816 return Res;
2817
2818 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
2819 return Res;
2820
2821 if (Value *V =
2822 simplifyAndOrWithOpReplaced(Op0, Op1, Constant::getAllOnesValue(Ty),
2823 /*SimplifyOnly*/ false, *this))
2824 return BinaryOperator::CreateAnd(V, Op1);
2825 if (Value *V =
2826 simplifyAndOrWithOpReplaced(Op1, Op0, Constant::getAllOnesValue(Ty),
2827 /*SimplifyOnly*/ false, *this))
2828 return BinaryOperator::CreateAnd(Op0, V);
2829
2830 return nullptr;
2831 }
2832
2833 Instruction *InstCombinerImpl::matchBSwapOrBitReverse(Instruction &I,
2834 bool MatchBSwaps,
2835 bool MatchBitReversals) {
2836 SmallVector<Instruction *, 4> Insts;
2837 if (!recognizeBSwapOrBitReverseIdiom(&I, MatchBSwaps, MatchBitReversals,
2838 Insts))
2839 return nullptr;
2840 Instruction *LastInst = Insts.pop_back_val();
2841 LastInst->removeFromParent();
2842
2843 for (auto *Inst : Insts)
2844 Worklist.push(Inst);
2845 return LastInst;
2846 }
2847
2848 std::optional<std::pair<Intrinsic::ID, SmallVector<Value *, 3>>>
2849 InstCombinerImpl::convertOrOfShiftsToFunnelShift(Instruction &Or) {
2850 // TODO: Can we reduce the code duplication between this and the related
2851 // rotate matching code under visitSelect and visitTrunc?
2852 assert(Or.getOpcode() == BinaryOperator::Or && "Expecting or instruction");
2853
2854 unsigned Width = Or.getType()->getScalarSizeInBits();
2855
2856 Instruction *Or0, *Or1;
2857 if (!match(Or.getOperand(0), m_Instruction(Or0)) ||
2858 !match(Or.getOperand(1), m_Instruction(Or1)))
2859 return std::nullopt;
2860
2861 bool IsFshl = true; // Sub on LSHR.
2862 SmallVector<Value *, 3> FShiftArgs;
2863
2864 // First, find an or'd pair of opposite shifts:
2865 // or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1)
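  // Illustrative target form when both shifted values match (a rotate):
  //   or (shl %x, %amt), (lshr %x, (Width - %amt)) --> fshl %x, %x, %amt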
2866 if (isa<BinaryOperator>(Or0) && isa<BinaryOperator>(Or1)) {
2867 Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
2868 if (!match(Or0,
2869 m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
2870 !match(Or1,
2871 m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
2872 Or0->getOpcode() == Or1->getOpcode())
2873 return std::nullopt;
2874
2875 // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
2876 if (Or0->getOpcode() == BinaryOperator::LShr) {
2877 std::swap(Or0, Or1);
2878 std::swap(ShVal0, ShVal1);
2879 std::swap(ShAmt0, ShAmt1);
2880 }
2881 assert(Or0->getOpcode() == BinaryOperator::Shl &&
2882 Or1->getOpcode() == BinaryOperator::LShr &&
2883 "Illegal or(shift,shift) pair");
2884
2885 // Match the shift amount operands for a funnel shift pattern. This always
2886 // matches a subtraction on the R operand.
2887 auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
2888 // Check for constant shift amounts that sum to the bitwidth.
2889 const APInt *LI, *RI;
2890 if (match(L, m_APIntAllowPoison(LI)) && match(R, m_APIntAllowPoison(RI)))
2891 if (LI->ult(Width) && RI->ult(Width) && (*LI + *RI) == Width)
2892 return ConstantInt::get(L->getType(), *LI);
2893
2894 Constant *LC, *RC;
2895 if (match(L, m_Constant(LC)) && match(R, m_Constant(RC)) &&
2896 match(L,
2897 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
2898 match(R,
2899 m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, APInt(Width, Width))) &&
2900 match(ConstantExpr::getAdd(LC, RC), m_SpecificIntAllowPoison(Width)))
2901 return ConstantExpr::mergeUndefsWith(LC, RC);
2902
2903 // (shl ShVal, X) | (lshr ShVal, (Width - X)) iff X < Width.
2904 // We limit this to X < Width in case the backend re-expands the
2905 // intrinsic, and has to reintroduce a shift modulo operation (InstCombine
2906 // might remove it after this fold). This still doesn't guarantee that the
2907 // final codegen will match this original pattern.
2908 if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L))))) {
2909 KnownBits KnownL = computeKnownBits(L, /*Depth*/ 0, &Or);
2910 return KnownL.getMaxValue().ult(Width) ? L : nullptr;
2911 }
2912
2913 // For non-constant cases, the following patterns currently only work for
2914 // rotations.
2915 // TODO: Add general funnel-shift compatible patterns.
2916 if (ShVal0 != ShVal1)
2917 return nullptr;
2918
2919 // For non-constant cases we don't support non-pow2 shift masks.
2920 // TODO: Is it worth matching urem as well?
2921 if (!isPowerOf2_32(Width))
2922 return nullptr;
2923
2924 // The shift amount may be masked with negation:
2925 // (shl ShVal, (X & (Width - 1))) | (lshr ShVal, ((-X) & (Width - 1)))
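  // Note (illustrative): with Width a power of 2, (-X) & (Width - 1) equals
  // (Width - X) modulo Width, so the two masked shift amounts always sum to
  // Width modulo Width, i.e. this is a rotate by X.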
2926 Value *X;
2927 unsigned Mask = Width - 1;
2928 if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
2929 match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
2930 return X;
2931
2932 // (shl ShVal, X) | (lshr ShVal, ((-X) & (Width - 1)))
2933 if (match(R, m_And(m_Neg(m_Specific(L)), m_SpecificInt(Mask))))
2934 return L;
2935
2936 // Similar to above, but the shift amount may be extended after masking,
2937 // so return the extended value as the parameter for the intrinsic.
2938 if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2939 match(R,
2940 m_And(m_Neg(m_ZExt(m_And(m_Specific(X), m_SpecificInt(Mask)))),
2941 m_SpecificInt(Mask))))
2942 return L;
2943
2944 if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
2945 match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
2946 return L;
2947
2948 return nullptr;
2949 };
2950
2951 Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, Width);
2952 if (!ShAmt) {
2953 ShAmt = matchShiftAmount(ShAmt1, ShAmt0, Width);
2954 IsFshl = false; // Sub on SHL.
2955 }
2956 if (!ShAmt)
2957 return std::nullopt;
2958
2959 FShiftArgs = {ShVal0, ShVal1, ShAmt};
2960 } else if (isa<ZExtInst>(Or0) || isa<ZExtInst>(Or1)) {
2961 // If there are two 'or' instructions that concatenate the same variables
2962 // in opposite order:
2963 // (Slot1 and Slot2 contain only zero bits.)
2964 // | Slot1 | Low | Slot2 | High |
2965 // LowHigh = or (shl (zext Low), ZextLowShlAmt), (zext High)
2966 // | Slot2 | High | Slot1 | Low |
2967 // HighLow = or (shl (zext High), ZextHighShlAmt), (zext Low)
2968 //
2969 // then the latter 'or' can safely be converted to
2970 // -> HighLow = fshl LowHigh, LowHigh, ZextHighShlAmt
2971 // if ZextLowShlAmt + ZextHighShlAmt == Width.
2972 if (!isa<ZExtInst>(Or1))
2973 std::swap(Or0, Or1);
2974
2975 Value *High, *ZextHigh, *Low;
2976 const APInt *ZextHighShlAmt;
2977 if (!match(Or0,
2978 m_OneUse(m_Shl(m_Value(ZextHigh), m_APInt(ZextHighShlAmt)))))
2979 return std::nullopt;
2980
2981 if (!match(Or1, m_ZExt(m_Value(Low))) ||
2982 !match(ZextHigh, m_ZExt(m_Value(High))))
2983 return std::nullopt;
2984
2985 unsigned HighSize = High->getType()->getScalarSizeInBits();
2986 unsigned LowSize = Low->getType()->getScalarSizeInBits();
2987 // Make sure High does not overlap with Low and that the most significant
2988 // bits of High aren't shifted out.
2989 if (ZextHighShlAmt->ult(LowSize) || ZextHighShlAmt->ugt(Width - HighSize))
2990 return std::nullopt;
2991
2992 for (User *U : ZextHigh->users()) {
2993 Value *X, *Y;
2994 if (!match(U, m_Or(m_Value(X), m_Value(Y))))
2995 continue;
2996
2997 if (!isa<ZExtInst>(Y))
2998 std::swap(X, Y);
2999
3000 const APInt *ZextLowShlAmt;
3001 if (!match(X, m_Shl(m_Specific(Or1), m_APInt(ZextLowShlAmt))) ||
3002 !match(Y, m_Specific(ZextHigh)) || !DT.dominates(U, &Or))
3003 continue;
3004
3005 // HighLow is a good concat. If the sum of the two shift amounts equals
3006 // Width, LowHigh must also be a good concat.
3007 if (*ZextLowShlAmt + *ZextHighShlAmt != Width)
3008 continue;
3009
3010 // Low must not overlap with High, and the most significant bits of Low
3011 // must not be shifted out.
3012 assert(ZextLowShlAmt->uge(HighSize) &&
3013 ZextLowShlAmt->ule(Width - LowSize) && "Invalid concat");
3014
3015 FShiftArgs = {U, U, ConstantInt::get(Or0->getType(), *ZextHighShlAmt)};
3016 break;
3017 }
3018 }
3019
3020 if (FShiftArgs.empty())
3021 return std::nullopt;
3022
3023 Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
3024 return std::make_pair(IID, FShiftArgs);
3025 }
3026
3027 /// Match UB-safe variants of the funnel shift intrinsic.
3028 static Instruction *matchFunnelShift(Instruction &Or, InstCombinerImpl &IC) {
3029 if (auto Opt = IC.convertOrOfShiftsToFunnelShift(Or)) {
3030 auto [IID, FShiftArgs] = *Opt;
3031 Function *F = Intrinsic::getDeclaration(Or.getModule(), IID, Or.getType());
3032 return CallInst::Create(F, FShiftArgs);
3033 }
3034
3035 return nullptr;
3036 }
3037
3038 /// Attempt to combine or(zext(x),shl(zext(y),bw/2)) concat packing patterns.
3039 static Instruction *matchOrConcat(Instruction &Or,
3040 InstCombiner::BuilderTy &Builder) {
3041 assert(Or.getOpcode() == Instruction::Or && "bswap requires an 'or'");
3042 Value *Op0 = Or.getOperand(0), *Op1 = Or.getOperand(1);
3043 Type *Ty = Or.getType();
3044
3045 unsigned Width = Ty->getScalarSizeInBits();
3046 if ((Width & 1) != 0)
3047 return nullptr;
3048 unsigned HalfWidth = Width / 2;
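  // Illustrative shape being matched (a sketch; Width == 2 * HalfWidth):
  //   or (zext iN %lo to iWidth), (shl (zext iN %hi to iWidth), HalfWidth)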
3049
3050 // Canonicalize zext (lower half) to LHS.
3051 if (!isa<ZExtInst>(Op0))
3052 std::swap(Op0, Op1);
3053
3054 // Find lower/upper half.
3055 Value *LowerSrc, *ShlVal, *UpperSrc;
3056 const APInt *C;
3057 if (!match(Op0, m_OneUse(m_ZExt(m_Value(LowerSrc)))) ||
3058 !match(Op1, m_OneUse(m_Shl(m_Value(ShlVal), m_APInt(C)))) ||
3059 !match(ShlVal, m_OneUse(m_ZExt(m_Value(UpperSrc)))))
3060 return nullptr;
3061 if (*C != HalfWidth || LowerSrc->getType() != UpperSrc->getType() ||
3062 LowerSrc->getType()->getScalarSizeInBits() != HalfWidth)
3063 return nullptr;
3064
3065 auto ConcatIntrinsicCalls = [&](Intrinsic::ID id, Value *Lo, Value *Hi) {
3066 Value *NewLower = Builder.CreateZExt(Lo, Ty);
3067 Value *NewUpper = Builder.CreateZExt(Hi, Ty);
3068 NewUpper = Builder.CreateShl(NewUpper, HalfWidth);
3069 Value *BinOp = Builder.CreateOr(NewLower, NewUpper);
3070 Function *F = Intrinsic::getDeclaration(Or.getModule(), id, Ty);
3071 return Builder.CreateCall(F, BinOp);
3072 };
3073
3074 // BSWAP: Push the concat down, swapping the lower/upper sources.
3075 // concat(bswap(x),bswap(y)) -> bswap(concat(x,y))
3076 Value *LowerBSwap, *UpperBSwap;
3077 if (match(LowerSrc, m_BSwap(m_Value(LowerBSwap))) &&
3078 match(UpperSrc, m_BSwap(m_Value(UpperBSwap))))
3079 return ConcatIntrinsicCalls(Intrinsic::bswap, UpperBSwap, LowerBSwap);
3080
3081 // BITREVERSE: Push the concat down, swapping the lower/upper sources.
3082 // concat(bitreverse(x),bitreverse(y)) -> bitreverse(concat(x,y))
3083 Value *LowerBRev, *UpperBRev;
3084 if (match(LowerSrc, m_BitReverse(m_Value(LowerBRev))) &&
3085 match(UpperSrc, m_BitReverse(m_Value(UpperBRev))))
3086 return ConcatIntrinsicCalls(Intrinsic::bitreverse, UpperBRev, LowerBRev);
3087
3088 return nullptr;
3089 }
3090
3091 /// If all elements of two constant vectors are 0/-1 and inverses, return true.
3092 static bool areInverseVectorBitmasks(Constant *C1, Constant *C2) {
3093 unsigned NumElts = cast<FixedVectorType>(C1->getType())->getNumElements();
3094 for (unsigned i = 0; i != NumElts; ++i) {
3095 Constant *EltC1 = C1->getAggregateElement(i);
3096 Constant *EltC2 = C2->getAggregateElement(i);
3097 if (!EltC1 || !EltC2)
3098 return false;
3099
3100 // One element must be all ones, and the other must be all zeros.
3101 if (!((match(EltC1, m_Zero()) && match(EltC2, m_AllOnes())) ||
3102 (match(EltC2, m_Zero()) && match(EltC1, m_AllOnes()))))
3103 return false;
3104 }
3105 return true;
3106 }
3107
3108 /// We have an expression of the form (A & C) | (B & D). If A is a scalar or
3109 /// vector composed of all-zeros or all-ones values and is the bitwise 'not' of
3110 /// B, it can be used as the condition operand of a select instruction.
3111 /// We will detect (A & C) | ~(B | D) when the flag ABIsTheSame is enabled.
3112 Value *InstCombinerImpl::getSelectCondition(Value *A, Value *B,
3113 bool ABIsTheSame) {
3114 // We may have peeked through bitcasts in the caller.
3115 // Exit immediately if we don't have (vector) integer types.
3116 Type *Ty = A->getType();
3117 if (!Ty->isIntOrIntVectorTy() || !B->getType()->isIntOrIntVectorTy())
3118 return nullptr;
3119
3120 // If A is the 'not' operand of B and has enough signbits, we have our answer.
3121 if (ABIsTheSame ? (A == B) : match(B, m_Not(m_Specific(A)))) {
3122 // If these are scalars or vectors of i1, A can be used directly.
3123 if (Ty->isIntOrIntVectorTy(1))
3124 return A;
3125
3126 // If we look through a vector bitcast, the caller will bitcast the operands
3127 // to match the condition's number of bits (N x i1).
3128 // To make this poison-safe, disallow bitcast from wide element to narrow
3129 // element. That could allow poison in lanes where it was not present in the
3130 // original code.
3131 A = peekThroughBitcast(A);
3132 if (A->getType()->isIntOrIntVectorTy()) {
3133 unsigned NumSignBits = ComputeNumSignBits(A);
3134 if (NumSignBits == A->getType()->getScalarSizeInBits() &&
3135 NumSignBits <= Ty->getScalarSizeInBits())
3136 return Builder.CreateTrunc(A, CmpInst::makeCmpResultType(A->getType()));
3137 }
3138 return nullptr;
3139 }
3140
3141 // TODO: add support for sext and constant case
3142 if (ABIsTheSame)
3143 return nullptr;
3144
3145 // If both operands are constants, see if the constants are inverse bitmasks.
3146 Constant *AConst, *BConst;
3147 if (match(A, m_Constant(AConst)) && match(B, m_Constant(BConst)))
3148 if (AConst == ConstantExpr::getNot(BConst) &&
3149 ComputeNumSignBits(A) == Ty->getScalarSizeInBits())
3150 return Builder.CreateZExtOrTrunc(A, CmpInst::makeCmpResultType(Ty));
3151
3152 // Look for more complex patterns. The 'not' op may be hidden behind various
3153 // casts. Look through sexts and bitcasts to find the booleans.
3154 Value *Cond;
3155 Value *NotB;
3156 if (match(A, m_SExt(m_Value(Cond))) &&
3157 Cond->getType()->isIntOrIntVectorTy(1)) {
3158 // A = sext i1 Cond; B = sext (not (i1 Cond))
3159 if (match(B, m_SExt(m_Not(m_Specific(Cond)))))
3160 return Cond;
3161
3162 // A = sext i1 Cond; B = not ({bitcast} (sext (i1 Cond)))
3163 // TODO: The one-use checks are unnecessary or misplaced. If the caller
3164 // checked for uses on logic ops/casts, that should be enough to
3165 // make this transform worthwhile.
3166 if (match(B, m_OneUse(m_Not(m_Value(NotB))))) {
3167 NotB = peekThroughBitcast(NotB, true);
3168 if (match(NotB, m_SExt(m_Specific(Cond))))
3169 return Cond;
3170 }
3171 }
3172
3173 // All scalar (and most vector) possibilities should be handled now.
3174 // Try more matches that only apply to non-splat constant vectors.
3175 if (!Ty->isVectorTy())
3176 return nullptr;
3177
3178 // If both operands are xor'd with constants using the same sexted boolean
3179 // operand, see if the constants are inverse bitmasks.
3180 // TODO: Use ConstantExpr::getNot()?
3181 if (match(A, (m_Xor(m_SExt(m_Value(Cond)), m_Constant(AConst)))) &&
3182 match(B, (m_Xor(m_SExt(m_Specific(Cond)), m_Constant(BConst)))) &&
3183 Cond->getType()->isIntOrIntVectorTy(1) &&
3184 areInverseVectorBitmasks(AConst, BConst)) {
3185 AConst = ConstantExpr::getTrunc(AConst, CmpInst::makeCmpResultType(Ty));
3186 return Builder.CreateXor(Cond, AConst);
3187 }
3188 return nullptr;
3189 }
3190
3191 /// We have an expression of the form (A & B) | (C & D). Try to simplify this
3192 /// to "A' ? B : D", where A' is a boolean or vector of booleans.
3193 /// When InvertFalseVal is set to true, we try to match the pattern
3194 /// where we have peeked through a 'not' op and A and C are the same:
3195 /// (A & B) | ~(A | D) --> (A & B) | (~A & ~D) --> A' ? B : ~D
3196 Value *InstCombinerImpl::matchSelectFromAndOr(Value *A, Value *B, Value *C,
3197 Value *D, bool InvertFalseVal) {
3198 // The potential condition of the select may be bitcasted. In that case, look
3199 // through its bitcast and the corresponding bitcast of the 'not' condition.
3200 Type *OrigType = A->getType();
3201 A = peekThroughBitcast(A, true);
3202 C = peekThroughBitcast(C, true);
3203 if (Value *Cond = getSelectCondition(A, C, InvertFalseVal)) {
3204 // ((bc Cond) & B) | ((bc ~Cond) & D) --> bc (select Cond, (bc B), (bc D))
3205 // If this is a vector, we may need to cast to match the condition's length.
3206 // The bitcasts will either all exist or all not exist. The builder will
3207 // not create unnecessary casts if the types already match.
3208 Type *SelTy = A->getType();
3209 if (auto *VecTy = dyn_cast<VectorType>(Cond->getType())) {
3210 // For a fixed or scalable vector, get N from <{vscale x} N x iM>.
3211 unsigned Elts = VecTy->getElementCount().getKnownMinValue();
3212 // For a fixed or scalable vector, get the size in bits of N x iM; for a
3213 // scalar this is just M.
3214 unsigned SelEltSize = SelTy->getPrimitiveSizeInBits().getKnownMinValue();
3215 Type *EltTy = Builder.getIntNTy(SelEltSize / Elts);
3216 SelTy = VectorType::get(EltTy, VecTy->getElementCount());
3217 }
3218 Value *BitcastB = Builder.CreateBitCast(B, SelTy);
3219 if (InvertFalseVal)
3220 D = Builder.CreateNot(D);
3221 Value *BitcastD = Builder.CreateBitCast(D, SelTy);
3222 Value *Select = Builder.CreateSelect(Cond, BitcastB, BitcastD);
3223 return Builder.CreateBitCast(Select, OrigType);
3224 }
3225
3226 return nullptr;
3227 }
3228
3229 // (icmp eq X, C) | (icmp ult Other, (X - C)) -> (icmp ule Other, (X - (C + 1)))
3230 // (icmp ne X, C) & (icmp uge Other, (X - C)) -> (icmp ugt Other, (X - (C + 1)))
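// Why this holds (illustrative): if X == C, then X - (C + 1) wraps to -1
// (unsigned max), so "Other u<= X - (C + 1)" is trivially true; if X != C,
// then "Other u< (X - C)" is exactly "Other u<= X - (C + 1)".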
3231 static Value *foldAndOrOfICmpEqConstantAndICmp(ICmpInst *LHS, ICmpInst *RHS,
3232 bool IsAnd, bool IsLogical,
3233 IRBuilderBase &Builder) {
3234 Value *LHS0 = LHS->getOperand(0);
3235 Value *RHS0 = RHS->getOperand(0);
3236 Value *RHS1 = RHS->getOperand(1);
3237
3238 ICmpInst::Predicate LPred =
3239 IsAnd ? LHS->getInversePredicate() : LHS->getPredicate();
3240 ICmpInst::Predicate RPred =
3241 IsAnd ? RHS->getInversePredicate() : RHS->getPredicate();
3242
3243 const APInt *CInt;
3244 if (LPred != ICmpInst::ICMP_EQ ||
3245 !match(LHS->getOperand(1), m_APIntAllowPoison(CInt)) ||
3246 !LHS0->getType()->isIntOrIntVectorTy() ||
3247 !(LHS->hasOneUse() || RHS->hasOneUse()))
3248 return nullptr;
3249
3250 auto MatchRHSOp = [LHS0, CInt](const Value *RHSOp) {
3251 return match(RHSOp,
3252 m_Add(m_Specific(LHS0), m_SpecificIntAllowPoison(-*CInt))) ||
3253 (CInt->isZero() && RHSOp == LHS0);
3254 };
3255
3256 Value *Other;
3257 if (RPred == ICmpInst::ICMP_ULT && MatchRHSOp(RHS1))
3258 Other = RHS0;
3259 else if (RPred == ICmpInst::ICMP_UGT && MatchRHSOp(RHS0))
3260 Other = RHS1;
3261 else
3262 return nullptr;
3263
3264 if (IsLogical)
3265 Other = Builder.CreateFreeze(Other);
3266
3267 return Builder.CreateICmp(
3268 IsAnd ? ICmpInst::ICMP_ULT : ICmpInst::ICMP_UGE,
3269 Builder.CreateSub(LHS0, ConstantInt::get(LHS0->getType(), *CInt + 1)),
3270 Other);
3271 }
3272
3273 /// Fold (icmp)&(icmp) or (icmp)|(icmp) if possible.
3274 /// If IsLogical is true, then the and/or is in select form and the transform
3275 /// must be poison-safe.
3276 Value *InstCombinerImpl::foldAndOrOfICmps(ICmpInst *LHS, ICmpInst *RHS,
3277 Instruction &I, bool IsAnd,
3278 bool IsLogical) {
3279 const SimplifyQuery Q = SQ.getWithInstruction(&I);
3280
3281 // Fold (iszero(A & K1) | iszero(A & K2)) -> (A & (K1 | K2)) != (K1 | K2)
3282 // Fold (!iszero(A & K1) & !iszero(A & K2)) -> (A & (K1 | K2)) == (K1 | K2)
3283 // if K1 and K2 are a one-bit mask.
3284 if (Value *V = foldAndOrOfICmpsOfAndWithPow2(LHS, RHS, &I, IsAnd, IsLogical))
3285 return V;
3286
3287 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
3288 Value *LHS0 = LHS->getOperand(0), *RHS0 = RHS->getOperand(0);
3289 Value *LHS1 = LHS->getOperand(1), *RHS1 = RHS->getOperand(1);
3290
3291 const APInt *LHSC = nullptr, *RHSC = nullptr;
3292 match(LHS1, m_APInt(LHSC));
3293 match(RHS1, m_APInt(RHSC));
3294
3295 // (icmp1 A, B) | (icmp2 A, B) --> (icmp3 A, B)
3296 // (icmp1 A, B) & (icmp2 A, B) --> (icmp3 A, B)
3297 if (predicatesFoldable(PredL, PredR)) {
3298 if (LHS0 == RHS1 && LHS1 == RHS0) {
3299 PredL = ICmpInst::getSwappedPredicate(PredL);
3300 std::swap(LHS0, LHS1);
3301 }
3302 if (LHS0 == RHS0 && LHS1 == RHS1) {
3303 unsigned Code = IsAnd ? getICmpCode(PredL) & getICmpCode(PredR)
3304 : getICmpCode(PredL) | getICmpCode(PredR);
3305 bool IsSigned = LHS->isSigned() || RHS->isSigned();
3306 return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
3307 }
3308 }
3309
3310 // handle (roughly):
3311 // (icmp ne (A & B), C) | (icmp ne (A & D), E)
3312 // (icmp eq (A & B), C) & (icmp eq (A & D), E)
3313 if (Value *V = foldLogOpOfMaskedICmps(LHS, RHS, IsAnd, IsLogical, Builder))
3314 return V;
3315
3316 if (Value *V =
3317 foldAndOrOfICmpEqConstantAndICmp(LHS, RHS, IsAnd, IsLogical, Builder))
3318 return V;
3319 // We can treat logical like bitwise here, because both operands are used on
3320 // the LHS, and as such poison from both will propagate.
3321 if (Value *V = foldAndOrOfICmpEqConstantAndICmp(RHS, LHS, IsAnd,
3322 /*IsLogical*/ false, Builder))
3323 return V;
3324
3325 if (Value *V =
3326 foldAndOrOfICmpsWithConstEq(LHS, RHS, IsAnd, IsLogical, Builder, Q))
3327 return V;
3328 // We can convert this case to bitwise and, because both operands are used
3329 // on the LHS, and as such poison from both will propagate.
3330 if (Value *V = foldAndOrOfICmpsWithConstEq(RHS, LHS, IsAnd,
3331 /*IsLogical*/ false, Builder, Q))
3332 return V;
3333
3334 if (Value *V = foldIsPowerOf2OrZero(LHS, RHS, IsAnd, Builder))
3335 return V;
3336 if (Value *V = foldIsPowerOf2OrZero(RHS, LHS, IsAnd, Builder))
3337 return V;
3338
3339 // TODO: One of these directions is fine with logical and/or, the other could
3340 // be supported by inserting freeze.
3341 if (!IsLogical) {
3342 // E.g. (icmp slt x, 0) | (icmp sgt x, n) --> icmp ugt x, n
3343 // E.g. (icmp sge x, 0) & (icmp slt x, n) --> icmp ult x, n
3344 if (Value *V = simplifyRangeCheck(LHS, RHS, /*Inverted=*/!IsAnd))
3345 return V;
3346
3347 // E.g. (icmp sgt x, n) | (icmp slt x, 0) --> icmp ugt x, n
3348 // E.g. (icmp slt x, n) & (icmp sge x, 0) --> icmp ult x, n
3349 if (Value *V = simplifyRangeCheck(RHS, LHS, /*Inverted=*/!IsAnd))
3350 return V;
3351 }
3352
3353 // TODO: Add conjugated or fold, check whether it is safe for logical and/or.
3354 if (IsAnd && !IsLogical)
3355 if (Value *V = foldSignedTruncationCheck(LHS, RHS, I, Builder))
3356 return V;
3357
3358 if (Value *V = foldIsPowerOf2(LHS, RHS, IsAnd, Builder, *this))
3359 return V;
3360
3361 if (Value *V = foldPowerOf2AndShiftedMask(LHS, RHS, IsAnd, Builder))
3362 return V;
3363
3364 // TODO: Verify whether this is safe for logical and/or.
3365 if (!IsLogical) {
3366 if (Value *X = foldUnsignedUnderflowCheck(LHS, RHS, IsAnd, Q, Builder))
3367 return X;
3368 if (Value *X = foldUnsignedUnderflowCheck(RHS, LHS, IsAnd, Q, Builder))
3369 return X;
3370 }
3371
3372 if (Value *X = foldEqOfParts(LHS, RHS, IsAnd))
3373 return X;
3374
3375 // (icmp ne A, 0) | (icmp ne B, 0) --> (icmp ne (A|B), 0)
3376 // (icmp eq A, 0) & (icmp eq B, 0) --> (icmp eq (A|B), 0)
3377 // TODO: Remove this and below when foldLogOpOfMaskedICmps can handle undefs.
3378 if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3379 PredL == PredR && match(LHS1, m_ZeroInt()) && match(RHS1, m_ZeroInt()) &&
3380 LHS0->getType() == RHS0->getType()) {
3381 Value *NewOr = Builder.CreateOr(LHS0, RHS0);
3382 return Builder.CreateICmp(PredL, NewOr,
3383 Constant::getNullValue(NewOr->getType()));
3384 }
3385
3386 // (icmp ne A, -1) | (icmp ne B, -1) --> (icmp ne (A&B), -1)
3387 // (icmp eq A, -1) & (icmp eq B, -1) --> (icmp eq (A&B), -1)
3388 if (!IsLogical && PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3389 PredL == PredR && match(LHS1, m_AllOnes()) && match(RHS1, m_AllOnes()) &&
3390 LHS0->getType() == RHS0->getType()) {
3391 Value *NewAnd = Builder.CreateAnd(LHS0, RHS0);
3392 return Builder.CreateICmp(PredL, NewAnd,
3393 Constant::getAllOnesValue(LHS0->getType()));
3394 }
3395
3396 if (!IsLogical)
3397 if (Value *V =
3398 foldAndOrOfICmpsWithPow2AndWithZero(Builder, LHS, RHS, IsAnd, Q))
3399 return V;
3400
3401 // This only handles icmp of constants: (icmp1 A, C1) | (icmp2 B, C2).
3402 if (!LHSC || !RHSC)
3403 return nullptr;
3404
3405 // (trunc x) == C1 & (and x, CA) == C2 -> (and x, CA|CMAX) == C1|C2
3406 // (trunc x) != C1 | (and x, CA) != C2 -> (and x, CA|CMAX) != C1|C2
3407 // where CMAX is the all ones value for the truncated type,
3408 // iff the lower bits of C2 and CA are zero.
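  // Worked example (illustrative, i32 truncated to i8, so CMAX == 0xFF):
  //   (trunc i32 %x to i8) == 0x12  &  (%x & 0xFF00) == 0x3400
  //   --> (%x & 0xFFFF) == 0x3412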
3409 if (PredL == (IsAnd ? ICmpInst::ICMP_EQ : ICmpInst::ICMP_NE) &&
3410 PredL == PredR && LHS->hasOneUse() && RHS->hasOneUse()) {
3411 Value *V;
3412 const APInt *AndC, *SmallC = nullptr, *BigC = nullptr;
3413
3414 // (trunc x) == C1 & (and x, CA) == C2
3415 // (and x, CA) == C2 & (trunc x) == C1
3416 if (match(RHS0, m_Trunc(m_Value(V))) &&
3417 match(LHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
3418 SmallC = RHSC;
3419 BigC = LHSC;
3420 } else if (match(LHS0, m_Trunc(m_Value(V))) &&
3421 match(RHS0, m_And(m_Specific(V), m_APInt(AndC)))) {
3422 SmallC = LHSC;
3423 BigC = RHSC;
3424 }
3425
3426 if (SmallC && BigC) {
3427 unsigned BigBitSize = BigC->getBitWidth();
3428 unsigned SmallBitSize = SmallC->getBitWidth();
3429
3430 // Check that the low bits are zero.
3431 APInt Low = APInt::getLowBitsSet(BigBitSize, SmallBitSize);
3432 if ((Low & *AndC).isZero() && (Low & *BigC).isZero()) {
3433 Value *NewAnd = Builder.CreateAnd(V, Low | *AndC);
3434 APInt N = SmallC->zext(BigBitSize) | *BigC;
3435 Value *NewVal = ConstantInt::get(NewAnd->getType(), N);
3436 return Builder.CreateICmp(PredL, NewAnd, NewVal);
3437 }
3438 }
3439 }
3440
3441 // Match the naive pattern (and its inverted form) checking whether two
3442 // values share the same sign. An example of the pattern:
3443 // (icmp slt (X & Y), 0) | (icmp sgt (X | Y), -1) -> (icmp sgt (X ^ Y), -1)
3444 // Inverted form (example):
3445 // (icmp slt (X | Y), 0) & (icmp sgt (X & Y), -1) -> (icmp slt (X ^ Y), 0)
3446 bool TrueIfSignedL, TrueIfSignedR;
3447 if (isSignBitCheck(PredL, *LHSC, TrueIfSignedL) &&
3448 isSignBitCheck(PredR, *RHSC, TrueIfSignedR) &&
3449 (RHS->hasOneUse() || LHS->hasOneUse())) {
3450 Value *X, *Y;
3451 if (IsAnd) {
3452 if ((TrueIfSignedL && !TrueIfSignedR &&
3453 match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
3454 match(RHS0, m_c_And(m_Specific(X), m_Specific(Y)))) ||
3455 (!TrueIfSignedL && TrueIfSignedR &&
3456 match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
3457 match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y))))) {
3458 Value *NewXor = Builder.CreateXor(X, Y);
3459 return Builder.CreateIsNeg(NewXor);
3460 }
3461 } else {
3462 if ((TrueIfSignedL && !TrueIfSignedR &&
3463 match(LHS0, m_And(m_Value(X), m_Value(Y))) &&
3464 match(RHS0, m_c_Or(m_Specific(X), m_Specific(Y)))) ||
3465 (!TrueIfSignedL && TrueIfSignedR &&
3466 match(LHS0, m_Or(m_Value(X), m_Value(Y))) &&
3467 match(RHS0, m_c_And(m_Specific(X), m_Specific(Y))))) {
3468 Value *NewXor = Builder.CreateXor(X, Y);
3469 return Builder.CreateIsNotNeg(NewXor);
3470 }
3471 }
3472 }
3473
3474 return foldAndOrOfICmpsUsingRanges(LHS, RHS, IsAnd);
3475 }
3476
3477 static Value *foldOrOfInversions(BinaryOperator &I,
3478 InstCombiner::BuilderTy &Builder) {
3479 assert(I.getOpcode() == Instruction::Or &&
3480 "Simplification only supports or at the moment.");
3481
3482 Value *Cmp1, *Cmp2, *Cmp3, *Cmp4;
3483 if (!match(I.getOperand(0), m_And(m_Value(Cmp1), m_Value(Cmp2))) ||
3484 !match(I.getOperand(1), m_And(m_Value(Cmp3), m_Value(Cmp4))))
3485 return nullptr;
3486
3487 // Check if any two pairs of the and operations are inversions of each other.
3488 if (isKnownInversion(Cmp1, Cmp3) && isKnownInversion(Cmp2, Cmp4))
3489 return Builder.CreateXor(Cmp1, Cmp4);
3490 if (isKnownInversion(Cmp1, Cmp4) && isKnownInversion(Cmp2, Cmp3))
3491 return Builder.CreateXor(Cmp1, Cmp3);
3492
3493 return nullptr;
3494 }
3495
3496 // FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
3497 // here. We should standardize that construct where it is needed or choose some
3498 // other way to ensure that commuted variants of patterns are not missed.
3499 Instruction *InstCombinerImpl::visitOr(BinaryOperator &I) {
3500 if (Value *V = simplifyOrInst(I.getOperand(0), I.getOperand(1),
3501 SQ.getWithInstruction(&I)))
3502 return replaceInstUsesWith(I, V);
3503
3504 if (SimplifyAssociativeOrCommutative(I))
3505 return &I;
3506
3507 if (Instruction *X = foldVectorBinop(I))
3508 return X;
3509
3510 if (Instruction *Phi = foldBinopWithPhiOperands(I))
3511 return Phi;
3512
3513 // See if we can simplify any instructions used by the instruction whose sole
3514 // purpose is to compute bits we don't care about.
3515 if (SimplifyDemandedInstructionBits(I))
3516 return &I;
3517
3518 // Do this before using distributive laws to catch simple and/or/not patterns.
3519 if (Instruction *Xor = foldOrToXor(I, Builder))
3520 return Xor;
3521
3522 if (Instruction *X = foldComplexAndOrPatterns(I, Builder))
3523 return X;
3524
3525 // (A & B) | (C & D) -> A ^ D where A == ~C && B == ~D
3526 // (A & B) | (C & D) -> A ^ C where A == ~D && B == ~C
3527 if (Value *V = foldOrOfInversions(I, Builder))
3528 return replaceInstUsesWith(I, V);
3529
3530 // (A&B)|(A&C) -> A&(B|C) etc
3531 if (Value *V = foldUsingDistributiveLaws(I))
3532 return replaceInstUsesWith(I, V);
3533
3534 Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
3535 Type *Ty = I.getType();
3536 if (Ty->isIntOrIntVectorTy(1)) {
3537 if (auto *SI0 = dyn_cast<SelectInst>(Op0)) {
3538 if (auto *R =
3539 foldAndOrOfSelectUsingImpliedCond(Op1, *SI0, /* IsAnd */ false))
3540 return R;
3541 }
3542 if (auto *SI1 = dyn_cast<SelectInst>(Op1)) {
3543 if (auto *R =
3544 foldAndOrOfSelectUsingImpliedCond(Op0, *SI1, /* IsAnd */ false))
3545 return R;
3546 }
3547 }
3548
3549 if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
3550 return FoldedLogic;
3551
3552 if (Instruction *BitOp = matchBSwapOrBitReverse(I, /*MatchBSwaps*/ true,
3553 /*MatchBitReversals*/ true))
3554 return BitOp;
3555
3556 if (Instruction *Funnel = matchFunnelShift(I, *this))
3557 return Funnel;
3558
3559 if (Instruction *Concat = matchOrConcat(I, Builder))
3560 return replaceInstUsesWith(I, Concat);
3561
3562 if (Instruction *R = foldBinOpShiftWithShift(I))
3563 return R;
3564
3565 if (Instruction *R = tryFoldInstWithCtpopWithNot(&I))
3566 return R;
3567
3568 Value *X, *Y;
3569 const APInt *CV;
3570 if (match(&I, m_c_Or(m_OneUse(m_Xor(m_Value(X), m_APInt(CV))), m_Value(Y))) &&
3571 !CV->isAllOnes() && MaskedValueIsZero(Y, *CV, 0, &I)) {
3572 // (X ^ C) | Y -> (X | Y) ^ C iff Y & C == 0
3573 // The check for a 'not' op is for efficiency (if Y is known zero --> ~X).
3574 Value *Or = Builder.CreateOr(X, Y);
3575 return BinaryOperator::CreateXor(Or, ConstantInt::get(Ty, *CV));
3576 }
3577
3578 // If the operands have no common bits set:
3579 // or (mul X, Y), X --> add (mul X, Y), X --> mul X, (Y + 1)
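  // Reasoning (illustrative): with no common bits set, 'or' behaves as 'add',
  // so (mul %x, %y) | %x == (mul %x, %y) + %x == mul %x, (%y + 1).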
3580 if (match(&I, m_c_DisjointOr(m_OneUse(m_Mul(m_Value(X), m_Value(Y))),
3581 m_Deferred(X)))) {
3582 Value *IncrementY = Builder.CreateAdd(Y, ConstantInt::get(Ty, 1));
3583 return BinaryOperator::CreateMul(X, IncrementY);
3584 }
3585
3586 // (A & C) | (B & D)
3587 Value *A, *B, *C, *D;
3588 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
3589 match(Op1, m_And(m_Value(B), m_Value(D)))) {
3590
3591 // (A & C0) | (B & C1)
3592 const APInt *C0, *C1;
3593 if (match(C, m_APInt(C0)) && match(D, m_APInt(C1))) {
3594 Value *X;
3595 if (*C0 == ~*C1) {
3596 // ((X | B) & MaskC) | (B & ~MaskC) -> (X & MaskC) | B
3597 if (match(A, m_c_Or(m_Value(X), m_Specific(B))))
3598 return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C0), B);
3599 // (A & MaskC) | ((X | A) & ~MaskC) -> (X & ~MaskC) | A
3600 if (match(B, m_c_Or(m_Specific(A), m_Value(X))))
3601 return BinaryOperator::CreateOr(Builder.CreateAnd(X, *C1), A);
3602
3603 // ((X ^ B) & MaskC) | (B & ~MaskC) -> (X & MaskC) ^ B
3604 if (match(A, m_c_Xor(m_Value(X), m_Specific(B))))
3605 return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C0), B);
3606 // (A & MaskC) | ((X ^ A) & ~MaskC) -> (X & ~MaskC) ^ A
3607 if (match(B, m_c_Xor(m_Specific(A), m_Value(X))))
3608 return BinaryOperator::CreateXor(Builder.CreateAnd(X, *C1), A);
3609 }
3610
3611 if ((*C0 & *C1).isZero()) {
3612 // ((X | B) & C0) | (B & C1) --> (X | B) & (C0 | C1)
3613 // iff (C0 & C1) == 0 and (X & ~C0) == 0
3614 if (match(A, m_c_Or(m_Value(X), m_Specific(B))) &&
3615 MaskedValueIsZero(X, ~*C0, 0, &I)) {
3616 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3617 return BinaryOperator::CreateAnd(A, C01);
3618 }
3619 // (A & C0) | ((X | A) & C1) --> (X | A) & (C0 | C1)
3620 // iff (C0 & C1) == 0 and (X & ~C1) == 0
3621 if (match(B, m_c_Or(m_Value(X), m_Specific(A))) &&
3622 MaskedValueIsZero(X, ~*C1, 0, &I)) {
3623 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3624 return BinaryOperator::CreateAnd(B, C01);
3625 }
3626 // ((X | C2) & C0) | ((X | C3) & C1) --> (X | C2 | C3) & (C0 | C1)
3627 // iff (C0 & C1) == 0 and (C2 & ~C0) == 0 and (C3 & ~C1) == 0.
3628 const APInt *C2, *C3;
3629 if (match(A, m_Or(m_Value(X), m_APInt(C2))) &&
3630 match(B, m_Or(m_Specific(X), m_APInt(C3))) &&
3631 (*C2 & ~*C0).isZero() && (*C3 & ~*C1).isZero()) {
3632 Value *Or = Builder.CreateOr(X, *C2 | *C3, "bitfield");
3633 Constant *C01 = ConstantInt::get(Ty, *C0 | *C1);
3634 return BinaryOperator::CreateAnd(Or, C01);
3635 }
3636 }
3637 }
3638
3639 // Don't try to form a select if it's unlikely that we'll get rid of at
3640 // least one of the operands. A select is generally more expensive than the
3641 // 'or' that it is replacing.
3642 if (Op0->hasOneUse() || Op1->hasOneUse()) {
3643 // (Cond & C) | (~Cond & D) -> Cond ? C : D, and commuted variants.
3644 if (Value *V = matchSelectFromAndOr(A, C, B, D))
3645 return replaceInstUsesWith(I, V);
3646 if (Value *V = matchSelectFromAndOr(A, C, D, B))
3647 return replaceInstUsesWith(I, V);
3648 if (Value *V = matchSelectFromAndOr(C, A, B, D))
3649 return replaceInstUsesWith(I, V);
3650 if (Value *V = matchSelectFromAndOr(C, A, D, B))
3651 return replaceInstUsesWith(I, V);
3652 if (Value *V = matchSelectFromAndOr(B, D, A, C))
3653 return replaceInstUsesWith(I, V);
3654 if (Value *V = matchSelectFromAndOr(B, D, C, A))
3655 return replaceInstUsesWith(I, V);
3656 if (Value *V = matchSelectFromAndOr(D, B, A, C))
3657 return replaceInstUsesWith(I, V);
3658 if (Value *V = matchSelectFromAndOr(D, B, C, A))
3659 return replaceInstUsesWith(I, V);
3660 }
3661 }
3662
3663 if (match(Op0, m_And(m_Value(A), m_Value(C))) &&
3664 match(Op1, m_Not(m_Or(m_Value(B), m_Value(D)))) &&
3665 (Op0->hasOneUse() || Op1->hasOneUse())) {
3666 // (Cond & C) | ~(Cond | D) -> Cond ? C : ~D
3667 if (Value *V = matchSelectFromAndOr(A, C, B, D, true))
3668 return replaceInstUsesWith(I, V);
3669 if (Value *V = matchSelectFromAndOr(A, C, D, B, true))
3670 return replaceInstUsesWith(I, V);
3671 if (Value *V = matchSelectFromAndOr(C, A, B, D, true))
3672 return replaceInstUsesWith(I, V);
3673 if (Value *V = matchSelectFromAndOr(C, A, D, B, true))
3674 return replaceInstUsesWith(I, V);
3675 }
3676
3677 // (A ^ B) | ((B ^ C) ^ A) -> (A ^ B) | C
3678 if (match(Op0, m_Xor(m_Value(A), m_Value(B))))
3679 if (match(Op1,
3680 m_c_Xor(m_c_Xor(m_Specific(B), m_Value(C)), m_Specific(A))) ||
3681 match(Op1, m_c_Xor(m_c_Xor(m_Specific(A), m_Value(C)), m_Specific(B))))
3682 return BinaryOperator::CreateOr(Op0, C);
3683
3684 // ((B ^ C) ^ A) | (A ^ B) -> (A ^ B) | C
3685 if (match(Op1, m_Xor(m_Value(A), m_Value(B))))
3686 if (match(Op0,
3687 m_c_Xor(m_c_Xor(m_Specific(B), m_Value(C)), m_Specific(A))) ||
3688 match(Op0, m_c_Xor(m_c_Xor(m_Specific(A), m_Value(C)), m_Specific(B))))
3689 return BinaryOperator::CreateOr(Op1, C);
3690
3691 if (Instruction *DeMorgan = matchDeMorgansLaws(I, *this))
3692 return DeMorgan;
3693
3694 // Canonicalize xor to the RHS.
3695 bool SwappedForXor = false;
3696 if (match(Op0, m_Xor(m_Value(), m_Value()))) {
3697 std::swap(Op0, Op1);
3698 SwappedForXor = true;
3699 }
3700
3701 if (match(Op1, m_Xor(m_Value(A), m_Value(B)))) {
3702 // (A | ?) | (A ^ B) --> (A | ?) | B
3703 // (B | ?) | (A ^ B) --> (B | ?) | A
3704 if (match(Op0, m_c_Or(m_Specific(A), m_Value())))
3705 return BinaryOperator::CreateOr(Op0, B);
3706 if (match(Op0, m_c_Or(m_Specific(B), m_Value())))
3707 return BinaryOperator::CreateOr(Op0, A);
3708
3709 // (A & B) | (A ^ B) --> A | B
3710 // (B & A) | (A ^ B) --> A | B
3711 if (match(Op0, m_c_And(m_Specific(A), m_Specific(B))))
3712 return BinaryOperator::CreateOr(A, B);
3713
3714 // ~A | (A ^ B) --> ~(A & B)
3715 // ~B | (A ^ B) --> ~(A & B)
3716 // The swap above should always make Op0 the 'not'.
3717 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3718 (match(Op0, m_Not(m_Specific(A))) || match(Op0, m_Not(m_Specific(B)))))
3719 return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));
3720
3721 // Same as above, but peek through an 'and' to the common operand:
3722 // ~(A & ?) | (A ^ B) --> ~((A & ?) & B)
3723 // ~(B & ?) | (A ^ B) --> ~((B & ?) & A)
3724 Instruction *And;
3725 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3726 match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
3727 m_c_And(m_Specific(A), m_Value())))))
3728 return BinaryOperator::CreateNot(Builder.CreateAnd(And, B));
3729 if ((Op0->hasOneUse() || Op1->hasOneUse()) &&
3730 match(Op0, m_Not(m_CombineAnd(m_Instruction(And),
3731 m_c_And(m_Specific(B), m_Value())))))
3732 return BinaryOperator::CreateNot(Builder.CreateAnd(And, A));
3733
3734 // (~A | C) | (A ^ B) --> ~(A & B) | C
3735 // (~B | C) | (A ^ B) --> ~(A & B) | C
3736 if (Op0->hasOneUse() && Op1->hasOneUse() &&
3737 (match(Op0, m_c_Or(m_Not(m_Specific(A)), m_Value(C))) ||
3738 match(Op0, m_c_Or(m_Not(m_Specific(B)), m_Value(C))))) {
3739 Value *Nand = Builder.CreateNot(Builder.CreateAnd(A, B), "nand");
3740 return BinaryOperator::CreateOr(Nand, C);
3741 }
3742 }
3743
3744 if (SwappedForXor)
3745 std::swap(Op0, Op1);
3746
3747 {
3748 ICmpInst *LHS = dyn_cast<ICmpInst>(Op0);
3749 ICmpInst *RHS = dyn_cast<ICmpInst>(Op1);
3750 if (LHS && RHS)
3751 if (Value *Res = foldAndOrOfICmps(LHS, RHS, I, /* IsAnd */ false))
3752 return replaceInstUsesWith(I, Res);
3753
3754 // TODO: Make this recursive; it's a little tricky because an arbitrary
3755 // number of 'or' instructions might have to be created.
3756 Value *X, *Y;
3757 if (LHS && match(Op1, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
3758 bool IsLogical = isa<SelectInst>(Op1);
3759 // LHS | (X || Y) --> (LHS || X) || Y
3760 if (auto *Cmp = dyn_cast<ICmpInst>(X))
3761 if (Value *Res =
3762 foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false, IsLogical))
3763 return replaceInstUsesWith(I, IsLogical
3764 ? Builder.CreateLogicalOr(Res, Y)
3765 : Builder.CreateOr(Res, Y));
3766 // LHS | (X || Y) --> X || (LHS | Y)
3767 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
3768 if (Value *Res = foldAndOrOfICmps(LHS, Cmp, I, /* IsAnd */ false,
3769 /* IsLogical */ false))
3770 return replaceInstUsesWith(I, IsLogical
3771 ? Builder.CreateLogicalOr(X, Res)
3772 : Builder.CreateOr(X, Res));
3773 }
3774 if (RHS && match(Op0, m_OneUse(m_LogicalOr(m_Value(X), m_Value(Y))))) {
3775 bool IsLogical = isa<SelectInst>(Op0);
3776 // (X || Y) | RHS --> (X || RHS) || Y
3777 if (auto *Cmp = dyn_cast<ICmpInst>(X))
3778 if (Value *Res =
3779 foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false, IsLogical))
3780 return replaceInstUsesWith(I, IsLogical
3781 ? Builder.CreateLogicalOr(Res, Y)
3782 : Builder.CreateOr(Res, Y));
3783 // (X || Y) | RHS --> X || (Y | RHS)
3784 if (auto *Cmp = dyn_cast<ICmpInst>(Y))
3785 if (Value *Res = foldAndOrOfICmps(Cmp, RHS, I, /* IsAnd */ false,
3786 /* IsLogical */ false))
3787 return replaceInstUsesWith(I, IsLogical
3788 ? Builder.CreateLogicalOr(X, Res)
3789 : Builder.CreateOr(X, Res));
3790 }
3791 }
3792
3793 if (FCmpInst *LHS = dyn_cast<FCmpInst>(I.getOperand(0)))
3794 if (FCmpInst *RHS = dyn_cast<FCmpInst>(I.getOperand(1)))
3795 if (Value *Res = foldLogicOfFCmps(LHS, RHS, /*IsAnd*/ false))
3796 return replaceInstUsesWith(I, Res);
3797
3798 if (Instruction *FoldedFCmps = reassociateFCmps(I, Builder))
3799 return FoldedFCmps;
3800
3801 if (Instruction *CastedOr = foldCastedBitwiseLogic(I))
3802 return CastedOr;
3803
3804 if (Instruction *Sel = foldBinopOfSextBoolToSelect(I))
3805 return Sel;
3806
3807 // or(sext(A), B) / or(B, sext(A)) --> A ? -1 : B, where A is i1 or <N x i1>.
3808 // TODO: Move this into foldBinopOfSextBoolToSelect as a more generalized fold
3809 // with binop identity constant. But creating a select with a non-constant
3810 // arm may not be reversible due to poison semantics. Is that a good
3811 // canonicalization?
3812 if (match(&I, m_c_Or(m_OneUse(m_SExt(m_Value(A))), m_Value(B))) &&
3813 A->getType()->isIntOrIntVectorTy(1))
3814 return SelectInst::Create(A, ConstantInt::getAllOnesValue(Ty), B);
3815
3816 // Note: If we've gotten to the point of visiting the outer OR, then the
3817 // inner one couldn't be simplified. If it was a constant, then it won't
3818 // be simplified by a later pass either, so we try swapping the inner/outer
3819 // ORs in the hopes that we'll be able to simplify it this way.
3820 // (X|C) | V --> (X|V) | C
3821 ConstantInt *CI;
3822 if (Op0->hasOneUse() && !match(Op1, m_ConstantInt()) &&
3823 match(Op0, m_Or(m_Value(A), m_ConstantInt(CI)))) {
3824 Value *Inner = Builder.CreateOr(A, Op1);
3825 Inner->takeName(Op0);
3826 return BinaryOperator::CreateOr(Inner, CI);
3827 }
3828
3829 // Change (or (bool?A:B),(bool?C:D)) --> (bool?(or A,C):(or B,D))
3830 // Since this OR instruction hasn't been optimized further yet, we hope
3831 // that this transformation will allow the new ORs to be optimized.
3832 {
3833 Value *X = nullptr, *Y = nullptr;
3834 if (Op0->hasOneUse() && Op1->hasOneUse() &&
3835 match(Op0, m_Select(m_Value(X), m_Value(A), m_Value(B))) &&
3836 match(Op1, m_Select(m_Value(Y), m_Value(C), m_Value(D))) && X == Y) {
3837 Value *orTrue = Builder.CreateOr(A, C);
3838 Value *orFalse = Builder.CreateOr(B, D);
3839 return SelectInst::Create(X, orTrue, orFalse);
3840 }
3841 }
3842
3843 // or(ashr(subNSW(Y, X), ScalarSizeInBits(Y) - 1), X) --> X s> Y ? -1 : X.
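  // Reasoning (illustrative): the ashr replicates the sign bit of the NSW
  // subtract (Y - X), yielding all-ones when X s> Y and zero otherwise, so
  // or'ing with X produces -1 exactly when X s> Y and X otherwise.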
3844 {
3845 Value *X, *Y;
3846 if (match(&I, m_c_Or(m_OneUse(m_AShr(
3847 m_NSWSub(m_Value(Y), m_Value(X)),
3848 m_SpecificInt(Ty->getScalarSizeInBits() - 1))),
3849 m_Deferred(X)))) {
3850 Value *NewICmpInst = Builder.CreateICmpSGT(X, Y);
3851 Value *AllOnes = ConstantInt::getAllOnesValue(Ty);
3852 return SelectInst::Create(NewICmpInst, AllOnes, X);
3853 }
3854 }
3855
3856 {
3857 // ((A & B) ^ A) | ((A & B) ^ B) -> A ^ B
3858 // (A ^ (A & B)) | (B ^ (A & B)) -> A ^ B
3859 // ((A & B) ^ B) | ((A & B) ^ A) -> A ^ B
3860 // (B ^ (A & B)) | (A ^ (A & B)) -> A ^ B
3861 const auto TryXorOpt = [&](Value *Lhs, Value *Rhs) -> Instruction * {
3862 if (match(Lhs, m_c_Xor(m_And(m_Value(A), m_Value(B)), m_Deferred(A))) &&
3863 match(Rhs,
3864 m_c_Xor(m_And(m_Specific(A), m_Specific(B)), m_Specific(B)))) {
3865 return BinaryOperator::CreateXor(A, B);
3866 }
3867 return nullptr;
3868 };
3869
3870 if (Instruction *Result = TryXorOpt(Op0, Op1))
3871 return Result;
3872 if (Instruction *Result = TryXorOpt(Op1, Op0))
3873 return Result;
3874 }
3875
3876 if (Instruction *V =
3877 canonicalizeCondSignextOfHighBitExtractToSignextHighBitExtract(I))
3878 return V;
3879
3880 CmpInst::Predicate Pred;
3881 Value *Mul, *Ov, *MulIsNotZero, *UMulWithOv;
3882 // Check if the OR weakens the overflow condition for umul.with.overflow by
3883 // treating any non-zero result as overflow. In that case, we overflow if both
3884 // umul.with.overflow operands are != 0, since a zero result is then only
3885 // possible if the multiplication overflows.
3886 if (match(&I,
3887 m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_Value(UMulWithOv)),
3888 m_Value(Ov)),
3889 m_CombineAnd(m_ICmp(Pred,
3890 m_CombineAnd(m_ExtractValue<0>(
3891 m_Deferred(UMulWithOv)),
3892 m_Value(Mul)),
3893 m_ZeroInt()),
3894 m_Value(MulIsNotZero)))) &&
3895 (Ov->hasOneUse() || (MulIsNotZero->hasOneUse() && Mul->hasOneUse())) &&
3896 Pred == CmpInst::ICMP_NE) {
3897 Value *A, *B;
3898 if (match(UMulWithOv, m_Intrinsic<Intrinsic::umul_with_overflow>(
3899 m_Value(A), m_Value(B)))) {
3900 Value *NotNullA = Builder.CreateIsNotNull(A);
3901 Value *NotNullB = Builder.CreateIsNotNull(B);
3902 return BinaryOperator::CreateAnd(NotNullA, NotNullB);
3903 }
3904 }
3905
3906 /// Res, Overflow = xxx_with_overflow X, C1
3907 /// Try to canonicalize the pattern "Overflow | icmp pred Res, C2" into
3908 /// "Overflow | icmp pred X, C2 +/- C1".
3909 const WithOverflowInst *WO;
3910 const Value *WOV;
3911 const APInt *C1, *C2;
3912 if (match(&I, m_c_Or(m_CombineAnd(m_ExtractValue<1>(m_CombineAnd(
3913 m_WithOverflowInst(WO), m_Value(WOV))),
3914 m_Value(Ov)),
3915 m_OneUse(m_ICmp(Pred, m_ExtractValue<0>(m_Deferred(WOV)),
3916 m_APInt(C2))))) &&
3917 (WO->getBinaryOp() == Instruction::Add ||
3918 WO->getBinaryOp() == Instruction::Sub) &&
3919 (ICmpInst::isEquality(Pred) ||
3920 WO->isSigned() == ICmpInst::isSigned(Pred)) &&
3921 match(WO->getRHS(), m_APInt(C1))) {
3922 bool Overflow;
3923 APInt NewC = WO->getBinaryOp() == Instruction::Add
3924 ? (ICmpInst::isSigned(Pred) ? C2->ssub_ov(*C1, Overflow)
3925 : C2->usub_ov(*C1, Overflow))
3926 : (ICmpInst::isSigned(Pred) ? C2->sadd_ov(*C1, Overflow)
3927 : C2->uadd_ov(*C1, Overflow));
3928 if (!Overflow || ICmpInst::isEquality(Pred)) {
3929 Value *NewCmp = Builder.CreateICmp(
3930 Pred, WO->getLHS(), ConstantInt::get(WO->getLHS()->getType(), NewC));
3931 return BinaryOperator::CreateOr(Ov, NewCmp);
3932 }
3933 }
3934
3935 // (~x) | y --> ~(x & (~y)) iff that gets rid of inversions
3936 if (sinkNotIntoOtherHandOfLogicalOp(I))
3937 return &I;
3938
3939 // Improve "get low bit mask up to and including bit X" pattern:
3940 // (1 << X) | ((1 << X) + -1) --> -1 l>> (bitwidth(x) - 1 - X)
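  // Worked i8 case (illustrative, X == 3): (1 << 3) | ((1 << 3) + -1) is
  // 0b00001111, and -1 l>> (8 - 1 - 3) == 0xFF l>> 4 is the same mask.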
3941 if (match(&I, m_c_Or(m_Add(m_Shl(m_One(), m_Value(X)), m_AllOnes()),
3942 m_Shl(m_One(), m_Deferred(X)))) &&
3943 match(&I, m_c_Or(m_OneUse(m_Value()), m_Value()))) {
3944 Value *Sub = Builder.CreateSub(
3945 ConstantInt::get(Ty, Ty->getScalarSizeInBits() - 1), X);
3946 return BinaryOperator::CreateLShr(Constant::getAllOnesValue(Ty), Sub);
3947 }
3948
3949 // An 'or' recurrence w/loop-invariant step is equivalent to (or start, step)
3950 PHINode *PN = nullptr;
3951 Value *Start = nullptr, *Step = nullptr;
3952 if (matchSimpleRecurrence(&I, PN, Start, Step) && DT.dominates(Step, PN))
3953 return replaceInstUsesWith(I, Builder.CreateOr(Start, Step));
3954
3955 // (A & B) | (C | D) or (C | D) | (A & B)
3956 // Can be combined if C or D is of the form (A & X) or (B & X).
3957 if (match(&I, m_c_Or(m_OneUse(m_And(m_Value(A), m_Value(B))),
3958 m_OneUse(m_Or(m_Value(C), m_Value(D)))))) {
3959 // (A & B) | (C | ?) -> C | (? | (A & B))
3960 // (C | ?) | (A & B) -> C | (? | (A & B))
3961 // ...where '?' is an 'and' of A or B with some other value (all commuted
3962 // variants are covered by the commutative matchers below).
3967 if (match(D, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3968 match(D, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3969 return BinaryOperator::CreateOr(
3970 C, Builder.CreateOr(D, Builder.CreateAnd(A, B)));
3971 // (A & B) | (? | D) -> (? | (A & B)) | D
3972 // (? | D) | (A & B) -> (? | (A & B)) | D
3973 // ...where '?' is an 'and' of A or B with some other value (all commuted
3974 // variants are covered by the commutative matchers below).
3979 if (match(C, m_OneUse(m_c_And(m_Specific(A), m_Value()))) ||
3980 match(C, m_OneUse(m_c_And(m_Specific(B), m_Value()))))
3981 return BinaryOperator::CreateOr(
3982 Builder.CreateOr(C, Builder.CreateAnd(A, B)), D);
3983 }
3984
3985 if (Instruction *R = reassociateForUses(I, Builder))
3986 return R;
3987
3988 if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
3989 return Canonicalized;
3990
3991 if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
3992 return Folded;
3993
3994 if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
3995 return Res;
3996
3997 // If we are setting the sign bit of a floating-point value, convert
3998 // this to fneg(fabs), then cast back to integer.
3999 //
4000 // If the result isn't immediately cast back to a float, this will increase
4001 // the number of instructions. This is still probably a better canonical form
4002 // as it enables FP value tracking.
4003 //
4004 // Assumes any IEEE-represented type has the sign bit in the high bit.
4005 //
4006 // This is a generous interpretation of noimplicitfloat, since this is not a
4007 // true floating-point operation.
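  // Illustrative IR for the f32 case (a sketch):
  //   %i = bitcast float %f to i32
  //   %r = or i32 %i, -2147483648      ; 0x80000000, the sign mask
  // -->
  //   %a = call float @llvm.fabs.f32(float %f)
  //   %n = fneg float %a
  //   %r = bitcast float %n to i32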
4008 Value *CastOp;
4009 if (match(Op0, m_ElementWiseBitCast(m_Value(CastOp))) &&
4010 match(Op1, m_SignMask()) &&
4011 !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
4012 Attribute::NoImplicitFloat)) {
4013 Type *EltTy = CastOp->getType()->getScalarType();
4014 if (EltTy->isFloatingPointTy() && EltTy->isIEEE()) {
4015 Value *FAbs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, CastOp);
4016 Value *FNegFAbs = Builder.CreateFNeg(FAbs);
4017 return new BitCastInst(FNegFAbs, I.getType());
4018 }
4019 }
4020
4021 // (X & C1) | C2 -> X & (C1 | C2) iff (X & C2) == C2
4022 if (match(Op0, m_OneUse(m_And(m_Value(X), m_APInt(C1)))) &&
4023 match(Op1, m_APInt(C2))) {
4024 KnownBits KnownX = computeKnownBits(X, /*Depth*/ 0, &I);
4025 if ((KnownX.One & *C2) == *C2)
4026 return BinaryOperator::CreateAnd(X, ConstantInt::get(Ty, *C1 | *C2));
4027 }
4028
4029 if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
4030 return Res;
4031
4032 if (Value *V =
4033 simplifyAndOrWithOpReplaced(Op0, Op1, Constant::getNullValue(Ty),
4034 /*SimplifyOnly*/ false, *this))
4035 return BinaryOperator::CreateOr(V, Op1);
4036 if (Value *V =
4037 simplifyAndOrWithOpReplaced(Op1, Op0, Constant::getNullValue(Ty),
4038 /*SimplifyOnly*/ false, *this))
4039 return BinaryOperator::CreateOr(Op0, V);
4040
4041 if (cast<PossiblyDisjointInst>(I).isDisjoint())
4042 if (Value *V = SimplifyAddWithRemainder(I))
4043 return replaceInstUsesWith(I, V);
4044
4045 return nullptr;
4046 }
4047
4048 /// A ^ B can be specified using other logic ops in a variety of patterns. We
4049 /// can fold these early and efficiently by morphing an existing instruction.
4050 static Instruction *foldXorToXor(BinaryOperator &I,
4051 InstCombiner::BuilderTy &Builder) {
4052 assert(I.getOpcode() == Instruction::Xor);
4053 Value *Op0 = I.getOperand(0);
4054 Value *Op1 = I.getOperand(1);
4055 Value *A, *B;
4056
4057 // There are 4 commuted variants for each of the basic patterns.
4058
4059 // (A & B) ^ (A | B) -> A ^ B
4060 // (A & B) ^ (B | A) -> A ^ B
4061 // (A | B) ^ (A & B) -> A ^ B
4062 // (A | B) ^ (B & A) -> A ^ B
4063 if (match(&I, m_c_Xor(m_And(m_Value(A), m_Value(B)),
4064 m_c_Or(m_Deferred(A), m_Deferred(B)))))
4065 return BinaryOperator::CreateXor(A, B);
4066
4067 // (A | ~B) ^ (~A | B) -> A ^ B
4068 // (~B | A) ^ (~A | B) -> A ^ B
4069 // (~A | B) ^ (A | ~B) -> A ^ B
4070 // (B | ~A) ^ (A | ~B) -> A ^ B
4071 if (match(&I, m_Xor(m_c_Or(m_Value(A), m_Not(m_Value(B))),
4072 m_c_Or(m_Not(m_Deferred(A)), m_Deferred(B)))))
4073 return BinaryOperator::CreateXor(A, B);
4074
4075 // (A & ~B) ^ (~A & B) -> A ^ B
4076 // (~B & A) ^ (~A & B) -> A ^ B
4077 // (~A & B) ^ (A & ~B) -> A ^ B
4078 // (B & ~A) ^ (A & ~B) -> A ^ B
4079 if (match(&I, m_Xor(m_c_And(m_Value(A), m_Not(m_Value(B))),
4080 m_c_And(m_Not(m_Deferred(A)), m_Deferred(B)))))
4081 return BinaryOperator::CreateXor(A, B);
4082
4083 // For the remaining cases we need to get rid of one of the operands.
4084 if (!Op0->hasOneUse() && !Op1->hasOneUse())
4085 return nullptr;
4086
4087 // (A | B) ^ ~(A & B) -> ~(A ^ B)
4088 // (A | B) ^ ~(B & A) -> ~(A ^ B)
4089 // (A & B) ^ ~(A | B) -> ~(A ^ B)
4090 // (A & B) ^ ~(B | A) -> ~(A ^ B)
4091 // Complexity sorting ensures the not will be on the right side.
4092 if ((match(Op0, m_Or(m_Value(A), m_Value(B))) &&
4093 match(Op1, m_Not(m_c_And(m_Specific(A), m_Specific(B))))) ||
4094 (match(Op0, m_And(m_Value(A), m_Value(B))) &&
4095 match(Op1, m_Not(m_c_Or(m_Specific(A), m_Specific(B))))))
4096 return BinaryOperator::CreateNot(Builder.CreateXor(A, B));
4097
4098 return nullptr;
4099 }
4100
4101 Value *InstCombinerImpl::foldXorOfICmps(ICmpInst *LHS, ICmpInst *RHS,
4102 BinaryOperator &I) {
4103 assert(I.getOpcode() == Instruction::Xor && I.getOperand(0) == LHS &&
4104 I.getOperand(1) == RHS && "Should be 'xor' with these operands");
4105
4106 ICmpInst::Predicate PredL = LHS->getPredicate(), PredR = RHS->getPredicate();
4107 Value *LHS0 = LHS->getOperand(0), *LHS1 = LHS->getOperand(1);
4108 Value *RHS0 = RHS->getOperand(0), *RHS1 = RHS->getOperand(1);
4109
4110 if (predicatesFoldable(PredL, PredR)) {
4111 if (LHS0 == RHS1 && LHS1 == RHS0) {
4112 std::swap(LHS0, LHS1);
4113 PredL = ICmpInst::getSwappedPredicate(PredL);
4114 }
4115 if (LHS0 == RHS0 && LHS1 == RHS1) {
4116 // (icmp1 A, B) ^ (icmp2 A, B) --> (icmp3 A, B)
4117 unsigned Code = getICmpCode(PredL) ^ getICmpCode(PredR);
4118 bool IsSigned = LHS->isSigned() || RHS->isSigned();
4119 return getNewICmpValue(Code, IsSigned, LHS0, LHS1, Builder);
4120 }
4121 }
4122
  // TODO: This can be generalized to compares of non-signbits using
  // decomposeBitTestICmp(). It could be enhanced further by using (something
  // like) foldLogOpOfMaskedICmps().
  const APInt *LC, *RC;
  if (match(LHS1, m_APInt(LC)) && match(RHS1, m_APInt(RC)) &&
      LHS0->getType() == RHS0->getType() &&
      LHS0->getType()->isIntOrIntVectorTy()) {
    // Convert xor of signbit tests to signbit test of xor'd values:
    // (X > -1) ^ (Y > -1) --> (X ^ Y) < 0
    // (X < 0) ^ (Y < 0) --> (X ^ Y) < 0
    // (X > -1) ^ (Y < 0) --> (X ^ Y) > -1
    // (X < 0) ^ (Y > -1) --> (X ^ Y) > -1
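    // Illustrative check with i8 values X = -5 (0xFB) and Y = 3 (0x03):
    // (X > -1) is false and (Y > -1) is true, so the xor is true. Likewise,
    // X ^ Y = 0xF8 = -8, and (-8 < 0) is true. The sign bit of X ^ Y is the
    // xor of the two sign bits, which is all these tests read.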
    bool TrueIfSignedL, TrueIfSignedR;
    if ((LHS->hasOneUse() || RHS->hasOneUse()) &&
        isSignBitCheck(PredL, *LC, TrueIfSignedL) &&
        isSignBitCheck(PredR, *RC, TrueIfSignedR)) {
      Value *XorLR = Builder.CreateXor(LHS0, RHS0);
      return TrueIfSignedL == TrueIfSignedR ? Builder.CreateIsNeg(XorLR)
                                            : Builder.CreateIsNotNeg(XorLR);
    }

    // Fold (icmp pred1 X, C1) ^ (icmp pred2 X, C2)
    // into a single comparison using range-based reasoning.
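    // For instance (illustrative): (X u< 4) ^ (X u< 8) is true exactly when
    // 4 <= X u< 8, i.e. the union [0,8) minus the intersection [0,4), which
    // getEquivalentICmp() expresses as (X - 4) u< 4.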
    if (LHS0 == RHS0) {
      ConstantRange CR1 = ConstantRange::makeExactICmpRegion(PredL, *LC);
      ConstantRange CR2 = ConstantRange::makeExactICmpRegion(PredR, *RC);
      auto CRUnion = CR1.exactUnionWith(CR2);
      auto CRIntersect = CR1.exactIntersectWith(CR2);
      if (CRUnion && CRIntersect)
        if (auto CR = CRUnion->exactIntersectWith(CRIntersect->inverse())) {
          if (CR->isFullSet())
            return ConstantInt::getTrue(I.getType());
          if (CR->isEmptySet())
            return ConstantInt::getFalse(I.getType());

          CmpInst::Predicate NewPred;
          APInt NewC, Offset;
          CR->getEquivalentICmp(NewPred, NewC, Offset);

          if ((Offset.isZero() && (LHS->hasOneUse() || RHS->hasOneUse())) ||
              (LHS->hasOneUse() && RHS->hasOneUse())) {
            Value *NewV = LHS0;
            Type *Ty = LHS0->getType();
            if (!Offset.isZero())
              NewV = Builder.CreateAdd(NewV, ConstantInt::get(Ty, Offset));
            return Builder.CreateICmp(NewPred, NewV,
                                      ConstantInt::get(Ty, NewC));
          }
        }
    }
  }

  // Instead of trying to imitate the folds for and/or, decompose this 'xor'
  // into those logic ops. That is, try to turn this into an and-of-icmps
  // because we have many folds for that pattern.
  //
  // This is based on a truth table definition of xor:
  // X ^ Y --> (X | Y) & !(X & Y)
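  // Truth table check: for bit pairs (X,Y) = 00,01,10,11, the xor column is
  // 0,1,1,0; (X | Y) is 0,1,1,1 and !(X & Y) is 1,1,1,0, and their AND is
  // 0,1,1,0 -- identical to xor.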
  if (Value *OrICmp = simplifyBinOp(Instruction::Or, LHS, RHS, SQ)) {
    // TODO: If OrICmp is true, then the definition of xor simplifies to !(X&Y).
    // TODO: If OrICmp is false, the whole thing is false (InstSimplify?).
    if (Value *AndICmp = simplifyBinOp(Instruction::And, LHS, RHS, SQ)) {
      // TODO: Independently handle cases where the 'and' side is a constant.
      ICmpInst *X = nullptr, *Y = nullptr;
      if (OrICmp == LHS && AndICmp == RHS) {
        // (LHS | RHS) & !(LHS & RHS) --> LHS & !RHS --> X & !Y
        X = LHS;
        Y = RHS;
      }
      if (OrICmp == RHS && AndICmp == LHS) {
        // !(LHS & RHS) & (LHS | RHS) --> !LHS & RHS --> !Y & X
        X = RHS;
        Y = LHS;
      }
      if (X && Y && (Y->hasOneUse() || canFreelyInvertAllUsersOf(Y, &I))) {
        // Invert the predicate of 'Y', thus inverting its output.
        Y->setPredicate(Y->getInversePredicate());
        // So, are there other uses of Y?
        if (!Y->hasOneUse()) {
          // We need to adapt other uses of Y though. Get a value that matches
          // the original value of Y before inversion. While this increases
          // immediate instruction count, we have just ensured that all the
          // users are freely-invertible, so that 'not' *will* get folded away.
          BuilderTy::InsertPointGuard Guard(Builder);
          // Set insertion point to right after the Y.
          Builder.SetInsertPoint(Y->getParent(), ++(Y->getIterator()));
          Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
          // Replace all uses of Y (excluding the one in NotY!) with NotY.
          Worklist.pushUsersToWorkList(*Y);
          Y->replaceUsesWithIf(NotY,
                               [NotY](Use &U) { return U.getUser() != NotY; });
        }
        // All done.
        return Builder.CreateAnd(LHS, RHS);
      }
    }
  }

  return nullptr;
}

/// If we have a masked merge, in the canonical form of:
/// (assuming that A only has one use.)
///   |        A  |  |B|
///   ((x ^ y) & M) ^ y
///    |  D  |
/// * If M is inverted:
///      |  D  |
///   ((x ^ y) & ~M) ^ y
///   We can canonicalize by swapping the final xor operand
///   to eliminate the 'not' of the mask.
///   ((x ^ y) & M) ^ x
/// * If M is a constant, and D has one use, we transform to 'and' / 'or' ops
///   because that shortens the dependency chain and improves analysis:
///   (x & M) | (y & ~M)
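/// Worked example (illustrative): with x = 0b1010, y = 0b0110, M = 0b1100:
/// ((x ^ y) & M) ^ y = (0b1100 & 0b1100) ^ 0b0110 = 0b1010, i.e. the bits of
/// x where M is set and the bits of y elsewhere, matching the unfolded form
/// (x & M) | (y & ~M) = 0b1000 | 0b0010 = 0b1010.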
static Instruction *visitMaskedMerge(BinaryOperator &I,
                                     InstCombiner::BuilderTy &Builder) {
  Value *B, *X, *D;
  Value *M;
  if (!match(&I, m_c_Xor(m_Value(B),
                         m_OneUse(m_c_And(
                             m_CombineAnd(m_c_Xor(m_Deferred(B), m_Value(X)),
                                          m_Value(D)),
                             m_Value(M))))))
    return nullptr;

  Value *NotM;
  if (match(M, m_Not(m_Value(NotM)))) {
    // De-invert the mask and swap the value in B part.
    Value *NewA = Builder.CreateAnd(D, NotM);
    return BinaryOperator::CreateXor(NewA, X);
  }

  Constant *C;
  if (D->hasOneUse() && match(M, m_Constant(C))) {
    // Propagating undef is unsafe. Clamp undef elements to -1.
    Type *EltTy = C->getType()->getScalarType();
    C = Constant::replaceUndefsWith(C, ConstantInt::getAllOnesValue(EltTy));
    // Unfold.
    Value *LHS = Builder.CreateAnd(X, C);
    Value *NotC = Builder.CreateNot(C);
    Value *RHS = Builder.CreateAnd(B, NotC);
    return BinaryOperator::CreateOr(LHS, RHS);
  }

  return nullptr;
}

static Instruction *foldNotXor(BinaryOperator &I,
                               InstCombiner::BuilderTy &Builder) {
  Value *X, *Y;
  // FIXME: one-use check is not needed in general, but currently we are unable
  // to fold 'not' into 'icmp', if that 'icmp' has multiple uses. (D35182)
  if (!match(&I, m_Not(m_OneUse(m_Xor(m_Value(X), m_Value(Y))))))
    return nullptr;

  auto hasCommonOperand = [](Value *A, Value *B, Value *C, Value *D) {
    return A == C || A == D || B == C || B == D;
  };

  Value *A, *B, *C, *D;
  // Canonicalize ~((A & B) ^ (A | ?)) -> (A & B) | ~(A | ?)
  // 4 commuted variants
  if (match(X, m_And(m_Value(A), m_Value(B))) &&
      match(Y, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
    Value *NotY = Builder.CreateNot(Y);
    return BinaryOperator::CreateOr(X, NotY);
  }

  // Canonicalize ~((A | ?) ^ (A & B)) -> (A & B) | ~(A | ?)
  // 4 commuted variants
  if (match(Y, m_And(m_Value(A), m_Value(B))) &&
      match(X, m_Or(m_Value(C), m_Value(D))) && hasCommonOperand(A, B, C, D)) {
    Value *NotX = Builder.CreateNot(X);
    return BinaryOperator::CreateOr(Y, NotX);
  }

  return nullptr;
}

/// Canonicalize a shifty way to code absolute value to the more common pattern
/// that uses negation and select.
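/// For illustration, with i32 A = -7: ashr(A, 31) = -1, add(A, -1) = -8, and
/// xor(-8, -1) = 7 = |A|; with A = 5 the ashr gives 0 and both ops are
/// no-ops. The canonical form produced here is: (A < 0) ? -A : A.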
static Instruction *canonicalizeAbs(BinaryOperator &Xor,
                                    InstCombiner::BuilderTy &Builder) {
  assert(Xor.getOpcode() == Instruction::Xor && "Expected an xor instruction.");

  // There are 4 potential commuted variants. Move the 'ashr' candidate to Op1.
  // We're relying on the fact that we only do this transform when the shift has
  // exactly 2 uses and the add has exactly 1 use (otherwise, we might increase
  // the instruction count).
  Value *Op0 = Xor.getOperand(0), *Op1 = Xor.getOperand(1);
  if (Op0->hasNUses(2))
    std::swap(Op0, Op1);

  Type *Ty = Xor.getType();
  Value *A;
  const APInt *ShAmt;
  if (match(Op1, m_AShr(m_Value(A), m_APInt(ShAmt))) &&
      Op1->hasNUses(2) && *ShAmt == Ty->getScalarSizeInBits() - 1 &&
      match(Op0, m_OneUse(m_c_Add(m_Specific(A), m_Specific(Op1))))) {
    // Op1 = ashr i32 A, 31   ; smear the sign bit
    // xor (add A, Op1), Op1  ; add -1 and flip bits if negative
    // --> (A < 0) ? -A : A
    Value *IsNeg = Builder.CreateIsNeg(A);
    // Copy the nsw flags from the add to the negate.
    auto *Add = cast<BinaryOperator>(Op0);
    Value *NegA = Add->hasNoUnsignedWrap()
                      ? Constant::getNullValue(A->getType())
                      : Builder.CreateNeg(A, "", Add->hasNoSignedWrap());
    return SelectInst::Create(IsNeg, NegA, A);
  }
  return nullptr;
}

static bool canFreelyInvert(InstCombiner &IC, Value *Op,
                            Instruction *IgnoredUser) {
  auto *I = dyn_cast<Instruction>(Op);
  return I && IC.isFreeToInvert(I, /*WillInvertAllUses=*/true) &&
         IC.canFreelyInvertAllUsersOf(I, IgnoredUser);
}

static Value *freelyInvert(InstCombinerImpl &IC, Value *Op,
                           Instruction *IgnoredUser) {
  auto *I = cast<Instruction>(Op);
  IC.Builder.SetInsertPoint(*I->getInsertionPointAfterDef());
  Value *NotOp = IC.Builder.CreateNot(Op, Op->getName() + ".not");
  Op->replaceUsesWithIf(NotOp,
                        [NotOp](Use &U) { return U.getUser() != NotOp; });
  IC.freelyInvertAllUsersOf(NotOp, IgnoredUser);
  return NotOp;
}

// Transform
//   z = ~(x &/| y)
// into:
//   z = ((~x) |/& (~y))
// iff both x and y are free to invert and all uses of z can be freely updated.
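// This is De Morgan's law with the 'not' sunk into the operands; e.g. with
// i4 values x = 0b1100 and y = 0b1010: ~(x & y) = ~0b1000 = 0b0111, and
// (~x) | (~y) = 0b0011 | 0b0101 = 0b0111 as well.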
bool InstCombinerImpl::sinkNotIntoLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
    return false;

  // If this logic op has not been simplified yet, just bail out and let that
  // happen first. Otherwise, the code below may wrongly invert.
  if (Op0 == Op1)
    return false;

  Instruction::BinaryOps NewOpc =
      match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(I);

  // Can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
    return false;

  // And can the operands be adapted?
  if (!canFreelyInvert(*this, Op0, &I) || !canFreelyInvert(*this, Op1, &I))
    return false;

  Op0 = freelyInvert(*this, Op0, &I);
  Op1 = freelyInvert(*this, Op1, &I);

  Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
  Value *NewLogicOp;
  if (IsBinaryOp)
    NewLogicOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
  else
    NewLogicOp =
        Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");

  replaceInstUsesWith(I, NewLogicOp);
  // We cannot just create an outer `not`; it would most likely be immediately
  // folded back, reconstructing our initial pattern and causing an infinite
  // combine loop, so we immediately fold it away manually.
  freelyInvertAllUsersOf(NewLogicOp);
  return true;
}

// Transform
//   z = (~x) &/| y
// into:
//   z = ~(x |/& (~y))
// iff y is free to invert and all uses of z can be freely updated.
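// Again De Morgan's law, applied in the other direction so the existing ~x
// can be reused: (~x) & y = ~(x | ~y), since inverting both operands of an
// 'or' and then its result yields an 'and'.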
bool InstCombinerImpl::sinkNotIntoOtherHandOfLogicalOp(Instruction &I) {
  Value *Op0, *Op1;
  if (!match(&I, m_LogicalOp(m_Value(Op0), m_Value(Op1))))
    return false;
  Instruction::BinaryOps NewOpc =
      match(&I, m_LogicalAnd()) ? Instruction::Or : Instruction::And;
  bool IsBinaryOp = isa<BinaryOperator>(I);

  Value *NotOp0 = nullptr;
  Value *NotOp1 = nullptr;
  Value **OpToInvert = nullptr;
  if (match(Op0, m_Not(m_Value(NotOp0))) && canFreelyInvert(*this, Op1, &I)) {
    Op0 = NotOp0;
    OpToInvert = &Op1;
  } else if (match(Op1, m_Not(m_Value(NotOp1))) &&
             canFreelyInvert(*this, Op0, &I)) {
    Op1 = NotOp1;
    OpToInvert = &Op0;
  } else
    return false;

  // And can our users be adapted?
  if (!InstCombiner::canFreelyInvertAllUsersOf(&I, /*IgnoredUser=*/nullptr))
    return false;

  *OpToInvert = freelyInvert(*this, *OpToInvert, &I);

  Builder.SetInsertPoint(*I.getInsertionPointAfterDef());
  Value *NewBinOp;
  if (IsBinaryOp)
    NewBinOp = Builder.CreateBinOp(NewOpc, Op0, Op1, I.getName() + ".not");
  else
    NewBinOp = Builder.CreateLogicalOp(NewOpc, Op0, Op1, I.getName() + ".not");
  replaceInstUsesWith(I, NewBinOp);
  // We cannot just create an outer `not`; it would most likely be immediately
  // folded back, reconstructing our initial pattern and causing an infinite
  // combine loop, so we immediately fold it away manually.
  freelyInvertAllUsersOf(NewBinOp);
  return true;
}

Instruction *InstCombinerImpl::foldNot(BinaryOperator &I) {
  Value *NotOp;
  if (!match(&I, m_Not(m_Value(NotOp))))
    return nullptr;

  // Apply DeMorgan's Law for 'nand' / 'nor' logic with an inverted operand.
  // We must eliminate the and/or (one-use) for these transforms to not increase
  // the instruction count.
  //
  // ~(~X & Y) --> (X | ~Y)
  // ~(Y & ~X) --> (X | ~Y)
  //
  // Note: The logical matches do not check for the commuted patterns because
  // those are handled via SimplifySelectsFeedingBinaryOp().
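  // Quick sanity check of the first identity: ~(~X & Y) = ~~X | ~Y = X | ~Y
  // by De Morgan; the existing inner 'not' is consumed rather than a new one
  // created, so the instruction count does not grow.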
  Type *Ty = I.getType();
  Value *X, *Y;
  if (match(NotOp, m_OneUse(m_c_And(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateOr(X, NotY);
  }
  if (match(NotOp, m_OneUse(m_LogicalAnd(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return SelectInst::Create(X, ConstantInt::getTrue(Ty), NotY);
  }

  // ~(~X | Y) --> (X & ~Y)
  // ~(Y | ~X) --> (X & ~Y)
  if (match(NotOp, m_OneUse(m_c_Or(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return BinaryOperator::CreateAnd(X, NotY);
  }
  if (match(NotOp, m_OneUse(m_LogicalOr(m_Not(m_Value(X)), m_Value(Y))))) {
    Value *NotY = Builder.CreateNot(Y, Y->getName() + ".not");
    return SelectInst::Create(X, NotY, ConstantInt::getFalse(Ty));
  }

  // Is this a 'not' (~) fed by a binary operator?
  BinaryOperator *NotVal;
  if (match(NotOp, m_BinOp(NotVal))) {
    // ~((-X) | Y) --> (X - 1) & (~Y)
    if (match(NotVal,
              m_OneUse(m_c_Or(m_OneUse(m_Neg(m_Value(X))), m_Value(Y))))) {
      Value *DecX = Builder.CreateAdd(X, ConstantInt::getAllOnesValue(Ty));
      Value *NotY = Builder.CreateNot(Y);
      return BinaryOperator::CreateAnd(DecX, NotY);
    }

    // ~(~X >>s Y) --> (X >>s Y)
    if (match(NotVal, m_AShr(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateAShr(X, Y);

    // Treat lshr with non-negative operand as ashr.
    // ~(~X >>u Y) --> (X >>s Y) iff X is known negative
    if (match(NotVal, m_LShr(m_Not(m_Value(X)), m_Value(Y))) &&
        isKnownNegative(X, SQ.getWithInstruction(NotVal)))
      return BinaryOperator::CreateAShr(X, Y);

    // Bit-hack form of a signbit test for iN type:
    // ~(X >>s (N - 1)) --> sext i1 (X > -1) to iN
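    // E.g. for i32: if X >= 0, (X >>s 31) is 0 and the 'not' yields -1, which
    // is sext(true); if X < 0, (X >>s 31) is -1 and the 'not' yields 0, which
    // is sext(false) -- exactly sext(X > -1).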
    unsigned FullShift = Ty->getScalarSizeInBits() - 1;
    if (match(NotVal, m_OneUse(m_AShr(m_Value(X), m_SpecificInt(FullShift))))) {
      Value *IsNotNeg = Builder.CreateIsNotNeg(X, "isnotneg");
      return new SExtInst(IsNotNeg, Ty);
    }

    // If we are inverting a right-shifted constant, we may be able to eliminate
    // the 'not' by inverting the constant and using the opposite shift type.
    // Canonicalization rules ensure that only a negative constant uses 'ashr',
    // but we must check that in case that transform has not fired yet.

    // ~(C >>s Y) --> ~C >>u Y (when inverting the replicated sign bits)
    Constant *C;
    if (match(NotVal, m_AShr(m_Constant(C), m_Value(Y))) &&
        match(C, m_Negative()))
      return BinaryOperator::CreateLShr(ConstantExpr::getNot(C), Y);

    // ~(C >>u Y) --> ~C >>s Y (when inverting the replicated sign bits)
    if (match(NotVal, m_LShr(m_Constant(C), m_Value(Y))) &&
        match(C, m_NonNegative()))
      return BinaryOperator::CreateAShr(ConstantExpr::getNot(C), Y);

    // ~(X + C) --> ~C - X
    if (match(NotVal, m_Add(m_Value(X), m_ImmConstant(C))))
      return BinaryOperator::CreateSub(ConstantExpr::getNot(C), X);

    // ~(X - Y) --> ~X + Y
    // FIXME: is it really beneficial to sink the `not` here?
    if (match(NotVal, m_Sub(m_Value(X), m_Value(Y))))
      if (isa<Constant>(X) || NotVal->hasOneUse())
        return BinaryOperator::CreateAdd(Builder.CreateNot(X), Y);

    // ~(~X + Y) --> X - Y
    if (match(NotVal, m_c_Add(m_Not(m_Value(X)), m_Value(Y))))
      return BinaryOperator::CreateWithCopiedFlags(Instruction::Sub, X, Y,
                                                   NotVal);
  }

  // not (cmp A, B) = !cmp A, B
  CmpInst::Predicate Pred;
  if (match(NotOp, m_Cmp(Pred, m_Value(), m_Value())) &&
      (NotOp->hasOneUse() ||
       InstCombiner::canFreelyInvertAllUsersOf(cast<Instruction>(NotOp),
                                               /*IgnoredUser=*/nullptr))) {
    cast<CmpInst>(NotOp)->setPredicate(CmpInst::getInversePredicate(Pred));
    freelyInvertAllUsersOf(NotOp);
    return &I;
  }

  // Move a 'not' ahead of casts of a bool to enable logic reduction:
  // not (bitcast (sext i1 X)) --> bitcast (sext (not i1 X))
  if (match(NotOp, m_OneUse(m_BitCast(m_OneUse(m_SExt(m_Value(X)))))) &&
      X->getType()->isIntOrIntVectorTy(1)) {
    Type *SextTy = cast<BitCastOperator>(NotOp)->getSrcTy();
    Value *NotX = Builder.CreateNot(X);
    Value *Sext = Builder.CreateSExt(NotX, SextTy);
    return CastInst::CreateBitOrPointerCast(Sext, Ty);
  }

  if (auto *NotOpI = dyn_cast<Instruction>(NotOp))
    if (sinkNotIntoLogicalOp(*NotOpI))
      return &I;

  // Eliminate a bitwise 'not' op of 'not' min/max by inverting the min/max:
  // ~min(~X, ~Y) --> max(X, Y)
  // ~max(~X, Y) --> min(X, ~Y)
  auto *II = dyn_cast<IntrinsicInst>(NotOp);
  if (II && II->hasOneUse()) {
    if (match(NotOp, m_c_MaxOrMin(m_Not(m_Value(X)), m_Value(Y)))) {
      Intrinsic::ID InvID = getInverseMinMaxIntrinsic(II->getIntrinsicID());
      Value *NotY = Builder.CreateNot(Y);
      Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, NotY);
      return replaceInstUsesWith(I, InvMaxMin);
    }

    if (II->getIntrinsicID() == Intrinsic::is_fpclass) {
      ConstantInt *ClassMask = cast<ConstantInt>(II->getArgOperand(1));
      II->setArgOperand(
          1, ConstantInt::get(ClassMask->getType(),
                              ~ClassMask->getZExtValue() & fcAllFlags));
      return replaceInstUsesWith(I, II);
    }
  }

  if (NotOp->hasOneUse()) {
    // Pull 'not' into the operands of a select if both operands are one-use
    // compares, or if one is a one-use compare and the other is a constant.
    // Inverting the predicates eliminates the 'not' operation.
    // Example:
    // not (select ?, (cmp TPred, ?, ?), (cmp FPred, ?, ?)) -->
    //     select ?, (cmp InvTPred, ?, ?), (cmp InvFPred, ?, ?)
    // not (select ?, (cmp TPred, ?, ?), true) -->
    //     select ?, (cmp InvTPred, ?, ?), false
    if (auto *Sel = dyn_cast<SelectInst>(NotOp)) {
      Value *TV = Sel->getTrueValue();
      Value *FV = Sel->getFalseValue();
      auto *CmpT = dyn_cast<CmpInst>(TV);
      auto *CmpF = dyn_cast<CmpInst>(FV);
      bool InvertibleT = (CmpT && CmpT->hasOneUse()) || isa<Constant>(TV);
      bool InvertibleF = (CmpF && CmpF->hasOneUse()) || isa<Constant>(FV);
      if (InvertibleT && InvertibleF) {
        if (CmpT)
          CmpT->setPredicate(CmpT->getInversePredicate());
        else
          Sel->setTrueValue(ConstantExpr::getNot(cast<Constant>(TV)));
        if (CmpF)
          CmpF->setPredicate(CmpF->getInversePredicate());
        else
          Sel->setFalseValue(ConstantExpr::getNot(cast<Constant>(FV)));
        return replaceInstUsesWith(I, Sel);
      }
    }
  }

  if (Instruction *NewXor = foldNotXor(I, Builder))
    return NewXor;

  // TODO: Could handle multi-use better by checking if all uses of NotOp (other
  // than I) can be inverted.
  if (Value *R = getFreelyInverted(NotOp, NotOp->hasOneUse(), &Builder))
    return replaceInstUsesWith(I, R);

  return nullptr;
}

// FIXME: We use commutative matchers (m_c_*) for some, but not all, matches
// here. We should standardize that construct where it is needed or choose some
// other way to ensure that commutated variants of patterns are not missed.
Instruction *InstCombinerImpl::visitXor(BinaryOperator &I) {
  if (Value *V = simplifyXorInst(I.getOperand(0), I.getOperand(1),
                                 SQ.getWithInstruction(&I)))
    return replaceInstUsesWith(I, V);

  if (SimplifyAssociativeOrCommutative(I))
    return &I;

  if (Instruction *X = foldVectorBinop(I))
    return X;

  if (Instruction *Phi = foldBinopWithPhiOperands(I))
    return Phi;

  if (Instruction *NewXor = foldXorToXor(I, Builder))
    return NewXor;

  // (A&B)^(A&C) -> A&(B^C) etc
  if (Value *V = foldUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // See if we can simplify any instructions used by the instruction whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(I))
    return &I;

  if (Instruction *R = foldNot(I))
    return R;

  if (Instruction *R = foldBinOpShiftWithShift(I))
    return R;

  // Fold (X & M) ^ (Y & ~M) -> (X & M) | (Y & ~M)
  // This is a special case in haveNoCommonBitsSet, but the computeKnownBits
  // calls in there are unnecessary as SimplifyDemandedInstructionBits should
  // have already taken care of those cases.
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);
  Value *M;
  if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(M)), m_Value()),
                        m_c_And(m_Deferred(M), m_Value())))) {
    if (isGuaranteedNotToBeUndef(M))
      return BinaryOperator::CreateDisjointOr(Op0, Op1);
    else
      return BinaryOperator::CreateOr(Op0, Op1);
  }

  if (Instruction *Xor = visitMaskedMerge(I, Builder))
    return Xor;

  Value *X, *Y;
  Constant *C1;
  if (match(Op1, m_Constant(C1))) {
    Constant *C2;

    if (match(Op0, m_OneUse(m_Or(m_Value(X), m_ImmConstant(C2)))) &&
        match(C1, m_ImmConstant())) {
      // (X | C2) ^ C1 --> (X & ~C2) ^ (C1^C2)
      C2 = Constant::replaceUndefsWith(
          C2, Constant::getAllOnesValue(C2->getType()->getScalarType()));
      Value *And = Builder.CreateAnd(
          X, Constant::mergeUndefsWith(ConstantExpr::getNot(C2), C1));
      return BinaryOperator::CreateXor(
          And, Constant::mergeUndefsWith(ConstantExpr::getXor(C1, C2), C1));
    }

    // Use DeMorgan and reassociation to eliminate a 'not' op.
    if (match(Op0, m_OneUse(m_Or(m_Not(m_Value(X)), m_Constant(C2))))) {
      // (~X | C2) ^ C1 --> ((X & ~C2) ^ -1) ^ C1 --> (X & ~C2) ^ ~C1
      Value *And = Builder.CreateAnd(X, ConstantExpr::getNot(C2));
      return BinaryOperator::CreateXor(And, ConstantExpr::getNot(C1));
    }
    if (match(Op0, m_OneUse(m_And(m_Not(m_Value(X)), m_Constant(C2))))) {
      // (~X & C2) ^ C1 --> ((X | ~C2) ^ -1) ^ C1 --> (X | ~C2) ^ ~C1
      Value *Or = Builder.CreateOr(X, ConstantExpr::getNot(C2));
      return BinaryOperator::CreateXor(Or, ConstantExpr::getNot(C1));
    }

    // Convert xor ([trunc] (ashr X, BW-1)), C =>
    //   select(X >s -1, C, ~C)
    // The ashr produces an all-zeros or all-ones value, which then optionally
    // inverts the constant depending on whether the input is negative.
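    // E.g. for i8 with C = 0x0F: if X >= 0 the ashr gives 0 and the xor is a
    // no-op (result 0x0F = C); if X < 0 the ashr gives 0xFF and the xor flips
    // every bit (result 0xF0 = ~C).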
    const APInt *CA;
    if (match(Op0, m_OneUse(m_TruncOrSelf(
                       m_AShr(m_Value(X), m_APIntAllowPoison(CA))))) &&
        *CA == X->getType()->getScalarSizeInBits() - 1 &&
        !match(C1, m_AllOnes())) {
      assert(!C1->isZeroValue() && "Unexpected xor with 0");
      Value *IsNotNeg = Builder.CreateIsNotNeg(X);
      return SelectInst::Create(IsNotNeg, Op1, Builder.CreateNot(Op1));
    }
  }

  Type *Ty = I.getType();
  {
    const APInt *RHSC;
    if (match(Op1, m_APInt(RHSC))) {
      Value *X;
      const APInt *C;
      // (C - X) ^ signmaskC --> (C + signmaskC) - X
      if (RHSC->isSignMask() && match(Op0, m_Sub(m_APInt(C), m_Value(X))))
        return BinaryOperator::CreateSub(ConstantInt::get(Ty, *C + *RHSC), X);

      // (X + C) ^ signmaskC --> X + (C + signmaskC)
      if (RHSC->isSignMask() && match(Op0, m_Add(m_Value(X), m_APInt(C))))
        return BinaryOperator::CreateAdd(X, ConstantInt::get(Ty, *C + *RHSC));
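      // Both folds hold because xor'ing the sign mask is the same as adding
      // it: flipping the top bit cannot carry into a wider bit. E.g. for i8,
      // X ^ 0x80 == X + 0x80 (mod 256), so the constants reassociate freely.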

      // (X | C) ^ RHSC --> X ^ (C ^ RHSC) iff X & C == 0
      if (match(Op0, m_Or(m_Value(X), m_APInt(C))) &&
          MaskedValueIsZero(X, *C, 0, &I))
        return BinaryOperator::CreateXor(X, ConstantInt::get(Ty, *C ^ *RHSC));

      // When X is a power-of-two or zero and zero input is poison:
      // ctlz(i32 X) ^ 31 --> cttz(X)
      // cttz(i32 X) ^ 31 --> ctlz(X)
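      // E.g. for X = 8 = 2^3: ctlz(8) = 28, and 28 ^ 31 = 3 = cttz(8). In
      // general, for X = 2^k in i32, ctlz gives 31 - k, and since 31 is all
      // ones in the low 5 bits, (31 - k) ^ 31 == k.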
      auto *II = dyn_cast<IntrinsicInst>(Op0);
      if (II && II->hasOneUse() && *RHSC == Ty->getScalarSizeInBits() - 1) {
        Intrinsic::ID IID = II->getIntrinsicID();
        if ((IID == Intrinsic::ctlz || IID == Intrinsic::cttz) &&
            match(II->getArgOperand(1), m_One()) &&
            isKnownToBeAPowerOfTwo(II->getArgOperand(0), /*OrZero*/ true)) {
          IID = (IID == Intrinsic::ctlz) ? Intrinsic::cttz : Intrinsic::ctlz;
          Function *F = Intrinsic::getDeclaration(II->getModule(), IID, Ty);
          return CallInst::Create(F, {II->getArgOperand(0), Builder.getTrue()});
        }
      }

      // If RHSC is inverting the remaining bits of shifted X,
      // canonicalize to a 'not' before the shift to help SCEV and codegen:
      // (X << C) ^ RHSC --> ~X << C
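      // E.g. for i8 with C = 4: RHSC must be 0xF0 (all ones shifted left by
      // 4); (X << 4) ^ 0xF0 flips exactly the four bits holding X's low
      // nibble, which is the same as shifting ~X left by 4.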
      if (match(Op0, m_OneUse(m_Shl(m_Value(X), m_APInt(C)))) &&
          *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).shl(*C)) {
        Value *NotX = Builder.CreateNot(X);
        return BinaryOperator::CreateShl(NotX, ConstantInt::get(Ty, *C));
      }
      // (X >>u C) ^ RHSC --> ~X >>u C
      if (match(Op0, m_OneUse(m_LShr(m_Value(X), m_APInt(C)))) &&
          *RHSC == APInt::getAllOnes(Ty->getScalarSizeInBits()).lshr(*C)) {
        Value *NotX = Builder.CreateNot(X);
        return BinaryOperator::CreateLShr(NotX, ConstantInt::get(Ty, *C));
      }
      // TODO: We could handle 'ashr' here as well. That would be matching
      //       a 'not' op and moving it before the shift. Doing that requires
      //       preventing the inverse fold in canShiftBinOpWithConstantRHS().
    }

    // If we are XORing the sign bit of a floating-point value, convert
    // this to fneg, then cast back to integer.
    //
    // This is a generous interpretation of noimplicitfloat; this is not a true
    // floating-point operation.
    //
    // Assumes any IEEE-represented type has the sign bit in the high bit.
    // TODO: Unify with APInt matcher. This version allows undef unlike m_APInt
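    // E.g. for f32: bitcast(1.0f) = 0x3F800000, and xor'ing the sign mask
    // 0x80000000 yields 0xBF800000 = bitcast(-1.0f), i.e. an fneg.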
    Value *CastOp;
    if (match(Op0, m_ElementWiseBitCast(m_Value(CastOp))) &&
        match(Op1, m_SignMask()) &&
        !Builder.GetInsertBlock()->getParent()->hasFnAttribute(
            Attribute::NoImplicitFloat)) {
      Type *EltTy = CastOp->getType()->getScalarType();
      if (EltTy->isFloatingPointTy() && EltTy->isIEEE()) {
        Value *FNeg = Builder.CreateFNeg(CastOp);
        return new BitCastInst(FNeg, I.getType());
      }
    }
  }

  // FIXME: This should not be limited to scalar (pull into APInt match above).
  {
    Value *X;
    ConstantInt *C1, *C2, *C3;
    // ((X^C1) >> C2) ^ C3 -> (X>>C2) ^ ((C1>>C2)^C3)
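    // This works because lshr distributes over xor: (X ^ C1) >> C2 equals
    // (X >> C2) ^ (C1 >> C2). E.g. ((X ^ 0xFF00) >> 8) ^ 0x0F becomes
    // (X >> 8) ^ (0xFF ^ 0x0F) = (X >> 8) ^ 0xF0.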
    if (match(Op1, m_ConstantInt(C3)) &&
        match(Op0, m_LShr(m_Xor(m_Value(X), m_ConstantInt(C1)),
                          m_ConstantInt(C2))) &&
        Op0->hasOneUse()) {
      // fold (C1 >> C2) ^ C3
      APInt FoldConst = C1->getValue().lshr(C2->getValue());
      FoldConst ^= C3->getValue();
      // Prepare the two operands.
      auto *Opnd0 = Builder.CreateLShr(X, C2);
      Opnd0->takeName(Op0);
      return BinaryOperator::CreateXor(Opnd0, ConstantInt::get(Ty, FoldConst));
    }
  }

  if (Instruction *FoldedLogic = foldBinOpIntoSelectOrPhi(I))
    return FoldedLogic;

  // Y ^ (X | Y) --> X & ~Y
  // Y ^ (Y | X) --> X & ~Y
  if (match(Op1, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op0)))))
    return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op0));
  // (X | Y) ^ Y --> X & ~Y
  // (Y | X) ^ Y --> X & ~Y
  if (match(Op0, m_OneUse(m_c_Or(m_Value(X), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(X, Builder.CreateNot(Op1));

  // Y ^ (X & Y) --> ~X & Y
  // Y ^ (Y & X) --> ~X & Y
  if (match(Op1, m_OneUse(m_c_And(m_Value(X), m_Specific(Op0)))))
    return BinaryOperator::CreateAnd(Op0, Builder.CreateNot(X));
  // (X & Y) ^ Y --> ~X & Y
  // (Y & X) ^ Y --> ~X & Y
  // Canonical form is (X & C) ^ C; don't touch that.
  // TODO: A 'not' op is better for analysis and codegen, but demanded bits must
  //       be fixed to prefer that (otherwise we get infinite looping).
  if (!match(Op1, m_Constant()) &&
      match(Op0, m_OneUse(m_c_And(m_Value(X), m_Specific(Op1)))))
    return BinaryOperator::CreateAnd(Op1, Builder.CreateNot(X));

  Value *A, *B, *C;
  // (A ^ B) ^ (A | C) --> (~A & C) ^ B -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                        m_OneUse(m_c_Or(m_Deferred(A), m_Value(C))))))
    return BinaryOperator::CreateXor(
        Builder.CreateAnd(Builder.CreateNot(A), C), B);

  // (A ^ B) ^ (B | C) --> (~B & C) ^ A -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_Value(A), m_Value(B))),
                        m_OneUse(m_c_Or(m_Deferred(B), m_Value(C))))))
    return BinaryOperator::CreateXor(
        Builder.CreateAnd(Builder.CreateNot(B), C), A);

  // (A & B) ^ (A ^ B) -> (A | B)
  if (match(Op0, m_And(m_Value(A), m_Value(B))) &&
      match(Op1, m_c_Xor(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);
  // (A ^ B) ^ (A & B) -> (A | B)
  if (match(Op0, m_Xor(m_Value(A), m_Value(B))) &&
      match(Op1, m_c_And(m_Specific(A), m_Specific(B))))
    return BinaryOperator::CreateOr(A, B);

  // (A & ~B) ^ ~A -> ~(A & B)
  // (~B & A) ^ ~A -> ~(A & B)
  if (match(Op0, m_c_And(m_Value(A), m_Not(m_Value(B)))) &&
      match(Op1, m_Not(m_Specific(A))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(A, B));

  // (~A & B) ^ A --> A | B -- There are 4 commuted variants.
  if (match(&I, m_c_Xor(m_c_And(m_Not(m_Value(A)), m_Value(B)), m_Deferred(A))))
    return BinaryOperator::CreateOr(A, B);

  // (~A | B) ^ A --> ~(A & B)
  if (match(Op0, m_OneUse(m_c_Or(m_Not(m_Specific(Op1)), m_Value(B)))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(Op1, B));

  // A ^ (~A | B) --> ~(A & B)
  if (match(Op1, m_OneUse(m_c_Or(m_Not(m_Specific(Op0)), m_Value(B)))))
    return BinaryOperator::CreateNot(Builder.CreateAnd(Op0, B));

  // (A | B) ^ (A | C) --> (B ^ C) & ~A -- There are 4 commuted variants.
  // TODO: Loosen one-use restriction if common operand is a constant.
  Value *D;
  if (match(Op0, m_OneUse(m_Or(m_Value(A), m_Value(B)))) &&
      match(Op1, m_OneUse(m_Or(m_Value(C), m_Value(D))))) {
    if (B == C || B == D)
      std::swap(A, B);
    if (A == C)
      std::swap(C, D);
    if (A == D) {
      Value *NotA = Builder.CreateNot(A);
      return BinaryOperator::CreateAnd(Builder.CreateXor(B, C), NotA);
    }
  }

  // (A & B) ^ (A | C) --> A ? ~B : C -- There are 4 commuted variants.
  if (I.getType()->isIntOrIntVectorTy(1) &&
      match(Op0, m_OneUse(m_LogicalAnd(m_Value(A), m_Value(B)))) &&
      match(Op1, m_OneUse(m_LogicalOr(m_Value(C), m_Value(D))))) {
    bool NeedFreeze = isa<SelectInst>(Op0) && isa<SelectInst>(Op1) && B == D;
    if (B == C || B == D)
      std::swap(A, B);
    if (A == C)
      std::swap(C, D);
    if (A == D) {
      if (NeedFreeze)
        A = Builder.CreateFreeze(A);
      Value *NotB = Builder.CreateNot(B);
      return SelectInst::Create(A, NotB, C);
    }
  }

  if (auto *LHS = dyn_cast<ICmpInst>(I.getOperand(0)))
    if (auto *RHS = dyn_cast<ICmpInst>(I.getOperand(1)))
      if (Value *V = foldXorOfICmps(LHS, RHS, I))
        return replaceInstUsesWith(I, V);

  if (Instruction *CastedXor = foldCastedBitwiseLogic(I))
    return CastedXor;

  if (Instruction *Abs = canonicalizeAbs(I, Builder))
    return Abs;

  // Otherwise, if all else fails, try to hoist the xor-by-constant:
  // (X ^ C) ^ Y --> (X ^ Y) ^ C
  // Just like we do in other places, we completely avoid the fold
  // for constantexprs, at least to avoid an endless combine loop.
  if (match(&I, m_c_Xor(m_OneUse(m_Xor(m_CombineAnd(m_Value(X),
                                                    m_Unless(m_ConstantExpr())),
                                       m_ImmConstant(C1))),
                        m_Value(Y))))
    return BinaryOperator::CreateXor(Builder.CreateXor(X, Y), C1);

  if (Instruction *R = reassociateForUses(I, Builder))
    return R;

  if (Instruction *Canonicalized = canonicalizeLogicFirst(I, Builder))
    return Canonicalized;

  if (Instruction *Folded = foldLogicOfIsFPClass(I, Op0, Op1))
    return Folded;

  if (Instruction *Folded = canonicalizeConditionalNegationViaMathToSelect(I))
    return Folded;

  if (Instruction *Res = foldBinOpOfDisplacedShifts(I))
    return Res;

  if (Instruction *Res = foldBitwiseLogicWithIntrinsics(I, Builder))
    return Res;

  return nullptr;
}