/*========================== begin_copyright_notice ============================

Copyright (C) 2018-2021 Intel Corporation

SPDX-License-Identifier: MIT

============================= end_copyright_notice ===========================*/

/*========================== begin_copyright_notice ============================

This file is distributed under the University of Illinois Open Source License.
See LICENSE.TXT for details.

============================= end_copyright_notice ===========================*/

// This file implements the visit functions for add, fadd, sub, and fsub.

#include "common/LLVMWarningsPush.hpp"
#include "InstCombineInternal.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/Analysis/InstructionSimplify.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/PatternMatch.h"
#include "common/LLVMWarningsPop.hpp"
#include "Probe/Assertion.h"

using namespace llvm;
using namespace PatternMatch;
using namespace IGCombiner;

#define DEBUG_TYPE "instcombine"

namespace {

/// Class representing the coefficient of a floating-point addend.
/// This class needs to be highly efficient; this is especially true for
/// the constructor. As I write this comment, the cost of the default
/// constructor is merely a 4-byte zero store (assuming the compiler is
/// able to perform write-merging).
///
class FAddendCoef {
public:
  // The constructor has to initialize an APFloat, which is unnecessary for
  // most addends whose coefficient is either 1 or -1. So, the constructor
  // is expensive. In order to avoid the cost of the constructor, we should
  // reuse some instances whenever possible. The pre-created instances
  // FAddCombine::Add[0-5] embody this idea.
  //
  FAddendCoef() : IsFp(false), BufHasFpVal(false), IntVal(0) {}
  ~FAddendCoef();

  void set(short C) {
    IGC_ASSERT_MESSAGE(!insaneIntVal(C), "Insane coefficient");
    IsFp = false; IntVal = C;
  }

  void set(const APFloat& C);

  void negate();

  bool isZero() const { return isInt() ? !IntVal : getFpVal().isZero(); }
  Value *getValue(Type *) const;

  // If possible, don't define operator+/operator- etc because these
  // operators inevitably call FAddendCoef's constructor which is not cheap.
  void operator=(const FAddendCoef &A);
  void operator+=(const FAddendCoef &A);
  void operator*=(const FAddendCoef &S);

  bool isOne() const { return isInt() && IntVal == 1; }
  bool isTwo() const { return isInt() && IntVal == 2; }
  bool isMinusOne() const { return isInt() && IntVal == -1; }
  bool isMinusTwo() const { return isInt() && IntVal == -2; }

private:
  bool insaneIntVal(int V) { return V > 4 || V < -4; }

  APFloat *getFpValPtr()
    { return reinterpret_cast<APFloat*>(&FpValBuf.buffer[0]); }

  const APFloat *getFpValPtr() const
    { return reinterpret_cast<const APFloat*>(&FpValBuf.buffer[0]); }

  const APFloat &getFpVal() const {
    IGC_ASSERT_MESSAGE(IsFp, "Incorrect state");
    IGC_ASSERT_MESSAGE(BufHasFpVal, "Incorrect state");
    return *getFpValPtr();
  }

  APFloat &getFpVal() {
    IGC_ASSERT_MESSAGE(IsFp, "Incorrect state");
    IGC_ASSERT_MESSAGE(BufHasFpVal, "Incorrect state");
    return *getFpValPtr();
  }

  bool isInt() const { return !IsFp; }

  // If the coefficient is represented by an integer, promote it to a
  // floating point.
  void convertToFpType(const fltSemantics &Sem);

  // Construct an APFloat from a signed integer.
  // TODO: We should get rid of this function when APFloat can be constructed
  // from a *SIGNED* integer.
  APFloat createAPFloatFromInt(const fltSemantics &Sem, int Val);

private:
  bool IsFp;

  // True iff FpValBuf contains an instance of APFloat.
  bool BufHasFpVal;

  // The integer coefficient of an individual addend is either 1 or -1,
  // and we try to simplify at most 4 addends from at most two neighboring
  // instructions. So the range of <IntVal> falls in [-4, 4]; APInt would
  // be overkill for this purpose.
  short IntVal;

  AlignedCharArrayUnion<APFloat> FpValBuf;
};

/// FAddend is used to represent a floating-point addend. An addend is
/// represented as <C, V>, where V is a symbolic value, and C is a
/// constant coefficient. A constant addend is represented as <C, 0>.
///
class FAddend {
public:
  FAddend() : Val(nullptr) {}

  Value *getSymVal() const { return Val; }
  const FAddendCoef &getCoef() const { return Coeff; }

  bool isConstant() const { return Val == nullptr; }
  bool isZero() const { return Coeff.isZero(); }

  void set(short Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const APFloat &Coefficient, Value *V) {
    Coeff.set(Coefficient);
    Val = V;
  }
  void set(const ConstantFP *Coefficient, Value *V) {
    Coeff.set(Coefficient->getValueAPF());
    Val = V;
  }

  void negate() { Coeff.negate(); }

  /// Drill down the U-D chain one step to find the definition of V, and
  /// try to break the definition into one or two addends.
  static unsigned drillValueDownOneStep(Value* V, FAddend &A0, FAddend &A1);

  /// Similar to FAddend::drillDownOneStep() except that the value being
  /// split is the addend itself.
  unsigned drillAddendDownOneStep(FAddend &Addend0, FAddend &Addend1) const;

  void operator+=(const FAddend &T) {
    IGC_ASSERT_MESSAGE((Val == T.Val), "Symbolic-values disagree");
    Coeff += T.Coeff;
  }

private:
  void Scale(const FAddendCoef& ScaleAmt) { Coeff *= ScaleAmt; }

  // This addend has the value of "Coeff * Val".
  Value *Val;
  FAddendCoef Coeff;
};

/// FAddCombine is the class for optimizing an unsafe fadd/fsub along
/// with at most two of its neighboring instructions.
///
class FAddCombine {
public:
  FAddCombine(InstCombiner::BuilderTy *B) : Builder(B), Instr(nullptr) {}
  Value *simplify(Instruction *FAdd);

private:
  typedef SmallVector<const FAddend*, 4> AddendVect;

  Value *simplifyFAdd(AddendVect& V, unsigned InstrQuota);

  Value *performFactorization(Instruction *I);

  /// Convert the given addend to a Value
  Value *createAddendVal(const FAddend &A, bool& NeedNeg);

  /// Return the number of instructions needed to emit the N-ary addition.
  unsigned calcInstrNumber(const AddendVect& Vect);
  Value *createFSub(Value *Opnd0, Value *Opnd1);
  Value *createFAdd(Value *Opnd0, Value *Opnd1);
  Value *createFMul(Value *Opnd0, Value *Opnd1);
  Value *createFDiv(Value *Opnd0, Value *Opnd1);
  Value *createFNeg(Value *V);
  Value *createNaryFAdd(const AddendVect& Opnds, unsigned InstrQuota);
  void createInstPostProc(Instruction *NewInst, bool NoNumber = false);

  InstCombiner::BuilderTy *Builder;
  Instruction *Instr;

  unsigned InstructionCounter;
};

} // anonymous namespace

//===----------------------------------------------------------------------===//
//
// Implementation of
//    {FAddendCoef, FAddend, FAddition, FAddCombine}.
//
//===----------------------------------------------------------------------===//
FAddendCoef::~FAddendCoef() {
  if (BufHasFpVal)
    getFpValPtr()->~APFloat();
}

void FAddendCoef::set(const APFloat& C) {
  APFloat *P = getFpValPtr();

  if (isInt()) {
    // As the buffer is a meaningless byte stream, we cannot call
    // APFloat::operator=().
    new(P) APFloat(C);
  } else
    *P = C;

  IsFp = BufHasFpVal = true;
}

void FAddendCoef::convertToFpType(const fltSemantics &Sem) {
  if (!isInt())
    return;

  APFloat *P = getFpValPtr();
  if (IntVal > 0)
    new(P) APFloat(Sem, IntVal);
  else {
    new(P) APFloat(Sem, 0 - IntVal);
    P->changeSign();
  }
  IsFp = BufHasFpVal = true;
}

APFloat FAddendCoef::createAPFloatFromInt(const fltSemantics &Sem, int Val) {
  if (Val >= 0)
    return APFloat(Sem, Val);

  APFloat T(Sem, 0 - Val);
  T.changeSign();

  return T;
}

void FAddendCoef::operator=(const FAddendCoef &That) {
  if (That.isInt())
    set(That.IntVal);
  else
    set(That.getFpVal());
}

void FAddendCoef::operator+=(const FAddendCoef &That) {
  enum APFloat::roundingMode RndMode = APFloat::rmNearestTiesToEven;
  if (isInt() == That.isInt()) {
    if (isInt())
      IntVal += That.IntVal;
    else
      getFpVal().add(That.getFpVal(), RndMode);
    return;
  }

  if (isInt()) {
    const APFloat &T = That.getFpVal();
    convertToFpType(T.getSemantics());
    getFpVal().add(T, RndMode);
    return;
  }

  APFloat &T = getFpVal();
  T.add(createAPFloatFromInt(T.getSemantics(), That.IntVal), RndMode);
}

void FAddendCoef::operator*=(const FAddendCoef &That) {
  if (That.isOne())
    return;

  if (That.isMinusOne()) {
    negate();
    return;
  }

  if (isInt() && That.isInt()) {
    int Res = IntVal * (int)That.IntVal;
    IGC_ASSERT_MESSAGE(!insaneIntVal(Res), "Insane int value");
    IntVal = Res;
    return;
  }

  const fltSemantics &Semantic =
    isInt() ? That.getFpVal().getSemantics() : getFpVal().getSemantics();

  if (isInt())
    convertToFpType(Semantic);
  APFloat &F0 = getFpVal();

  if (That.isInt())
    F0.multiply(createAPFloatFromInt(Semantic, That.IntVal),
                APFloat::rmNearestTiesToEven);
  else
    F0.multiply(That.getFpVal(), APFloat::rmNearestTiesToEven);
}

void FAddendCoef::negate() {
  if (isInt())
    IntVal = 0 - IntVal;
  else
    getFpVal().changeSign();
}

Value *FAddendCoef::getValue(Type *Ty) const {
  return isInt() ?
    ConstantFP::get(Ty, float(IntVal)) :
    ConstantFP::get(Ty->getContext(), getFpVal());
}

// The definition of <Val>     Addends
// =========================================
//  A + B                     <1, A>, <1, B>
//  A - B                     <1, A>, <-1, B>
//  0 - B                     <-1, B>
//  C * A                     <C, A>
//  A + C                     <1, A>, <C, NULL>
//  0 +/- 0                   <0, NULL> (corner case)
//
// Legend: A and B are not constant, C is constant
//
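// An illustrative example (assuming fast-math flags on the instructions):
// for "%t = fsub fast float %x, %y", drillValueDownOneStep() yields the two
// addends <1, %x> and <-1, %y>, while "%t = fmul fast float %x, 2.0" yields
// the single addend <2.0, %x>.
//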
unsigned FAddend::drillValueDownOneStep
  (Value *Val, FAddend &Addend0, FAddend &Addend1) {
  Instruction *I = nullptr;
  if (!Val || !(I = dyn_cast<Instruction>(Val)))
    return 0;

  unsigned Opcode = I->getOpcode();

  if (Opcode == Instruction::FAdd || Opcode == Instruction::FSub) {
    ConstantFP *C0, *C1;
    Value *Opnd0 = I->getOperand(0);
    Value *Opnd1 = I->getOperand(1);
    if ((C0 = dyn_cast<ConstantFP>(Opnd0)) && C0->isZero())
      Opnd0 = nullptr;

    if ((C1 = dyn_cast<ConstantFP>(Opnd1)) && C1->isZero())
      Opnd1 = nullptr;

    if (Opnd0) {
      if (!C0)
        Addend0.set(1, Opnd0);
      else
        Addend0.set(C0, nullptr);
    }

    if (Opnd1) {
      FAddend &Addend = Opnd0 ? Addend1 : Addend0;
      if (!C1)
        Addend.set(1, Opnd1);
      else
        Addend.set(C1, nullptr);
      if (Opcode == Instruction::FSub)
        Addend.negate();
    }

    if (Opnd0 || Opnd1)
      return Opnd0 && Opnd1 ? 2 : 1;

    // Both operands are zero. Weird!
    Addend0.set(APFloat(C0->getValueAPF().getSemantics()), nullptr);
    return 1;
  }

  if (I->getOpcode() == Instruction::FMul) {
    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    if (ConstantFP *C = dyn_cast<ConstantFP>(V0)) {
      Addend0.set(C, V1);
      return 1;
    }

    if (ConstantFP *C = dyn_cast<ConstantFP>(V1)) {
      Addend0.set(C, V0);
      return 1;
    }
  }

  return 0;
}

// Try to break *this* addend into two addends. For example, suppose this
// addend is <2.3, V> and V = X + Y; calling this function yields the two
// addends <2.3, X> and <2.3, Y>.
//
unsigned FAddend::drillAddendDownOneStep
  (FAddend &Addend0, FAddend &Addend1) const {
  if (isConstant())
    return 0;

  unsigned BreakNum = FAddend::drillValueDownOneStep(Val, Addend0, Addend1);
  if (!BreakNum || Coeff.isOne())
    return BreakNum;

  Addend0.Scale(Coeff);

  if (BreakNum == 2)
    Addend1.Scale(Coeff);

  return BreakNum;
}

// Try to perform the following optimization on the input instruction I.
// Return the simplified expression if successful; otherwise, return nullptr.
//
//   Instruction "I" is                Simplified into
// -------------------------------------------------------
//   (x * y) +/- (x * z)               x * (y +/- z)
//   (y / x) +/- (z / x)               (y +/- z) / x
//
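// As an illustrative sketch (the value names here are hypothetical), the
// fast-math sequence
//    %m0 = fmul fast float %x, %y
//    %m1 = fmul fast float %x, %z
//    %s  = fadd fast float %m0, %m1
// would be rewritten into
//    %t  = fadd fast float %y, %z
//    %s  = fmul fast float %x, %t
// saving one instruction when %m0 and %m1 have no other uses.
//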
Value *FAddCombine::performFactorization(Instruction *I) {
  IGC_ASSERT_MESSAGE((I->getOpcode() == Instruction::FAdd) || (I->getOpcode() == Instruction::FSub), "Expect add/sub");

  Instruction *I0 = dyn_cast<Instruction>(I->getOperand(0));
  Instruction *I1 = dyn_cast<Instruction>(I->getOperand(1));

  if (!I0 || !I1 || I0->getOpcode() != I1->getOpcode())
    return nullptr;

  bool isMpy = false;
  if (I0->getOpcode() == Instruction::FMul)
    isMpy = true;
  else if (I0->getOpcode() != Instruction::FDiv)
    return nullptr;

  Value *Opnd0_0 = I0->getOperand(0);
  Value *Opnd0_1 = I0->getOperand(1);
  Value *Opnd1_0 = I1->getOperand(0);
  Value *Opnd1_1 = I1->getOperand(1);

  //  Input Instr I       Factor   AddSub0  AddSub1
  //  ----------------------------------------------
  //  (x*y) +/- (x*z)        x        y        z
  //  (y/x) +/- (z/x)        x        y        z
  //
  Value *Factor = nullptr;
  Value *AddSub0 = nullptr, *AddSub1 = nullptr;

  if (isMpy) {
    if (Opnd0_0 == Opnd1_0 || Opnd0_0 == Opnd1_1)
      Factor = Opnd0_0;
    else if (Opnd0_1 == Opnd1_0 || Opnd0_1 == Opnd1_1)
      Factor = Opnd0_1;

    if (Factor) {
      AddSub0 = (Factor == Opnd0_0) ? Opnd0_1 : Opnd0_0;
      AddSub1 = (Factor == Opnd1_0) ? Opnd1_1 : Opnd1_0;
    }
  } else if (Opnd0_1 == Opnd1_1) {
    Factor = Opnd0_1;
    AddSub0 = Opnd0_0;
    AddSub1 = Opnd1_0;
  }

  if (!Factor)
    return nullptr;

  FastMathFlags Flags;
  Flags.setFast();
  if (I0) Flags &= I->getFastMathFlags();
  if (I1) Flags &= I->getFastMathFlags();

  // Create expression "NewAddSub = AddSub0 +/- AddSub1"
  Value *NewAddSub = (I->getOpcode() == Instruction::FAdd) ?
                     createFAdd(AddSub0, AddSub1) :
                     createFSub(AddSub0, AddSub1);
  if (ConstantFP *CFP = dyn_cast<ConstantFP>(NewAddSub)) {
    const APFloat &F = CFP->getValueAPF();
    if (!F.isNormal())
      return nullptr;
  } else if (Instruction *II = dyn_cast<Instruction>(NewAddSub))
    II->setFastMathFlags(Flags);

  if (isMpy) {
    Value *RI = createFMul(Factor, NewAddSub);
    if (Instruction *II = dyn_cast<Instruction>(RI))
      II->setFastMathFlags(Flags);
    return RI;
  }

  Value *RI = createFDiv(NewAddSub, Factor);
  if (Instruction *II = dyn_cast<Instruction>(RI))
    II->setFastMathFlags(Flags);
  return RI;
}

Value *FAddCombine::simplify(Instruction *I) {
  IGC_ASSERT_MESSAGE(I->isFast(), "Expected 'fast' instruction");

  // Currently we are not able to handle vector type.
  if (I->getType()->isVectorTy())
    return nullptr;

  IGC_ASSERT_MESSAGE((I->getOpcode() == Instruction::FAdd) || (I->getOpcode() == Instruction::FSub), "Expect add/sub");

  // Save the instruction before calling other member-functions.
  Instr = I;

  FAddend Opnd0, Opnd1, Opnd0_0, Opnd0_1, Opnd1_0, Opnd1_1;

  unsigned OpndNum = FAddend::drillValueDownOneStep(I, Opnd0, Opnd1);

  // Step 1: Expand the 1st addend into Opnd0_0 and Opnd0_1.
  unsigned Opnd0_ExpNum = 0;
  unsigned Opnd1_ExpNum = 0;

  if (!Opnd0.isConstant())
    Opnd0_ExpNum = Opnd0.drillAddendDownOneStep(Opnd0_0, Opnd0_1);

  // Step 2: Expand the 2nd addend into Opnd1_0 and Opnd1_1.
  if (OpndNum == 2 && !Opnd1.isConstant())
    Opnd1_ExpNum = Opnd1.drillAddendDownOneStep(Opnd1_0, Opnd1_1);

  // Step 3: Try to optimize Opnd0_0 + Opnd0_1 + Opnd1_0 + Opnd1_1
  if (Opnd0_ExpNum && Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0_0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    // Compute the instruction quota. We should save at least one instruction.
    unsigned InstQuota = 0;

    Value *V0 = I->getOperand(0);
    Value *V1 = I->getOperand(1);
    InstQuota = ((!isa<Constant>(V0) && V0->hasOneUse()) &&
                 (!isa<Constant>(V1) && V1->hasOneUse())) ? 2 : 1;

    if (Value *R = simplifyFAdd(AllOpnds, InstQuota))
      return R;
  }

  if (OpndNum != 2) {
    // The input instruction is "I = 0.0 +/- V". If "V" could have been
    // split into two addends, say "V = X - Y", the instruction would have
    // been optimized into "I = Y - X" in the previous steps.
    //
    const FAddendCoef &CE = Opnd0.getCoef();
    return CE.isOne() ? Opnd0.getSymVal() : nullptr;
  }

  // Step 4: Try to optimize Opnd0 + Opnd1_0 [+ Opnd1_1]
  if (Opnd1_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd0);
    AllOpnds.push_back(&Opnd1_0);
    if (Opnd1_ExpNum == 2)
      AllOpnds.push_back(&Opnd1_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 5: Try to optimize Opnd1 + Opnd0_0 [+ Opnd0_1]
  if (Opnd0_ExpNum) {
    AddendVect AllOpnds;
    AllOpnds.push_back(&Opnd1);
    AllOpnds.push_back(&Opnd0_0);
    if (Opnd0_ExpNum == 2)
      AllOpnds.push_back(&Opnd0_1);

    if (Value *R = simplifyFAdd(AllOpnds, 1))
      return R;
  }

  // Step 6: Try factorization as a last resort.
  return performFactorization(I);
}

Value *FAddCombine::simplifyFAdd(AddendVect& Addends, unsigned InstrQuota) {
  unsigned AddendNum = Addends.size();
  IGC_ASSERT_MESSAGE(AddendNum <= 4, "Too many addends");

  // For saving intermediate results.
  unsigned NextTmpIdx = 0;
  FAddend TmpResult[3];

  // Points to the constant addend of the resulting simplified expression.
  // If the resulting expression has a constant addend, it is desirable for
  // that constant to reside at the top of the resulting expression tree.
  // Placing constants close to the super-expression(s) will potentially
  // reveal some optimization opportunities in the super-expression(s).
  //
  const FAddend *ConstAdd = nullptr;

  // Simplified addends are placed in <SimpVect>.
  AddendVect SimpVect;

  // The outer loop works on one symbolic-value at a time. Suppose the input
  // addends are: <a1, x>, <b1, y>, <a2, x>, <c1, z>, <b2, y>, ...
  // The symbolic-values will be processed in this order: x, y, z.
  //
  for (unsigned SymIdx = 0; SymIdx < AddendNum; SymIdx++) {

    const FAddend *ThisAddend = Addends[SymIdx];
    if (!ThisAddend) {
      // This addend was processed before.
      continue;
    }

    Value *Val = ThisAddend->getSymVal();
    unsigned StartIdx = SimpVect.size();
    SimpVect.push_back(ThisAddend);

    // The inner loop collects addends sharing the same symbolic-value, and
    // these addends will later be folded into a single addend. Following the
    // example above, if the symbolic value "y" is being processed, the inner
    // loop will collect the two addends "<b1, y>" and "<b2, y>". These two
    // addends will later be folded into "<b1+b2, y>".
    //
    for (unsigned SameSymIdx = SymIdx + 1;
         SameSymIdx < AddendNum; SameSymIdx++) {
      const FAddend *T = Addends[SameSymIdx];
      if (T && T->getSymVal() == Val) {
        // Set to null so that the next iteration of the outer loop will not
        // process this addend again.
        Addends[SameSymIdx] = nullptr;
        SimpVect.push_back(T);
      }
    }

    // If multiple addends share the same symbolic value, fold them together.
    if (StartIdx + 1 != SimpVect.size()) {
      FAddend &R = TmpResult[NextTmpIdx ++];
      R = *SimpVect[StartIdx];
      for (unsigned Idx = StartIdx + 1; Idx < SimpVect.size(); Idx++)
        R += *SimpVect[Idx];

      // Pop all addends being folded and push the resulting folded addend.
      SimpVect.resize(StartIdx);
      if (Val) {
        if (!R.isZero()) {
          SimpVect.push_back(&R);
        }
      } else {
        // Don't push the constant addend at this time. It will be the last
        // element of <SimpVect>.
        ConstAdd = &R;
      }
    }
  }

  IGC_ASSERT_MESSAGE((NextTmpIdx <= array_lengthof(TmpResult) + 1), "out-of-bound access");

  if (ConstAdd)
    SimpVect.push_back(ConstAdd);

  Value *Result;
  if (!SimpVect.empty())
    Result = createNaryFAdd(SimpVect, InstrQuota);
  else {
    // The addition is folded to 0.0.
    Result = ConstantFP::get(Instr->getType(), 0.0);
  }

  return Result;
}

Value *FAddCombine::createNaryFAdd
  (const AddendVect &Opnds, unsigned InstrQuota) {
  IGC_ASSERT_MESSAGE(!Opnds.empty(), "Expect at least one addend");

  // Step 1: Check if the # of instructions needed exceeds the quota.
  //
  unsigned InstrNeeded = calcInstrNumber(Opnds);
  if (InstrNeeded > InstrQuota)
    return nullptr;

  InstructionCounter = 0;

  // Step 2: Emit the N-ary addition.
  // Note that at most three instructions are involved in Fadd-InstCombine:
  // the addition in question, and at most two neighboring instructions.
  // The resulting optimized addition should have at least one less
  // instruction than the original addition expression tree. This implies that
  // the resulting N-ary addition has at most two instructions, and we don't
  // need to worry about tree-height when constructing the N-ary addition.
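  // For example (illustrative only): given the addends <1, x>, <-1, y> and
  // <2, z>, createAddendVal() yields "x", "y" (with NeedNeg set) and
  // "fadd z, z"; the loop below then combines them as "fsub x, y" followed
  // by "fadd (fsub x, y), (fadd z, z)".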

  Value *LastVal = nullptr;
  bool LastValNeedNeg = false;

  // Iterate the addends, creating fadd/fsub using two adjacent addends.
  for (const FAddend *Opnd : Opnds) {
    bool NeedNeg;
    Value *V = createAddendVal(*Opnd, NeedNeg);
    if (!LastVal) {
      LastVal = V;
      LastValNeedNeg = NeedNeg;
      continue;
    }

    if (LastValNeedNeg == NeedNeg) {
      LastVal = createFAdd(LastVal, V);
      continue;
    }

    if (LastValNeedNeg)
      LastVal = createFSub(V, LastVal);
    else
      LastVal = createFSub(LastVal, V);

    LastValNeedNeg = false;
  }

  if (LastValNeedNeg) {
    LastVal = createFNeg(LastVal);
  }

  IGC_ASSERT_MESSAGE((InstructionCounter == InstrNeeded), "Inconsistent instruction numbers");

  return LastVal;
}

Value *FAddCombine::createFSub(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFSub(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFNeg(Value *V) {
  Value *Zero = cast<Value>(ConstantFP::getZeroValueForNegation(V->getType()));
  Value *NewV = createFSub(Zero, V);
  if (Instruction *I = dyn_cast<Instruction>(NewV))
    createInstPostProc(I, true); // fneg's don't receive instruction numbers.
  return NewV;
}

Value *FAddCombine::createFAdd(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFAdd(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFMul(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFMul(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

Value *FAddCombine::createFDiv(Value *Opnd0, Value *Opnd1) {
  Value *V = Builder->CreateFDiv(Opnd0, Opnd1);
  if (Instruction *I = dyn_cast<Instruction>(V))
    createInstPostProc(I);
  return V;
}

void FAddCombine::createInstPostProc(Instruction *NewInstr, bool NoNumber) {
  NewInstr->setDebugLoc(Instr->getDebugLoc());

  // Keep track of the number of instructions created.
  if (!NoNumber)
    ++InstructionCounter;

  // Propagate fast-math flags
  NewInstr->setFastMathFlags(Instr->getFastMathFlags());
}

// Return the number of instructions needed to emit the N-ary addition.
// NOTE: Keep this function in sync with createAddendVal().
unsigned FAddCombine::calcInstrNumber(const AddendVect &Opnds) {
  unsigned OpndNum = Opnds.size();
  unsigned InstrNeeded = OpndNum - 1;

  // The number of addends in the form of "(-1)*x".
  unsigned NegOpndNum = 0;

  // Adjust the number of instructions needed to emit the N-ary add.
  for (const FAddend *Opnd : Opnds) {
    if (Opnd->isConstant())
      continue;

    const FAddendCoef &CE = Opnd->getCoef();
    if (CE.isMinusOne() || CE.isMinusTwo())
      NegOpndNum++;

    // Let the addend be "c * x". If "c == +/-1", the value of the addend
    // is immediately available; otherwise, it needs exactly one instruction
    // to evaluate the value.
    if (!CE.isMinusOne() && !CE.isOne())
      InstrNeeded++;
  }
  if (NegOpndNum == OpndNum)
    InstrNeeded++;
  return InstrNeeded;
}

//  Input Addend        Value           NeedNeg(output)
//  ================================================================
//  Constant C          C               false
//  <+/-1, V>           V               coefficient is -1
//  <2/-2, V>           "fadd V, V"     coefficient is -2
//  <C, V>              "fmul V, C"     false
//
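// For instance (illustrative): the addend <-2, V> is materialized as
// "fadd V, V" with NeedNeg set, and the caller folds the pending negation
// into an fsub or fneg; <3.0, V> becomes "fmul V, 3.0" with no negation.
//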
// NOTE: Keep this function in sync with FAddCombine::calcInstrNumber.
Value *FAddCombine::createAddendVal(const FAddend &Opnd, bool &NeedNeg) {
  const FAddendCoef &Coeff = Opnd.getCoef();

  if (Opnd.isConstant()) {
    NeedNeg = false;
    return Coeff.getValue(Instr->getType());
  }

  Value *OpndVal = Opnd.getSymVal();

  if (Coeff.isMinusOne() || Coeff.isOne()) {
    NeedNeg = Coeff.isMinusOne();
    return OpndVal;
  }

  if (Coeff.isTwo() || Coeff.isMinusTwo()) {
    NeedNeg = Coeff.isMinusTwo();
    return createFAdd(OpndVal, OpndVal);
  }

  NeedNeg = false;
  return createFMul(OpndVal, Coeff.getValue(Instr->getType()));
}

// If one of the operands only has one non-zero bit, and if the other
// operand has a known-zero bit in a more significant place than it (not
// including the sign bit) the ripple may go up to and fill the zero, but
// won't change the sign. For example, (X & ~4) + 1.
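// Worked 8-bit example: in (X & ~4) + 1, the left operand has bit 2 known
// zero and the right operand has its single one-bit in bit 0, so a carry
// rippling up from bit 0 is absorbed no later than bit 2 and the sign bit
// (bit 7) cannot change.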
static bool checkRippleForAdd(const APInt &Op0KnownZero,
                              const APInt &Op1KnownZero) {
  APInt Op1MaybeOne = ~Op1KnownZero;
  // Make sure that one of the operands has at most one bit set to 1.
  if (Op1MaybeOne.countPopulation() != 1)
    return false;

  // Find the most significant known 0 other than the sign bit.
  int BitWidth = Op0KnownZero.getBitWidth();
  APInt Op0KnownZeroTemp(Op0KnownZero);
  Op0KnownZeroTemp.clearBit(BitWidth - 1);
  int Op0ZeroPosition = BitWidth - Op0KnownZeroTemp.countLeadingZeros() - 1;

  int Op1OnePosition = BitWidth - Op1MaybeOne.countLeadingZeros() - 1;
  IGC_ASSERT(Op1OnePosition >= 0);

  // This also covers the case of no known zero, since in that case
  // Op0ZeroPosition is -1.
  return Op0ZeroPosition >= Op1OnePosition;
}


/// Return true if we can prove that:
///    (sext (add LHS, RHS)) === (add (sext LHS), (sext RHS))
/// This basically requires proving that the add in the original type would not
/// overflow to change the sign bit or have a carry out.
bool InstCombiner::WillNotOverflowSignedAdd(Value *LHS, Value *RHS,
                                            Instruction &CxtI) {
  // There are different heuristics we can use for this. Here are some simple
  // ones.

  // If LHS and RHS each have at least two sign bits, the addition will look
  // like
  //
  //   XX..... +
  //   YY.....
  //
  // If the carry into the most significant position is 0, X and Y can't both
  // be 1 and therefore the carry out of the addition is also 0.
  //
  // If the carry into the most significant position is 1, X and Y can't both
  // be 0 and therefore the carry out of the addition is also 1.
  //
  // Since the carry into the most significant position is always equal to
  // the carry out of the addition, there is no signed overflow.
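  // An illustrative 8-bit instance: 22 (0b00010110) and -23 (0b11101001)
  // each have at least two sign bits, so by the argument above the carry
  // into and out of bit 7 agree and the sum (here -1) cannot overflow.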
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;

  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  APInt LHSKnownZero(BitWidth, 0);
  APInt LHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);

  APInt RHSKnownZero(BitWidth, 0);
  APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);

  // Addition of two 2's complement numbers having opposite signs will never
  // overflow.
  if ((LHSKnownOne[BitWidth - 1] && RHSKnownZero[BitWidth - 1]) ||
      (LHSKnownZero[BitWidth - 1] && RHSKnownOne[BitWidth - 1]))
    return true;

  // Check if the carry bit of the addition will not cause overflow.
  if (checkRippleForAdd(LHSKnownZero, RHSKnownZero))
    return true;
  if (checkRippleForAdd(RHSKnownZero, LHSKnownZero))
    return true;

  return false;
}

/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nsw LHS, RHS)
/// This basically requires proving that the sub in the original type would not
/// overflow to change the sign bit or have a carry out.
/// TODO: Handle this for Vectors.
bool InstCombiner::WillNotOverflowSignedSub(Value *LHS, Value *RHS,
                                            Instruction &CxtI) {
  // If LHS and RHS each have at least two sign bits, the subtraction
  // cannot overflow.
  if (ComputeNumSignBits(LHS, 0, &CxtI) > 1 &&
      ComputeNumSignBits(RHS, 0, &CxtI) > 1)
    return true;

  unsigned BitWidth = LHS->getType()->getScalarSizeInBits();
  APInt LHSKnownZero(BitWidth, 0);
  APInt LHSKnownOne(BitWidth, 0);
  computeKnownBits(LHS, LHSKnownZero, LHSKnownOne, 0, &CxtI);

  APInt RHSKnownZero(BitWidth, 0);
  APInt RHSKnownOne(BitWidth, 0);
  computeKnownBits(RHS, RHSKnownZero, RHSKnownOne, 0, &CxtI);

  // Subtraction of two 2's complement numbers having identical signs will
  // never overflow.
  if ((LHSKnownOne[BitWidth - 1] && RHSKnownOne[BitWidth - 1]) ||
      (LHSKnownZero[BitWidth - 1] && RHSKnownZero[BitWidth - 1]))
    return true;

  // TODO: implement logic similar to checkRippleForAdd
  return false;
}

/// \brief Return true if we can prove that:
///    (sub LHS, RHS) === (sub nuw LHS, RHS)
bool InstCombiner::WillNotOverflowUnsignedSub(Value *LHS, Value *RHS,
                                              Instruction &CxtI) {
  // If the LHS is negative and the RHS is non-negative, no unsigned wrap.
  bool LHSKnownNonNegative, LHSKnownNegative;
  bool RHSKnownNonNegative, RHSKnownNegative;
  ComputeSignBit(LHS, LHSKnownNonNegative, LHSKnownNegative, /*Depth=*/0,
                 &CxtI);
  ComputeSignBit(RHS, RHSKnownNonNegative, RHSKnownNegative, /*Depth=*/0,
                 &CxtI);
  if (LHSKnownNegative && RHSKnownNonNegative)
    return true;

  return false;
}

// Checks if any operand is negative and we can convert add to sub.
// This function checks for the following negative patterns
//   ADD(XOR(OR(Z, NOT(C)), C)), 1) == NEG(AND(Z, C))
//   ADD(XOR(AND(Z, C), C), 1) == NEG(OR(Z, ~C))
//   XOR(AND(Z, C), (C + 1)) == NEG(OR(Z, ~C)) if C is even
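//
// To see why the second identity holds (an illustrative derivation): with
// X = XOR(AND(Z, C), C) we have X == C & ~Z, so
//   X + 1 == (C & ~Z) + 1 == ~(Z | ~C) + 1 == NEG(OR(Z, ~C)).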
static Value *checkForNegativeOperand(BinaryOperator &I,
                                      InstCombiner::BuilderTy *Builder) {
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  // This function creates 2 instructions to replace ADD; we need at least one
  // of LHS or RHS to have one use to ensure benefit in the transform.
  if (!LHS->hasOneUse() && !RHS->hasOneUse())
    return nullptr;

  Value *X = nullptr, *Y = nullptr, *Z = nullptr;
  const APInt *C1 = nullptr, *C2 = nullptr;

  // if the ONE is on the other side, swap
  if (match(RHS, m_Add(m_Value(X), m_One())))
    std::swap(LHS, RHS);

  if (match(LHS, m_Add(m_Value(X), m_One()))) {
    // if the XOR is on the other side, swap
    if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
      std::swap(X, RHS);

    if (match(X, m_Xor(m_Value(Y), m_APInt(C1)))) {
      // X = XOR(Y, C1), Y = OR(Z, C2), C2 = NOT(C1) ==> X == NOT(AND(Z, C1))
      // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, AND(Z, C1))
      if (match(Y, m_Or(m_Value(Z), m_APInt(C2))) && (*C2 == ~(*C1))) {
        Value *NewAnd = Builder->CreateAnd(Z, *C1);
        return Builder->CreateSub(RHS, NewAnd, "sub");
      } else if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && (*C1 == *C2)) {
        // X = XOR(Y, C1), Y = AND(Z, C2), C2 == C1 ==> X == NOT(OR(Z, ~C1))
        // ADD(ADD(X, 1), RHS) == ADD(X, ADD(RHS, 1)) == SUB(RHS, OR(Z, ~C1))
        Value *NewOr = Builder->CreateOr(Z, ~(*C1));
        return Builder->CreateSub(RHS, NewOr, "sub");
      }
    }
  }

  // Restore LHS and RHS
  LHS = I.getOperand(0);
  RHS = I.getOperand(1);

  // if the XOR is on the other side, swap
  if (match(RHS, m_Xor(m_Value(Y), m_APInt(C1))))
    std::swap(LHS, RHS);

  // C1 is ODD (so C2 == C1 - 1 is even)
  // LHS = XOR(Y, C1), Y = AND(Z, C2), C1 == (C2 + 1) => LHS == NEG(OR(Z, ~C2))
  // ADD(LHS, RHS) == SUB(RHS, OR(Z, ~C2))
  if (match(LHS, m_Xor(m_Value(Y), m_APInt(C1))))
    if (C1->countTrailingZeros() == 0)
      if (match(Y, m_And(m_Value(Z), m_APInt(C2))) && *C1 == (*C2 + 1)) {
        Value *NewOr = Builder->CreateOr(Z, ~(*C2));
        return Builder->CreateSub(RHS, NewOr, "sub");
      }
  return nullptr;
}

Instruction *InstCombiner::visitAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifyAddInst(LHS, RHS, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);

  // (A*B)+(A*C) -> A*(B+C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  const APInt *Val = nullptr;
  if (match(RHS, m_APInt(Val))) {
    // X + (signbit) --> X ^ signbit
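    // (e.g. for i8, "add X, -128" flips only bit 7 because the carry out of
    // the sign bit is discarded, which is exactly "xor X, -128")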
    if (Val->isSignBit())
      return BinaryOperator::CreateXor(LHS, RHS);

    // Is this add the last step in a convoluted sext?
    Value *X = nullptr;
    const APInt *C = nullptr;
    if (match(LHS, m_ZExt(m_Xor(m_Value(X), m_APInt(C)))) &&
        C->isMinSignedValue() &&
        C->sext(LHS->getType()->getScalarSizeInBits()) == *Val) {
      // add(zext(xor i16 X, -32768), -32768) --> sext X
      return CastInst::Create(Instruction::SExt, X, LHS->getType());
    }

    if (Val->isNegative() &&
        match(LHS, m_ZExt(m_NUWAdd(m_Value(X), m_APInt(C)))) &&
        Val->sge(-C->sext(Val->getBitWidth()))) {
      // (add (zext (add nuw X, C)), Val) -> (zext (add nuw X, C+Val))
      return CastInst::Create(
          Instruction::ZExt,
          Builder->CreateNUWAdd(
              X, Constant::getIntegerValue(X->getType(),
                                           *C + Val->trunc(C->getBitWidth()))),
          I.getType());
    }
  }

  // FIXME: Use the match above instead of dyn_cast to allow these transforms
  // for splat vectors.
  if (ConstantInt *CI = dyn_cast<ConstantInt>(RHS)) {
    // See if SimplifyDemandedBits can simplify this. This handles stuff like
    // (X & 254)+1 -> (X&254)|1
    if (SimplifyDemandedInstructionBits(I))
      return &I;

    // zext(bool) + C -> bool ? C + 1 : C
    if (ZExtInst *ZI = dyn_cast<ZExtInst>(LHS))
      if (ZI->getSrcTy()->isIntegerTy(1))
        return SelectInst::Create(ZI->getOperand(0), AddOne(CI), CI);

    Value *XorLHS = nullptr; ConstantInt *XorRHS = nullptr;
    if (match(LHS, m_Xor(m_Value(XorLHS), m_ConstantInt(XorRHS)))) {
      uint32_t TySizeBits = I.getType()->getScalarSizeInBits();
      const APInt &RHSVal = CI->getValue();
      unsigned ExtendAmt = 0;
      // If we have ADD(XOR(AND(X, 0xFF), 0x80), 0xF..F80), it's a sext.
      // If we have ADD(XOR(AND(X, 0xFF), 0xF..F80), 0x80), it's a sext.
      if (XorRHS->getValue() == -RHSVal) {
        if (RHSVal.isPowerOf2())
          ExtendAmt = TySizeBits - RHSVal.logBase2() - 1;
        else if (XorRHS->getValue().isPowerOf2())
          ExtendAmt = TySizeBits - XorRHS->getValue().logBase2() - 1;
      }

      if (ExtendAmt) {
        APInt Mask = APInt::getHighBitsSet(TySizeBits, ExtendAmt);
        if (!MaskedValueIsZero(XorLHS, Mask, 0, &I))
          ExtendAmt = 0;
      }

      if (ExtendAmt) {
        Constant *ShAmt = ConstantInt::get(I.getType(), ExtendAmt);
        Value *NewShl = Builder->CreateShl(XorLHS, ShAmt, "sext");
        return BinaryOperator::CreateAShr(NewShl, ShAmt);
      }

      // If this is a xor that was canonicalized from a sub, turn it back into
      // a sub and fuse this add with it.
      if (LHS->hasOneUse() && (XorRHS->getValue()+1).isPowerOf2()) {
        IntegerType *IT = cast<IntegerType>(I.getType());
        APInt LHSKnownOne(IT->getBitWidth(), 0);
        APInt LHSKnownZero(IT->getBitWidth(), 0);
        computeKnownBits(XorLHS, LHSKnownZero, LHSKnownOne, 0, &I);
        if ((XorRHS->getValue() | LHSKnownZero).isAllOnesValue())
          return BinaryOperator::CreateSub(ConstantExpr::getAdd(XorRHS, CI),
                                           XorLHS);
      }
      // (X + signbit) + C could have gotten canonicalized to (X ^ signbit) + C,
      // transform them into (X + (signbit ^ C))
      if (XorRHS->getValue().isSignBit())
        return BinaryOperator::CreateAdd(XorLHS,
                                         ConstantExpr::getXor(XorRHS, CI));
    }
  }

  if (isa<Constant>(RHS) && isa<PHINode>(LHS))
    if (Instruction *NV = FoldOpIntoPhi(I))
      return NV;

  if (I.getType()->getScalarType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(LHS, RHS);

  // X + X --> X << 1
  if (LHS == RHS) {
    BinaryOperator *New =
      BinaryOperator::CreateShl(LHS, ConstantInt::get(I.getType(), 1));
    New->setHasNoSignedWrap(I.hasNoSignedWrap());
    New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
    return New;
  }

  // -A + B --> B - A
  // -A + -B --> -(A + B)
  if (Value *LHSV = dyn_castNegVal(LHS)) {
    if (!isa<Constant>(RHS))
      if (Value *RHSV = dyn_castNegVal(RHS)) {
        Value *NewAdd = Builder->CreateAdd(LHSV, RHSV, "sum");
        return BinaryOperator::CreateNeg(NewAdd);
      }

    return BinaryOperator::CreateSub(RHS, LHSV);
  }

  // A + -B --> A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castNegVal(RHS))
      return BinaryOperator::CreateSub(LHS, V);

  if (Value *V = checkForNegativeOperand(I, Builder))
    return replaceInstUsesWith(I, V);

  // A+B --> A|B iff A and B have no bits set in common.
  if (haveNoCommonBitsSet(LHS, RHS, DL, &AC, &I, &DT))
    return BinaryOperator::CreateOr(LHS, RHS);

  if (Constant *CRHS = dyn_cast<Constant>(RHS)) {
    Value *X = nullptr;
    if (match(LHS, m_Not(m_Value(X)))) // ~X + C --> (C-1) - X
      return BinaryOperator::CreateSub(SubOne(CRHS), X);
  }

  // FIXME: We already did a check for ConstantInt RHS above this.
  // FIXME: Is this pattern covered by another fold? No regression tests fail on
  // removal.
  if (ConstantInt *CRHS = dyn_cast<ConstantInt>(RHS)) {
    // (X & FF00) + xx00 -> (X+xx00) & FF00
    Value *X = nullptr;
    ConstantInt *C2 = nullptr;
    if (LHS->hasOneUse() &&
        match(LHS, m_And(m_Value(X), m_ConstantInt(C2))) &&
        CRHS->getValue() == (CRHS->getValue() & C2->getValue())) {
      // See if all bits from the first bit set in the Add RHS up are included
      // in the mask. First, get the rightmost bit.
      const APInt &AddRHSV = CRHS->getValue();

      // Form a mask of all bits from the lowest bit added through the top.
      APInt AddRHSHighBits(~((AddRHSV & -AddRHSV)-1));
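      // (e.g. for i16 with xx00 = 0x0F00: AddRHSV & -AddRHSV isolates the
      // lowest set bit, 0x0100, so AddRHSHighBits becomes 0xFF00)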

      // See if the and mask includes all of these bits.
      APInt AddRHSHighBitsAnd(AddRHSHighBits & C2->getValue());

      if (AddRHSHighBits == AddRHSHighBitsAnd) {
        // Okay, the xform is safe. Insert the new add pronto.
        Value *NewAdd = Builder->CreateAdd(X, CRHS, LHS->getName());
        return BinaryOperator::CreateAnd(NewAdd, C2);
      }
    }

    // Try to fold constant add into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(LHS))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;
  }

  // add (select X 0 (sub n A)) A --> select X A n
  {
    SelectInst *SI = dyn_cast<SelectInst>(LHS);
    Value *A = RHS;
    if (!SI) {
      SI = dyn_cast<SelectInst>(RHS);
      A = LHS;
    }
    if (SI && SI->hasOneUse()) {
      Value *TV = SI->getTrueValue();
      Value *FV = SI->getFalseValue();
      Value *N = nullptr;

      // Can we fold the add into the argument of the select?
      // We check both true and false select arguments for a matching subtract.
      if (match(FV, m_Zero()) && match(TV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the true select value.
        return SelectInst::Create(SI->getCondition(), N, A);

      if (match(TV, m_Zero()) && match(FV, m_Sub(m_Value(N), m_Specific(A))))
        // Fold the add into the false select value.
        return SelectInst::Create(SI->getCondition(), A, N);
    }
  }

  // Check for (add (sext x), y), see if we can merge this into an
  // integer add followed by a sext.
  if (SExtInst *LHSConv = dyn_cast<SExtInst>(LHS)) {
    // (add (sext x), cst) --> (sext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
          ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getSExt(CI, I.getType()) == RHSC &&
            WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
          // Insert the new, smaller add.
          Value *NewAdd =
            Builder->CreateNSWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new SExtInst(NewAdd, I.getType());
        }
      }
    }

    // (add (sext x), (sext y)) --> (sext (add int x, y))
    if (SExtInst *RHSConv = dyn_cast<SExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of sexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0), "addconv");
        return new SExtInst(NewAdd, I.getType());
      }
    }
  }

  // Check for (add (zext x), y), see if we can merge this into an
  // integer add followed by a zext.
  if (auto *LHSConv = dyn_cast<ZExtInst>(LHS)) {
    // (add (zext x), cst) --> (zext (add x, cst'))
    if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS)) {
      if (LHSConv->hasOneUse()) {
        Constant *CI =
          ConstantExpr::getTrunc(RHSC, LHSConv->getOperand(0)->getType());
        if (ConstantExpr::getZExt(CI, I.getType()) == RHSC &&
            computeOverflowForUnsignedAdd(LHSConv->getOperand(0), CI, &I) ==
                OverflowResult::NeverOverflows) {
          // Insert the new, smaller add.
          Value *NewAdd =
            Builder->CreateNUWAdd(LHSConv->getOperand(0), CI, "addconv");
          return new ZExtInst(NewAdd, I.getType());
        }
      }
    }

    // (add (zext x), (zext y)) --> (zext (add int x, y))
    if (auto *RHSConv = dyn_cast<ZExtInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of zexts), and if the
      // integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          computeOverflowForUnsignedAdd(LHSConv->getOperand(0),
                                        RHSConv->getOperand(0),
                                        &I) == OverflowResult::NeverOverflows) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNUWAdd(
            LHSConv->getOperand(0), RHSConv->getOperand(0), "addconv");
        return new ZExtInst(NewAdd, I.getType());
      }
    }
  }

  // (add (xor A, B) (and A, B)) --> (or A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(RHS, m_Xor(m_Value(A), m_Value(B))) &&
        (match(LHS, m_And(m_Specific(A), m_Specific(B))) ||
         match(LHS, m_And(m_Specific(B), m_Specific(A)))))
      return BinaryOperator::CreateOr(A, B);

    if (match(LHS, m_Xor(m_Value(A), m_Value(B))) &&
        (match(RHS, m_And(m_Specific(A), m_Specific(B))) ||
         match(RHS, m_And(m_Specific(B), m_Specific(A)))))
      return BinaryOperator::CreateOr(A, B);
  }

  // (add (or A, B) (and A, B)) --> (add A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(RHS, m_Or(m_Value(A), m_Value(B))) &&
        (match(LHS, m_And(m_Specific(A), m_Specific(B))) ||
         match(LHS, m_And(m_Specific(B), m_Specific(A))))) {
      auto *New = BinaryOperator::CreateAdd(A, B);
      New->setHasNoSignedWrap(I.hasNoSignedWrap());
      New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      return New;
    }

    if (match(LHS, m_Or(m_Value(A), m_Value(B))) &&
        (match(RHS, m_And(m_Specific(A), m_Specific(B))) ||
         match(RHS, m_And(m_Specific(B), m_Specific(A))))) {
      auto *New = BinaryOperator::CreateAdd(A, B);
      New->setHasNoSignedWrap(I.hasNoSignedWrap());
      New->setHasNoUnsignedWrap(I.hasNoUnsignedWrap());
      return New;
    }
  }

  // TODO(jingyue): Consider WillNotOverflowSignedAdd and
  // WillNotOverflowUnsignedAdd to reduce the number of invocations of
  // computeKnownBits.
  if (!I.hasNoSignedWrap() && WillNotOverflowSignedAdd(LHS, RHS, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() &&
      computeOverflowForUnsignedAdd(LHS, RHS, &I) ==
          OverflowResult::NeverOverflows) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFAdd(BinaryOperator &I) {
  bool Changed = SimplifyAssociativeOrCommutative(I);
  Value *LHS = I.getOperand(0), *RHS = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V =
          SimplifyFAddInst(LHS, RHS, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);

  if (isa<Constant>(RHS))
    if (Instruction *FoldedFAdd = foldOpWithConstantIntoOperand(I))
      return FoldedFAdd;

  // -A + B --> B - A
  // -A + -B --> -(A + B)
  if (Value *LHSV = dyn_castFNegVal(LHS)) {
    Instruction *RI = BinaryOperator::CreateFSub(RHS, LHSV);
    RI->copyFastMathFlags(&I);
    return RI;
  }

  // A + -B --> A - B
  if (!isa<Constant>(RHS))
    if (Value *V = dyn_castFNegVal(RHS)) {
      Instruction *RI = BinaryOperator::CreateFSub(LHS, V);
      RI->copyFastMathFlags(&I);
      return RI;
    }

  // Check for (fadd double (sitofp x), y), see if we can merge this into an
  // integer add followed by a promotion.
  if (SIToFPInst *LHSConv = dyn_cast<SIToFPInst>(LHS)) {
    // (fadd double (sitofp x), fpcst) --> (sitofp (add int x, intcst))
    // ... if the constant fits in the integer value. This is useful for things
    // like (double)(x & 1234) + 4.0 -> (double)((X & 1234)+4) which no longer
    // requires a constant pool load, and generally allows the add to be better
    // instcombined.
    if (ConstantFP *CFP = dyn_cast<ConstantFP>(RHS)) {
      Constant *CI =
        ConstantExpr::getFPToSI(CFP, LHSConv->getOperand(0)->getType());
      if (LHSConv->hasOneUse() &&
          ConstantExpr::getSIToFP(CI, I.getType()) == CFP &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0), CI, I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              CI, "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }

    // (fadd double (sitofp x), (sitofp y)) --> (sitofp (add int x, y))
    if (SIToFPInst *RHSConv = dyn_cast<SIToFPInst>(RHS)) {
      // Only do this if x/y have the same type, if at least one of them has a
      // single use (so we don't increase the number of int->fp conversions),
      // and if the integer add will not overflow.
      if (LHSConv->getOperand(0)->getType() ==
              RHSConv->getOperand(0)->getType() &&
          (LHSConv->hasOneUse() || RHSConv->hasOneUse()) &&
          WillNotOverflowSignedAdd(LHSConv->getOperand(0),
                                   RHSConv->getOperand(0), I)) {
        // Insert the new integer add.
        Value *NewAdd = Builder->CreateNSWAdd(LHSConv->getOperand(0),
                                              RHSConv->getOperand(0), "addconv");
        return new SIToFPInst(NewAdd, I.getType());
      }
    }
  }

  // select C, 0, B + select C, A, 0 -> select C, A, B
  {
    Value *A1 = nullptr, *B1 = nullptr, *C1 = nullptr;
    Value *A2 = nullptr, *B2 = nullptr, *C2 = nullptr;
    if (match(LHS, m_Select(m_Value(C1), m_Value(A1), m_Value(B1))) &&
        match(RHS, m_Select(m_Value(C2), m_Value(A2), m_Value(B2)))) {
      if (C1 == C2) {
        Constant *Z1 = nullptr, *Z2 = nullptr;
        Value *A = nullptr, *B = nullptr, *C = C1;
        if (match(A1, m_AnyZero()) && match(B2, m_AnyZero())) {
          Z1 = dyn_cast<Constant>(A1); A = A2;
          Z2 = dyn_cast<Constant>(B2); B = B1;
        } else if (match(B1, m_AnyZero()) && match(A2, m_AnyZero())) {
          Z1 = dyn_cast<Constant>(B1); B = B2;
          Z2 = dyn_cast<Constant>(A2); A = A1;
        }

        if (Z1 && Z2 &&
            (I.hasNoSignedZeros() ||
             (Z1->isNegativeZeroValue() && Z2->isNegativeZeroValue()))) {
          return SelectInst::Create(C, A, B);
        }
      }
    }
  }

  if (I.isFast()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return Changed ? &I : nullptr;
}

/// Optimize the difference of two pointers into the same array into a size.
/// Consider: &A[10] - &A[0]: we should compile this to "10". LHS/RHS are the
/// pointer operands to the ptrtoint instructions for the LHS/RHS of the
/// subtract.
///
Value *InstCombiner::OptimizePointerDifference(Value *LHS, Value *RHS,
                                               Type *Ty) {
  // If LHS is a gep based on RHS or RHS is a gep based on LHS, we can optimize
  // this.
  bool Swapped = false;
  GEPOperator *GEP1 = nullptr, *GEP2 = nullptr;

  // For now we require one side to be the base pointer "A" or a constant
  // GEP derived from it.
  if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
    // (gep X, ...) - X
    if (LHSGEP->getOperand(0) == RHS) {
      GEP1 = LHSGEP;
      Swapped = false;
    } else if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
      // (gep X, ...) - (gep X, ...)
      if (LHSGEP->getOperand(0)->stripPointerCasts() ==
          RHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = RHSGEP;
        GEP1 = LHSGEP;
        Swapped = false;
      }
    }
  }

  if (GEPOperator *RHSGEP = dyn_cast<GEPOperator>(RHS)) {
    // X - (gep X, ...)
    if (RHSGEP->getOperand(0) == LHS) {
      GEP1 = RHSGEP;
      Swapped = true;
    } else if (GEPOperator *LHSGEP = dyn_cast<GEPOperator>(LHS)) {
      // (gep X, ...) - (gep X, ...)
      if (RHSGEP->getOperand(0)->stripPointerCasts() ==
          LHSGEP->getOperand(0)->stripPointerCasts()) {
        GEP2 = LHSGEP;
        GEP1 = RHSGEP;
        Swapped = true;
      }
    }
  }

  // Avoid duplicating the arithmetic if GEP2 has non-constant indices and
  // multiple users.
  if (!GEP1 ||
      (GEP2 && !GEP2->hasAllConstantIndices() && !GEP2->hasOneUse()))
    return nullptr;

  // Emit the offset of the GEP as an intptr_t.
  Value *Result = EmitGEPOffset(GEP1);

  // If we had a constant expression GEP on the other side offsetting the
  // pointer, subtract it from the offset we have.
  if (GEP2) {
    Value *Offset = EmitGEPOffset(GEP2);
    Result = Builder->CreateSub(Result, Offset);
  }

  // If we have p - gep(p, ...) then we have to negate the result.
  if (Swapped)
    Result = Builder->CreateNeg(Result, "diff.neg");

  return Builder->CreateIntCast(Result, Ty, true);
}

Instruction *InstCombiner::visitSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V = SimplifySubInst(Op0, Op1, I.hasNoSignedWrap(),
                                 I.hasNoUnsignedWrap(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);

  // (A*B)-(A*C) -> A*(B-C) etc
  if (Value *V = SimplifyUsingDistributiveLaws(I))
    return replaceInstUsesWith(I, V);

  // If this is a 'B = x-(-A)', change to B = x+A.
  if (Value *V = dyn_castNegVal(Op1)) {
    BinaryOperator *Res = BinaryOperator::CreateAdd(Op0, V);

    if (const auto *BO = dyn_cast<BinaryOperator>(Op1)) {
      IGC_ASSERT_MESSAGE(BO->getOpcode() == Instruction::Sub, "Expected a subtraction operator!");
      if (BO->hasNoSignedWrap() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    } else {
      if (cast<Constant>(Op1)->isNotMinSignedValue() && I.hasNoSignedWrap())
        Res->setHasNoSignedWrap(true);
    }

    return Res;
  }

  if (I.getType()->isIntegerTy(1))
    return BinaryOperator::CreateXor(Op0, Op1);

  // Replace (-1 - A) with (~A).
  if (match(Op0, m_AllOnes()))
    return BinaryOperator::CreateNot(Op1);

  if (Constant *C = dyn_cast<Constant>(Op0)) {
    // C - ~X == X + (1+C)
    Value *X = nullptr;
    if (match(Op1, m_Not(m_Value(X))))
      return BinaryOperator::CreateAdd(X, AddOne(C));

    // Try to fold constant sub into select arguments.
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *R = FoldOpIntoSelect(I, SI))
        return R;

    // C-(X+C2) --> (C-C2)-X
    Constant *C2 = nullptr;
    if (match(Op1, m_Add(m_Value(X), m_Constant(C2))))
      return BinaryOperator::CreateSub(ConstantExpr::getSub(C, C2), X);

    if (SimplifyDemandedInstructionBits(I))
      return &I;

    // Fold (sub 0, (zext bool to B)) --> (sext bool to B)
    if (C->isNullValue() && match(Op1, m_ZExt(m_Value(X))))
      if (X->getType()->getScalarType()->isIntegerTy(1))
        return CastInst::CreateSExtOrBitCast(X, Op1->getType());

    // Fold (sub 0, (sext bool to B)) --> (zext bool to B)
    if (C->isNullValue() && match(Op1, m_SExt(m_Value(X))))
      if (X->getType()->getScalarType()->isIntegerTy(1))
        return CastInst::CreateZExtOrBitCast(X, Op1->getType());
  }

  const APInt *Op0C = nullptr;
  if (match(Op0, m_APInt(Op0C))) {
    unsigned BitWidth = I.getType()->getScalarSizeInBits();

    // -(X >>u 31) -> (X >>s 31)
    // -(X >>s 31) -> (X >>u 31)
    if (*Op0C == 0) {
      Value *X;
      const APInt *ShAmt;
      if (match(Op1, m_LShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateAShr(X, ShAmtOp);
      }
      if (match(Op1, m_AShr(m_Value(X), m_APInt(ShAmt))) &&
          *ShAmt == BitWidth - 1) {
        Value *ShAmtOp = cast<Instruction>(Op1)->getOperand(1);
        return BinaryOperator::CreateLShr(X, ShAmtOp);
      }
    }

    // Turn this into a xor if LHS is 2^n-1 and the remaining bits are known
    // zero.
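    // (e.g. "sub 7, X" where all bits of the result above the low three are
    // known zero: 7 | KnownZero is all-ones, no borrow can leave the low
    // three bits, and 7 - X == 7 ^ X)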
    if ((*Op0C + 1).isPowerOf2()) {
      APInt KnownZero(BitWidth, 0);
      APInt KnownOne(BitWidth, 0);
      computeKnownBits(&I, KnownZero, KnownOne, 0, &I);
      if ((*Op0C | KnownZero).isAllOnesValue())
        return BinaryOperator::CreateXor(Op1, Op0);
    }
  }

  {
    Value *Y = nullptr;
    // X-(X+Y) == -Y    X-(Y+X) == -Y
    if (match(Op1, m_Add(m_Specific(Op0), m_Value(Y))) ||
        match(Op1, m_Add(m_Value(Y), m_Specific(Op0))))
      return BinaryOperator::CreateNeg(Y);

    // (X-Y)-X == -Y
    if (match(Op0, m_Sub(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateNeg(Y);
  }

  // (sub (or A, B) (xor A, B)) --> (and A, B)
  {
    Value *A = nullptr, *B = nullptr;
    if (match(Op1, m_Xor(m_Value(A), m_Value(B))) &&
        (match(Op0, m_Or(m_Specific(A), m_Specific(B))) ||
         match(Op0, m_Or(m_Specific(B), m_Specific(A)))))
      return BinaryOperator::CreateAnd(A, B);
  }

  if (Op0->hasOneUse()) {
    Value *Y = nullptr;
    // ((X | Y) - X) --> (~X & Y)
    if (match(Op0, m_Or(m_Value(Y), m_Specific(Op1))) ||
        match(Op0, m_Or(m_Specific(Op1), m_Value(Y))))
      return BinaryOperator::CreateAnd(
          Y, Builder->CreateNot(Op1, Op1->getName() + ".not"));
  }

  if (Op1->hasOneUse()) {
    Value *X = nullptr, *Y = nullptr, *Z = nullptr;
    Constant *C = nullptr;
    Constant *CI = nullptr;

    // (X - (Y - Z)) --> (X + (Z - Y)).
    if (match(Op1, m_Sub(m_Value(Y), m_Value(Z))))
      return BinaryOperator::CreateAdd(Op0,
                                       Builder->CreateSub(Z, Y, Op1->getName()));

    // (X - (X & Y)) --> (X & ~Y)
    //
    if (match(Op1, m_And(m_Value(Y), m_Specific(Op0))) ||
        match(Op1, m_And(m_Specific(Op0), m_Value(Y))))
      return BinaryOperator::CreateAnd(Op0,
                                       Builder->CreateNot(Y, Y->getName() + ".not"));

    // 0 - (X sdiv C) -> (X sdiv -C) provided the negation doesn't overflow.
    if (match(Op1, m_SDiv(m_Value(X), m_Constant(C))) && match(Op0, m_Zero()) &&
        C->isNotMinSignedValue() && !C->isOneValue())
      return BinaryOperator::CreateSDiv(X, ConstantExpr::getNeg(C));

    // 0 - (X << Y) -> (-X << Y) when X is freely negatable.
    if (match(Op1, m_Shl(m_Value(X), m_Value(Y))) && match(Op0, m_Zero()))
      if (Value *XNeg = dyn_castNegVal(X))
        return BinaryOperator::CreateShl(XNeg, Y);

    // Subtracting -1/0 is the same as adding 1/0:
    // sub [nsw] Op0, sext(bool Y) -> add [nsw] Op0, zext(bool Y)
    // 'nuw' is dropped in favor of the canonical form.
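    // (illustrative: "sext i1 true" is -1, so "Op0 - sext(Y)" is Op0 + 1
    // when Y is true and Op0 + 0 otherwise, i.e. "Op0 + zext(Y)")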
    if (match(Op1, m_SExt(m_Value(Y))) &&
        Y->getType()->getScalarSizeInBits() == 1) {
      Value *Zext = Builder->CreateZExt(Y, I.getType());
      BinaryOperator *Add = BinaryOperator::CreateAdd(Op0, Zext);
      Add->setHasNoSignedWrap(I.hasNoSignedWrap());
      return Add;
    }

    // X - A*-B -> X + A*B
    // X - -A*B -> X + A*B
    Value *A = nullptr, *B = nullptr;
    if (match(Op1, m_Mul(m_Value(A), m_Neg(m_Value(B)))) ||
        match(Op1, m_Mul(m_Neg(m_Value(A)), m_Value(B))))
      return BinaryOperator::CreateAdd(Op0, Builder->CreateMul(A, B));

    // X - A*CI -> X + A*-CI
    // X - CI*A -> X + A*-CI
    if (match(Op1, m_Mul(m_Value(A), m_Constant(CI))) ||
        match(Op1, m_Mul(m_Constant(CI), m_Value(A)))) {
      Value *NewMul = Builder->CreateMul(A, ConstantExpr::getNeg(CI));
      return BinaryOperator::CreateAdd(Op0, NewMul);
    }
  }

  // Optimize the difference of two pointers into the same array into a size.
  // Consider: &A[10] - &A[0]: we should compile this to "10".
  Value *LHSOp = nullptr, *RHSOp = nullptr;
  if (match(Op0, m_PtrToInt(m_Value(LHSOp))) &&
      match(Op1, m_PtrToInt(m_Value(RHSOp))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  // trunc(p)-trunc(q) -> trunc(p-q)
  if (match(Op0, m_Trunc(m_PtrToInt(m_Value(LHSOp)))) &&
      match(Op1, m_Trunc(m_PtrToInt(m_Value(RHSOp)))))
    if (Value *Res = OptimizePointerDifference(LHSOp, RHSOp, I.getType()))
      return replaceInstUsesWith(I, Res);

  bool Changed = false;
  if (!I.hasNoSignedWrap() && WillNotOverflowSignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoSignedWrap(true);
  }
  if (!I.hasNoUnsignedWrap() && WillNotOverflowUnsignedSub(Op0, Op1, I)) {
    Changed = true;
    I.setHasNoUnsignedWrap(true);
  }

  return Changed ? &I : nullptr;
}

Instruction *InstCombiner::visitFSub(BinaryOperator &I) {
  Value *Op0 = I.getOperand(0), *Op1 = I.getOperand(1);

  if (Value *V = SimplifyVectorOp(I))
    return replaceInstUsesWith(I, V);

  if (Value *V =
          SimplifyFSubInst(Op0, Op1, I.getFastMathFlags(), DL, &TLI, &DT, &AC))
    return replaceInstUsesWith(I, V);

  // fsub nsz 0, X ==> fsub nsz -0.0, X
  if (I.getFastMathFlags().noSignedZeros() && match(Op0, m_Zero())) {
    // Subtraction from -0.0 is the canonical form of fneg.
    Instruction *NewI = BinaryOperator::CreateFNeg(Op1);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }

  if (isa<Constant>(Op0))
    if (SelectInst *SI = dyn_cast<SelectInst>(Op1))
      if (Instruction *NV = FoldOpIntoSelect(I, SI))
        return NV;

  // If this is a 'B = x-(-A)', change to B = x+A, potentially looking
  // through FP extensions/truncations along the way.
  if (Value *V = dyn_castFNegVal(Op1)) {
    Instruction *NewI = BinaryOperator::CreateFAdd(Op0, V);
    NewI->copyFastMathFlags(&I);
    return NewI;
  }
  if (FPTruncInst *FPTI = dyn_cast<FPTruncInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPTI->getOperand(0))) {
      Value *NewTrunc = Builder->CreateFPTrunc(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewTrunc);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  } else if (FPExtInst *FPEI = dyn_cast<FPExtInst>(Op1)) {
    if (Value *V = dyn_castFNegVal(FPEI->getOperand(0))) {
      Value *NewExt = Builder->CreateFPExt(V, I.getType());
      Instruction *NewI = BinaryOperator::CreateFAdd(Op0, NewExt);
      NewI->copyFastMathFlags(&I);
      return NewI;
    }
  }

  if (I.isFast()) {
    if (Value *V = FAddCombine(Builder).simplify(&I))
      return replaceInstUsesWith(I, V);
  }

  return nullptr;
}
