1 /*========================== begin_copyright_notice ============================
2
3 Copyright (C) 2018-2021 Intel Corporation
4
5 SPDX-License-Identifier: MIT
6
7 ============================= end_copyright_notice ===========================*/
8
9 /*========================== begin_copyright_notice ============================
10
11 This file is distributed under the University of Illinois Open Source License.
12 See LICENSE.TXT for details.
13
14 ============================= end_copyright_notice ===========================*/
15
16 // This file implements the visit functions for cast operations.
17
18 #include "common/LLVMWarningsPush.hpp"
19 #include "InstCombineInternal.h"
20 #include "llvm/ADT/SetVector.h"
21 #include "llvm/Analysis/ConstantFolding.h"
22 #include "llvm/Analysis/TargetLibraryInfo.h"
23 #include "llvm/IR/DataLayout.h"
24 #include "llvm/IR/DIBuilder.h"
25 #include "llvm/IR/PatternMatch.h"
26 #include "llvm/Support/KnownBits.h"
27 #include "Probe/Assertion.h"
28
29 using namespace llvm;
30 using namespace PatternMatch;
31 using namespace IGCombiner;
32
33 #define DEBUG_TYPE "instcombine"
34
35 /// Analyze 'Val', seeing if it is a simple linear expression.
36 /// If so, decompose it, returning some value X, such that Val is
37 /// X*Scale+Offset.
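/// For example, given non-wrapping (nuw/nsw) arithmetic, (X << 2) + 12
/// decomposes to X with Scale = 4 and Offset = 12, while a lone constant C
/// decomposes to 0 with Scale = 0 and Offset = C.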
38 ///
39 static Value* decomposeSimpleLinearExpr(Value* Val, unsigned& Scale,
40 uint64_t& Offset) {
41 if (ConstantInt * CI = dyn_cast<ConstantInt>(Val)) {
42 Offset = CI->getZExtValue();
43 Scale = 0;
44 return ConstantInt::get(Val->getType(), 0);
45 }
46
47 if (BinaryOperator * I = dyn_cast<BinaryOperator>(Val)) {
48 // Cannot look past anything that might overflow.
49 OverflowingBinaryOperator* OBI = dyn_cast<OverflowingBinaryOperator>(Val);
50 if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
51 Scale = 1;
52 Offset = 0;
53 return Val;
54 }
55
56 if (ConstantInt * RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
57 if (I->getOpcode() == Instruction::Shl) {
58 // This is a value scaled by '1 << the shift amt'.
59 Scale = UINT64_C(1) << RHS->getZExtValue();
60 Offset = 0;
61 return I->getOperand(0);
62 }
63
64 if (I->getOpcode() == Instruction::Mul) {
65 // This value is scaled by 'RHS'.
66 Scale = RHS->getZExtValue();
67 Offset = 0;
68 return I->getOperand(0);
69 }
70
71 if (I->getOpcode() == Instruction::Add) {
72 // We have X+C. Check to see if we really have (X*C2)+C1,
73 // where C1 is divisible by C2.
74 unsigned SubScale;
75 Value* SubVal =
76 decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
77 Offset += RHS->getZExtValue();
78 Scale = SubScale;
79 return SubVal;
80 }
81 }
82 }
83
84 // Otherwise, we can't look past this.
85 Scale = 1;
86 Offset = 0;
87 return Val;
88 }
89
90 /// If we find a cast of an allocation instruction, try to eliminate the cast by
91 /// moving the type information into the alloc.
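/// For example, a bitcast of 'alloca [4 x i8]' to i32* can be rewritten as an
/// 'alloca i32', which leaves the original bitcast trivially dead.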
92 Instruction* InstCombiner::PromoteCastOfAllocation(BitCastInst& CI,
93 AllocaInst& AI) {
94 PointerType* PTy = cast<PointerType>(CI.getType());
95
96 BuilderTy AllocaBuilder(Builder);
97 AllocaBuilder.SetInsertPoint(&AI);
98
99 // Get the type really allocated and the type casted to.
100 Type* AllocElTy = AI.getAllocatedType();
101 Type* CastElTy = PTy->getElementType();
102 if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;
103
104 unsigned AllocElTyAlign = DL.getABITypeAlignment(AllocElTy);
105 unsigned CastElTyAlign = DL.getABITypeAlignment(CastElTy);
106 if (CastElTyAlign < AllocElTyAlign) return nullptr;
107
108 // If the allocation has multiple uses, only promote it if we are strictly
109 // increasing the alignment of the resultant allocation. If we keep it the
110 // same, we open the door to infinite loops of various kinds.
111 if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;
112
113 uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy);
114 uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy);
115 if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;
116
117 // If the allocation has multiple uses, only promote it if we're not
118 // shrinking the amount of memory being allocated.
119 uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy);
120 uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy);
121 if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;
122
123 // See if we can satisfy the modulus by pulling a scale out of the array
124 // size argument.
125 unsigned ArraySizeScale;
126 uint64_t ArrayOffset;
127 Value* NumElements = // See if the array size is a decomposable linear expr.
128 decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);
129
130   // If we can now satisfy the modulus by using a non-1 scale, we really can
131 // do the xform.
132 if ((AllocElTySize * ArraySizeScale) % CastElTySize != 0 ||
133 (AllocElTySize * ArrayOffset) % CastElTySize != 0) return nullptr;
134
135 unsigned Scale = (AllocElTySize * ArraySizeScale) / CastElTySize;
136 Value* Amt = nullptr;
137 if (Scale == 1) {
138 Amt = NumElements;
139 }
140 else {
141 Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
142 // Insert before the alloca, not before the cast.
143 Amt = AllocaBuilder.CreateMul(Amt, NumElements);
144 }
145
146 if (uint64_t Offset = (AllocElTySize * ArrayOffset) / CastElTySize) {
147 Value* Off = ConstantInt::get(AI.getArraySize()->getType(),
148 Offset, true);
149 Amt = AllocaBuilder.CreateAdd(Amt, Off);
150 }
151
152 AllocaInst* New = AllocaBuilder.CreateAlloca(CastElTy, Amt);
153 New->setAlignment(AI.getAlignment());
154 New->takeName(&AI);
155 New->setUsedWithInAlloca(AI.isUsedWithInAlloca());
156
157 // If the allocation has multiple real uses, insert a cast and change all
158 // things that used it to use the new cast. This will also hack on CI, but it
159 // will die soon.
160 if (!AI.hasOneUse()) {
161 // New is the allocation instruction, pointer typed. AI is the original
162 // allocation instruction, also pointer typed. Thus, cast to use is BitCast.
163 Value* NewCast = AllocaBuilder.CreateBitCast(New, AI.getType(), "tmpcast");
164 replaceInstUsesWith(AI, NewCast);
165 }
166 return replaceInstUsesWith(CI, New);
167 }
168
169 /// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
170 /// true for, actually insert the code to evaluate the expression.
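/// For example, evaluating 'add i32 (zext i16 %a to i32), 42' in type i16
/// simply produces 'add i16 %a, 42'.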
171 Value* InstCombiner::EvaluateInDifferentType(Value* V, Type* Ty,
172 bool isSigned) {
173 if (Constant * C = dyn_cast<Constant>(V)) {
174 C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
175 // If we got a constantexpr back, try to simplify it with DL info.
176 if (Constant * FoldedC = ConstantFoldConstant(C, DL, &TLI))
177 C = FoldedC;
178 return C;
179 }
180
181 // Otherwise, it must be an instruction.
182 Instruction* I = cast<Instruction>(V);
183 Instruction* Res = nullptr;
184 unsigned Opc = I->getOpcode();
185 switch (Opc) {
186 case Instruction::Add:
187 case Instruction::Sub:
188 case Instruction::Mul:
189 case Instruction::And:
190 case Instruction::Or:
191 case Instruction::Xor:
192 case Instruction::AShr:
193 case Instruction::LShr:
194 case Instruction::Shl:
195 case Instruction::UDiv:
196 case Instruction::URem: {
197 Value* LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
198 Value* RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
199 Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
200 break;
201 }
202 case Instruction::Trunc:
203 case Instruction::ZExt:
204 case Instruction::SExt:
205 // If the source type of the cast is the type we're trying for then we can
206 // just return the source. There's no need to insert it because it is not
207 // new.
208 if (I->getOperand(0)->getType() == Ty)
209 return I->getOperand(0);
210
211 // Otherwise, must be the same type of cast, so just reinsert a new one.
212 // This also handles the case of zext(trunc(x)) -> zext(x).
213 Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
214 Opc == Instruction::SExt);
215 break;
216 case Instruction::Select: {
217 Value* True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
218 Value* False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
219 Res = SelectInst::Create(I->getOperand(0), True, False);
220 break;
221 }
222 case Instruction::PHI: {
223 PHINode* OPN = cast<PHINode>(I);
224 PHINode* NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
225 for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
226 Value* V =
227 EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
228 NPN->addIncoming(V, OPN->getIncomingBlock(i));
229 }
230 Res = NPN;
231 break;
232 }
233 default:
234 // TODO: Can handle more cases here.
235 IGC_ASSERT_EXIT_MESSAGE(0, "Unreachable!");
236 }
237
238 Res->takeName(I);
239 return InsertNewInstWith(Res, *I);
240 }
241
242 Instruction::CastOps InstCombiner::isEliminableCastPair(const CastInst* CI1,
243 const CastInst* CI2) {
244 Type* SrcTy = CI1->getSrcTy();
245 Type* MidTy = CI1->getDestTy();
246 Type* DstTy = CI2->getDestTy();
247
248 Instruction::CastOps firstOp = CI1->getOpcode();
249 Instruction::CastOps secondOp = CI2->getOpcode();
250 Type* SrcIntPtrTy =
251 SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
252 Type* MidIntPtrTy =
253 MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
254 Type* DstIntPtrTy =
255 DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
256 unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
257 DstTy, SrcIntPtrTy, MidIntPtrTy,
258 DstIntPtrTy);
259
260 // We don't want to form an inttoptr or ptrtoint that converts to an integer
261 // type that differs from the pointer size.
262 if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
263 (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
264 Res = 0;
265
266 return Instruction::CastOps(Res);
267 }
268
269 /// Implement the transforms common to all CastInst visitors.
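/// For example, an eliminable A->B->C cast pair such as
/// 'trunc (zext i8 %x to i32) to i16' becomes a single 'zext i8 %x to i16'.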
270 Instruction* InstCombiner::commonCastTransforms(CastInst& CI) {
271 Value* Src = CI.getOperand(0);
272
273 // Try to eliminate a cast of a cast.
274 if (auto * CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
275 if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
276 // The first cast (CSrc) is eliminable so we need to fix up or replace
277 // the second cast (CI). CSrc will then have a good chance of being dead.
278 auto* Ty = CI.getType();
279 auto* Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
280 // Point debug users of the dying cast to the new one.
281 if (CSrc->hasOneUse())
282 replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
283 return Res;
284 }
285 }
286
287 if (auto * Sel = dyn_cast<SelectInst>(Src)) {
288 // We are casting a select. Try to fold the cast into the select, but only
289 // if the select does not have a compare instruction with matching operand
290 // types. Creating a select with operands that are different sizes than its
291 // condition may inhibit other folds and lead to worse codegen.
292 auto* Cmp = dyn_cast<CmpInst>(Sel->getCondition());
293 if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType())
294 if (Instruction * NV = FoldOpIntoSelect(CI, Sel)) {
295 replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
296 return NV;
297 }
298 }
299
300 // If we are casting a PHI, then fold the cast into the PHI.
301 if (auto * PN = dyn_cast<PHINode>(Src)) {
302 // Don't do this if it would create a PHI node with an illegal type from a
303 // legal type.
304 if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
305 shouldChangeType(CI.getType(), Src->getType()))
306 if (Instruction * NV = foldOpIntoPhi(CI, PN))
307 return NV;
308 }
309
310 return nullptr;
311 }
312
313 /// Constants and extensions/truncates from the destination type are always
314 /// free to be evaluated in that type. This is a helper for canEvaluate*.
315 static bool canAlwaysEvaluateInType(Value* V, Type* Ty) {
316 if (isa<Constant>(V))
317 return true;
318 Value* X;
319 if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
320 X->getType() == Ty)
321 return true;
322
323 return false;
324 }
325
326 /// Filter out values that we can not evaluate in the destination type for free.
327 /// This is a helper for canEvaluate*.
328 static bool canNotEvaluateInType(Value* V, Type* Ty) {
329 IGC_ASSERT_MESSAGE(!isa<Constant>(V), "Constant should already be handled.");
330 if (!isa<Instruction>(V))
331 return true;
332 // We don't extend or shrink something that has multiple uses -- doing so
333 // would require duplicating the instruction which isn't profitable.
334 if (!V->hasOneUse())
335 return true;
336
337 return false;
338 }
339
340 /// Return true if we can evaluate the specified expression tree as type Ty
341 /// instead of its larger type, and arrive with the same value.
342 /// This is used by code that tries to eliminate truncates.
343 ///
344 /// Ty will always be a type smaller than V. We should return true if trunc(V)
345 /// can be computed by computing V in the smaller type. If V is an instruction,
346 /// then trunc(inst(x,y)) can be computed as inst(trunc(x),trunc(y)), which only
347 /// makes sense if x and y can be efficiently truncated.
348 ///
349 /// This function works on both vectors and scalars.
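/// For example, 'trunc (add i32 %x, %y) to i16' can be computed as
/// 'add i16 (trunc %x), (trunc %y)' when both operands are free to truncate.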
350 ///
351 static bool canEvaluateTruncated(Value* V, Type* Ty, InstCombiner& IC,
352 Instruction* CxtI) {
353 if (canAlwaysEvaluateInType(V, Ty))
354 return true;
355 if (canNotEvaluateInType(V, Ty))
356 return false;
357
358 auto* I = cast<Instruction>(V);
359 Type* OrigTy = V->getType();
360 switch (I->getOpcode()) {
361 case Instruction::Add:
362 case Instruction::Sub:
363 case Instruction::Mul:
364 case Instruction::And:
365 case Instruction::Or:
366 case Instruction::Xor:
367 // These operators can all arbitrarily be extended or truncated.
368 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
369 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
370
371 case Instruction::UDiv:
372 case Instruction::URem: {
373 // UDiv and URem can be truncated if all the truncated bits are zero.
374 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
375 uint32_t BitWidth = Ty->getScalarSizeInBits();
376 IGC_ASSERT_MESSAGE(BitWidth < OrigBitWidth, "Unexpected bitwidths!");
377 APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
378 if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
379 IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
380 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
381 canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
382 }
383 break;
384 }
385 case Instruction::Shl: {
386 // If we are truncating the result of this SHL, and if it's a shift of a
387 // constant amount, we can always perform a SHL in a smaller type.
388 const APInt* Amt;
389 if (match(I->getOperand(1), m_APInt(Amt))) {
390 uint32_t BitWidth = Ty->getScalarSizeInBits();
391 if (Amt->getLimitedValue(BitWidth) < BitWidth)
392 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
393 }
394 break;
395 }
396 case Instruction::LShr: {
397 // If this is a truncate of a logical shr, we can truncate it to a smaller
398 // lshr iff we know that the bits we would otherwise be shifting in are
399 // already zeros.
400 const APInt* Amt;
401 if (match(I->getOperand(1), m_APInt(Amt))) {
402 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
403 uint32_t BitWidth = Ty->getScalarSizeInBits();
404 if (Amt->getLimitedValue(BitWidth) < BitWidth &&
405 IC.MaskedValueIsZero(I->getOperand(0),
406 APInt::getBitsSetFrom(OrigBitWidth, BitWidth), 0, CxtI)) {
407 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
408 }
409 }
410 break;
411 }
412 case Instruction::AShr: {
413 // If this is a truncate of an arithmetic shr, we can truncate it to a
414 // smaller ashr iff we know that all the bits from the sign bit of the
415 // original type and the sign bit of the truncate type are similar.
416 // TODO: It is enough to check that the bits we would be shifting in are
417 // similar to sign bit of the truncate type.
418 const APInt* Amt;
419 if (match(I->getOperand(1), m_APInt(Amt))) {
420 uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
421 uint32_t BitWidth = Ty->getScalarSizeInBits();
422 if (Amt->getLimitedValue(BitWidth) < BitWidth &&
423 OrigBitWidth - BitWidth <
424 IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
425 return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI);
426 }
427 break;
428 }
429 case Instruction::Trunc:
430 // trunc(trunc(x)) -> trunc(x)
431 return true;
432 case Instruction::ZExt:
433 case Instruction::SExt:
434 // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
435 // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
436 return true;
437 case Instruction::Select: {
438 SelectInst* SI = cast<SelectInst>(I);
439 return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
440 canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
441 }
442 case Instruction::PHI: {
443 // We can change a phi if we can change all operands. Note that we never
444 // get into trouble with cyclic PHIs here because we only consider
445 // instructions with a single use.
446 PHINode* PN = cast<PHINode>(I);
447 for (Value* IncValue : PN->incoming_values())
448 if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
449 return false;
450 return true;
451 }
452 default:
453 // TODO: Can handle more cases here.
454 break;
455 }
456
457 return false;
458 }
459
460 /// Given a vector that is bitcast to an integer, optionally logically
461 /// right-shifted, and truncated, convert it to an extractelement.
462 /// Example (big endian):
463 /// trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
464 /// --->
465 /// extractelement <4 x i32> %X, 1
466 static Instruction* foldVecTruncToExtElt(TruncInst& Trunc, InstCombiner& IC) {
467 Value* TruncOp = Trunc.getOperand(0);
468 Type* DestType = Trunc.getType();
469 if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
470 return nullptr;
471
472 Value* VecInput = nullptr;
473 ConstantInt* ShiftVal = nullptr;
474 if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
475 m_LShr(m_BitCast(m_Value(VecInput)),
476 m_ConstantInt(ShiftVal)))) ||
477 !isa<VectorType>(VecInput->getType()))
478 return nullptr;
479
480 VectorType* VecType = cast<VectorType>(VecInput->getType());
481 unsigned VecWidth = VecType->getPrimitiveSizeInBits();
482 unsigned DestWidth = DestType->getPrimitiveSizeInBits();
483 unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;
484
485 if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
486 return nullptr;
487
488 // If the element type of the vector doesn't match the result type,
489 // bitcast it to a vector type that we can extract from.
490 unsigned NumVecElts = VecWidth / DestWidth;
491 if (VecType->getElementType() != DestType) {
492 VecType = VectorType::get(DestType, NumVecElts);
493 VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
494 }
495
496 unsigned Elt = ShiftAmount / DestWidth;
497 if (IC.getDataLayout().isBigEndian())
498 Elt = NumVecElts - 1 - Elt;
499
500 return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
501 }
502
503 /// Rotate left/right may occur in a wider type than necessary because of type
504 /// promotion rules. Try to narrow all of the component instructions.
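/// For example, an i8 rotate that was widened to i32 (zext the value, shift by
/// %s and by (8 - %s), 'or' the halves, then trunc back to i8) can be rebuilt
/// entirely with i8 operations.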
505 Instruction* InstCombiner::narrowRotate(TruncInst& Trunc) {
506 IGC_ASSERT_MESSAGE((isa<VectorType>(Trunc.getSrcTy()) || shouldChangeType(Trunc.getSrcTy(), Trunc.getType())), "Don't narrow to an illegal scalar type");
507
508 // First, find an or'd pair of opposite shifts with the same shifted operand:
509 // trunc (or (lshr ShVal, ShAmt0), (shl ShVal, ShAmt1))
510 Value* Or0, * Or1;
511 if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_Value(Or0), m_Value(Or1)))))
512 return nullptr;
513
514 Value* ShVal, * ShAmt0, * ShAmt1;
515 if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal), m_Value(ShAmt0)))) ||
516 !match(Or1, m_OneUse(m_LogicalShift(m_Specific(ShVal), m_Value(ShAmt1)))))
517 return nullptr;
518
519 auto ShiftOpcode0 = cast<BinaryOperator>(Or0)->getOpcode();
520 auto ShiftOpcode1 = cast<BinaryOperator>(Or1)->getOpcode();
521 if (ShiftOpcode0 == ShiftOpcode1)
522 return nullptr;
523
524 // The shift amounts must add up to the narrow bit width.
525 Value* ShAmt;
526 bool SubIsOnLHS;
527 Type* DestTy = Trunc.getType();
528 unsigned NarrowWidth = DestTy->getScalarSizeInBits();
529 if (match(ShAmt0,
530 m_OneUse(m_Sub(m_SpecificInt(NarrowWidth), m_Specific(ShAmt1))))) {
531 ShAmt = ShAmt1;
532 SubIsOnLHS = true;
533 }
534 else if (match(ShAmt1, m_OneUse(m_Sub(m_SpecificInt(NarrowWidth),
535 m_Specific(ShAmt0))))) {
536 ShAmt = ShAmt0;
537 SubIsOnLHS = false;
538 }
539 else {
540 return nullptr;
541 }
542
543 // The shifted value must have high zeros in the wide type. Typically, this
544 // will be a zext, but it could also be the result of an 'and' or 'shift'.
545 unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
546 APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
547 if (!MaskedValueIsZero(ShVal, HiBitMask, 0, &Trunc))
548 return nullptr;
549
550 // We have an unnecessarily wide rotate!
551 // trunc (or (lshr ShVal, ShAmt), (shl ShVal, BitWidth - ShAmt))
552 // Narrow it down to eliminate the zext/trunc:
553 // or (lshr trunc(ShVal), ShAmt0'), (shl trunc(ShVal), ShAmt1')
554 Value* NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
555 Value* NegShAmt = Builder.CreateNeg(NarrowShAmt);
556
557 // Mask both shift amounts to ensure there's no UB from oversized shifts.
558 Constant* MaskC = ConstantInt::get(DestTy, NarrowWidth - 1);
559 Value* MaskedShAmt = Builder.CreateAnd(NarrowShAmt, MaskC);
560 Value* MaskedNegShAmt = Builder.CreateAnd(NegShAmt, MaskC);
561
562 // Truncate the original value and use narrow ops.
563 Value* X = Builder.CreateTrunc(ShVal, DestTy);
564 Value* NarrowShAmt0 = SubIsOnLHS ? MaskedNegShAmt : MaskedShAmt;
565 Value* NarrowShAmt1 = SubIsOnLHS ? MaskedShAmt : MaskedNegShAmt;
566 Value* NarrowSh0 = Builder.CreateBinOp(ShiftOpcode0, X, NarrowShAmt0);
567 Value* NarrowSh1 = Builder.CreateBinOp(ShiftOpcode1, X, NarrowShAmt1);
568 return BinaryOperator::CreateOr(NarrowSh0, NarrowSh1);
569 }
570
571 /// Try to narrow the width of math or bitwise logic instructions by pulling a
572 /// truncate ahead of binary operators.
573 /// TODO: Transforms for truncated shifts should be moved into here.
574 Instruction* InstCombiner::narrowBinOp(TruncInst& Trunc) {
575 Type* SrcTy = Trunc.getSrcTy();
576 Type* DestTy = Trunc.getType();
577 if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
578 return nullptr;
579
580 BinaryOperator* BinOp;
581 if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
582 return nullptr;
583
584 Value* BinOp0 = BinOp->getOperand(0);
585 Value* BinOp1 = BinOp->getOperand(1);
586 switch (BinOp->getOpcode()) {
587 case Instruction::And:
588 case Instruction::Or:
589 case Instruction::Xor:
590 case Instruction::Add:
591 case Instruction::Sub:
592 case Instruction::Mul: {
593 Constant* C;
594 if (match(BinOp0, m_Constant(C))) {
595 // trunc (binop C, X) --> binop (trunc C', X)
596 Constant* NarrowC = ConstantExpr::getTrunc(C, DestTy);
597 Value* TruncX = Builder.CreateTrunc(BinOp1, DestTy);
598 return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
599 }
600 if (match(BinOp1, m_Constant(C))) {
601 // trunc (binop X, C) --> binop (trunc X, C')
602 Constant* NarrowC = ConstantExpr::getTrunc(C, DestTy);
603 Value* TruncX = Builder.CreateTrunc(BinOp0, DestTy);
604 return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
605 }
606 Value* X;
607 if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
608 // trunc (binop (ext X), Y) --> binop X, (trunc Y)
609 Value* NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
610 return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
611 }
612 if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
613 // trunc (binop Y, (ext X)) --> binop (trunc Y), X
614 Value* NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
615 return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
616 }
617 break;
618 }
619
620 default: break;
621 }
622
623 if (Instruction * NarrowOr = narrowRotate(Trunc))
624 return NarrowOr;
625
626 return nullptr;
627 }
628
629 /// Try to narrow the width of a splat shuffle. This could be generalized to any
630 /// shuffle with a constant operand, but we limit the transform to avoid
631 /// creating a shuffle type that targets may not be able to lower effectively.
632 static Instruction* shrinkSplatShuffle(TruncInst& Trunc,
633 InstCombiner::BuilderTy& Builder) {
634 auto* Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
635 if (Shuf && Shuf->hasOneUse() && isa<UndefValue>(Shuf->getOperand(1)) &&
636 Shuf->getMask()->getSplatValue() &&
637 Shuf->getType() == Shuf->getOperand(0)->getType()) {
638 // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Undef, SplatMask
639 Constant* NarrowUndef = UndefValue::get(Trunc.getType());
640 Value* NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
641 return new ShuffleVectorInst(NarrowOp, NarrowUndef, Shuf->getMask());
642 }
643
644 return nullptr;
645 }
646
647 /// Try to narrow the width of an insert element. This could be generalized for
648 /// any vector constant, but we limit the transform to insertion into undef to
649 /// avoid potential backend problems from unsupported insertion widths. This
650 /// could also be extended to handle the case of inserting a scalar constant
651 /// into a vector variable.
652 static Instruction* shrinkInsertElt(CastInst& Trunc,
653 InstCombiner::BuilderTy& Builder) {
654 Instruction::CastOps Opcode = Trunc.getOpcode();
655 IGC_ASSERT_MESSAGE((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc), "Unexpected instruction for shrinking");
656
657 auto* InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
658 if (!InsElt || !InsElt->hasOneUse())
659 return nullptr;
660
661 Type* DestTy = Trunc.getType();
662 Type* DestScalarTy = DestTy->getScalarType();
663 Value* VecOp = InsElt->getOperand(0);
664 Value* ScalarOp = InsElt->getOperand(1);
665 Value* Index = InsElt->getOperand(2);
666
667 if (isa<UndefValue>(VecOp)) {
668 // trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
669 // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
670 UndefValue* NarrowUndef = UndefValue::get(DestTy);
671 Value* NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
672 return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
673 }
674
675 return nullptr;
676 }
677
678 Instruction* InstCombiner::visitTrunc(TruncInst& CI) {
679 if (Instruction * Result = commonCastTransforms(CI))
680 return Result;
681
682 Value* Src = CI.getOperand(0);
683 Type* DestTy = CI.getType(), * SrcTy = Src->getType();
684
685 // Attempt to truncate the entire input expression tree to the destination
686 // type. Only do this if the dest type is a simple type, don't convert the
687 // expression tree to something weird like i93 unless the source is also
688 // strange.
689 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
690 canEvaluateTruncated(Src, DestTy, *this, &CI)) {
691
692     // If this cast is a truncate, evaluating in a different type always
693 // eliminates the cast, so it is always a win.
694 LLVM_DEBUG(
695 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
696 " to avoid cast: "
697 << CI << '\n');
698 Value* Res = EvaluateInDifferentType(Src, DestTy, false);
699 IGC_ASSERT(Res->getType() == DestTy);
700 return replaceInstUsesWith(CI, Res);
701 }
702
703 // Test if the trunc is the user of a select which is part of a
704 // minimum or maximum operation. If so, don't do any more simplification.
705 // Even simplifying demanded bits can break the canonical form of a
706 // min/max.
707 Value* LHS, * RHS;
708 if (SelectInst * SI = dyn_cast<SelectInst>(CI.getOperand(0)))
709 if (matchSelectPattern(SI, LHS, RHS).Flavor != SPF_UNKNOWN)
710 return nullptr;
711
712 // See if we can simplify any instructions used by the input whose sole
713 // purpose is to compute bits we don't care about.
714 if (SimplifyDemandedInstructionBits(CI))
715 return &CI;
716
717 // Canonicalize trunc x to i1 -> (icmp ne (and x, 1), 0), likewise for vector.
718 if (DestTy->getScalarSizeInBits() == 1) {
719 Constant* One = ConstantInt::get(SrcTy, 1);
720 Src = Builder.CreateAnd(Src, One);
721 Value* Zero = Constant::getNullValue(Src->getType());
722 return new ICmpInst(ICmpInst::ICMP_NE, Src, Zero);
723 }
724
725 // FIXME: Maybe combine the next two transforms to handle the no cast case
726 // more efficiently. Support vector types. Cleanup code by using m_OneUse.
727
728 // Transform trunc(lshr (zext A), Cst) to eliminate one type conversion.
729 Value* A = nullptr; ConstantInt* Cst = nullptr;
730 if (Src->hasOneUse() &&
731 match(Src, m_LShr(m_ZExt(m_Value(A)), m_ConstantInt(Cst)))) {
732 // We have three types to worry about here, the type of A, the source of
733 // the truncate (MidSize), and the destination of the truncate. We know that
734 // ASize < MidSize and MidSize > ResultSize, but don't know the relation
735 // between ASize and ResultSize.
736 unsigned ASize = A->getType()->getPrimitiveSizeInBits();
737
738 // If the shift amount is larger than the size of A, then the result is
739 // known to be zero because all the input bits got shifted out.
740 if (Cst->getZExtValue() >= ASize)
741 return replaceInstUsesWith(CI, Constant::getNullValue(DestTy));
742
743 // Since we're doing an lshr and a zero extend, and know that the shift
744 // amount is smaller than ASize, it is always safe to do the shift in A's
745 // type, then zero extend or truncate to the result.
746 Value* Shift = Builder.CreateLShr(A, Cst->getZExtValue());
747 Shift->takeName(Src);
748 return CastInst::CreateIntegerCast(Shift, DestTy, false);
749 }
750
751 // FIXME: We should canonicalize to zext/trunc and remove this transform.
752 // Transform trunc(lshr (sext A), Cst) to ashr A, Cst to eliminate type
753 // conversion.
754 // It works because bits coming from sign extension have the same value as
755 // the sign bit of the original value; performing ashr instead of lshr
756 // generates bits of the same value as the sign bit.
757 if (Src->hasOneUse() &&
758 match(Src, m_LShr(m_SExt(m_Value(A)), m_ConstantInt(Cst)))) {
759 Value* SExt = cast<Instruction>(Src)->getOperand(0);
760 const unsigned SExtSize = SExt->getType()->getPrimitiveSizeInBits();
761 const unsigned ASize = A->getType()->getPrimitiveSizeInBits();
762 const unsigned CISize = CI.getType()->getPrimitiveSizeInBits();
763 const unsigned MaxAmt = SExtSize - std::max(CISize, ASize);
764 unsigned ShiftAmt = Cst->getZExtValue();
765
766 // This optimization can be only performed when zero bits generated by
767 // the original lshr aren't pulled into the value after truncation, so we
768 // can only shift by values no larger than the number of extension bits.
769 // FIXME: Instead of bailing when the shift is too large, use and to clear
770 // the extra bits.
771 if (ShiftAmt <= MaxAmt) {
772 if (CISize == ASize)
773 return BinaryOperator::CreateAShr(A, ConstantInt::get(CI.getType(),
774 std::min(ShiftAmt, ASize - 1)));
775 if (SExt->hasOneUse()) {
776 Value* Shift = Builder.CreateAShr(A, std::min(ShiftAmt, ASize - 1));
777 Shift->takeName(Src);
778 return CastInst::CreateIntegerCast(Shift, CI.getType(), true);
779 }
780 }
781 }
782
783 if (Instruction * I = narrowBinOp(CI))
784 return I;
785
786 if (Instruction * I = shrinkSplatShuffle(CI, Builder))
787 return I;
788
789 if (Instruction * I = shrinkInsertElt(CI, Builder))
790 return I;
791
792 if (Src->hasOneUse() && isa<IntegerType>(SrcTy) &&
793 shouldChangeType(SrcTy, DestTy)) {
794 // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
795 // dest type is native and cst < dest size.
796 if (match(Src, m_Shl(m_Value(A), m_ConstantInt(Cst))) &&
797 !match(A, m_Shr(m_Value(), m_Constant()))) {
798 // Skip shifts of shift by constants. It undoes a combine in
799 // FoldShiftByConstant and is the extend in reg pattern.
800 const unsigned DestSize = DestTy->getScalarSizeInBits();
801 if (Cst->getValue().ult(DestSize)) {
802 Value* NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
803
804 return BinaryOperator::Create(
805 Instruction::Shl, NewTrunc,
806 ConstantInt::get(DestTy, Cst->getValue().trunc(DestSize)));
807 }
808 }
809 }
810
811 if (Instruction * I = foldVecTruncToExtElt(CI, *this))
812 return I;
813
814 return nullptr;
815 }
816
817 Instruction* InstCombiner::transformZExtICmp(ICmpInst* ICI, ZExtInst& CI,
818 bool DoTransform) {
819 // If we are just checking for a icmp eq of a single bit and zext'ing it
820 // to an integer, then shift the bit to the appropriate place and then
821 // cast to integer to avoid the comparison.
822 const APInt* Op1CV;
823 if (match(ICI->getOperand(1), m_APInt(Op1CV))) {
824
825 // zext (x <s 0) to i32 --> x>>u31 true if signbit set.
826 // zext (x >s -1) to i32 --> (x>>u31)^1 true if signbit clear.
827 if ((ICI->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isNullValue()) ||
828 (ICI->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnesValue())) {
829 if (!DoTransform) return ICI;
830
831 Value* In = ICI->getOperand(0);
832 Value* Sh = ConstantInt::get(In->getType(),
833 In->getType()->getScalarSizeInBits() - 1);
834 In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
835 if (In->getType() != CI.getType())
836 In = Builder.CreateIntCast(In, CI.getType(), false /*ZExt*/);
837
838 if (ICI->getPredicate() == ICmpInst::ICMP_SGT) {
839 Constant* One = ConstantInt::get(In->getType(), 1);
840 In = Builder.CreateXor(In, One, In->getName() + ".not");
841 }
842
843 return replaceInstUsesWith(CI, In);
844 }
845
846 // zext (X == 0) to i32 --> X^1 iff X has only the low bit set.
847 // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
848 // zext (X == 1) to i32 --> X iff X has only the low bit set.
849 // zext (X == 2) to i32 --> X>>1 iff X has only the 2nd bit set.
850 // zext (X != 0) to i32 --> X iff X has only the low bit set.
851 // zext (X != 0) to i32 --> X>>1 iff X has only the 2nd bit set.
852 // zext (X != 1) to i32 --> X^1 iff X has only the low bit set.
853 // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
854 if ((Op1CV->isNullValue() || Op1CV->isPowerOf2()) &&
855 // This only works for EQ and NE
856 ICI->isEquality()) {
857       // If Op1C is some other power of two, convert:
858 KnownBits Known = computeKnownBits(ICI->getOperand(0), 0, &CI);
859
860 APInt KnownZeroMask(~Known.Zero);
861 if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
862 if (!DoTransform) return ICI;
863
864 bool isNE = ICI->getPredicate() == ICmpInst::ICMP_NE;
865 if (!Op1CV->isNullValue() && (*Op1CV != KnownZeroMask)) {
866 // (X&4) == 2 --> false
867 // (X&4) != 2 --> true
868 Constant* Res = ConstantInt::get(CI.getType(), isNE);
869 return replaceInstUsesWith(CI, Res);
870 }
871
872 uint32_t ShAmt = KnownZeroMask.logBase2();
873 Value* In = ICI->getOperand(0);
874 if (ShAmt) {
875 // Perform a logical shr by shiftamt.
876 // Insert the shift to put the result in the low bit.
877 In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
878 In->getName() + ".lobit");
879 }
880
881 if (!Op1CV->isNullValue() == isNE) { // Toggle the low bit.
882 Constant* One = ConstantInt::get(In->getType(), 1);
883 In = Builder.CreateXor(In, One);
884 }
885
886 if (CI.getType() == In->getType())
887 return replaceInstUsesWith(CI, In);
888
889 Value* IntCast = Builder.CreateIntCast(In, CI.getType(), false);
890 return replaceInstUsesWith(CI, IntCast);
891 }
892 }
893 }
894
895 // icmp ne A, B is equal to xor A, B when A and B only really have one bit.
896 // It is also profitable to transform icmp eq into not(xor(A, B)) because that
897 // may lead to additional simplifications.
898 if (ICI->isEquality() && CI.getType() == ICI->getOperand(0)->getType()) {
899 if (IntegerType * ITy = dyn_cast<IntegerType>(CI.getType())) {
900 Value* LHS = ICI->getOperand(0);
901 Value* RHS = ICI->getOperand(1);
902
903 KnownBits KnownLHS = computeKnownBits(LHS, 0, &CI);
904 KnownBits KnownRHS = computeKnownBits(RHS, 0, &CI);
905
906 if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
907 APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
908 APInt UnknownBit = ~KnownBits;
909 if (UnknownBit.countPopulation() == 1) {
910 if (!DoTransform) return ICI;
911
912 Value* Result = Builder.CreateXor(LHS, RHS);
913
914 // Mask off any bits that are set and won't be shifted away.
915 if (KnownLHS.One.uge(UnknownBit))
916 Result = Builder.CreateAnd(Result,
917 ConstantInt::get(ITy, UnknownBit));
918
919 // Shift the bit we're testing down to the lsb.
920 Result = Builder.CreateLShr(
921 Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));
922
923 if (ICI->getPredicate() == ICmpInst::ICMP_EQ)
924 Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
925 Result->takeName(ICI);
926 return replaceInstUsesWith(CI, Result);
927 }
928 }
929 }
930 }
931
932 return nullptr;
933 }
934
935 /// Determine if the specified value can be computed in the specified wider type
936 /// and produce the same low bits. If not, return false.
937 ///
938 /// If this function returns true, it can also return a non-zero number of bits
939 /// (in BitsToClear) which indicates that the value it computes is correct for
940 /// the zero extend, but that the additional BitsToClear bits need to be zero'd
941 /// out. For example, to promote something like:
942 ///
943 /// %B = trunc i64 %A to i32
944 /// %C = lshr i32 %B, 8
945 /// %E = zext i32 %C to i64
946 ///
947 /// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
948 /// set to 8 to indicate that the promoted value needs to have bits 24-31
949 /// cleared in addition to bits 32-63. Since an 'and' will be generated to
950 /// clear the top bits anyway, doing this has no extra cost.
951 ///
952 /// This function works on both vectors and scalars.
953 static bool canEvaluateZExtd(Value* V, Type* Ty, unsigned& BitsToClear,
954 InstCombiner& IC, Instruction* CxtI) {
955 BitsToClear = 0;
956 if (canAlwaysEvaluateInType(V, Ty))
957 return true;
958 if (canNotEvaluateInType(V, Ty))
959 return false;
960
961 auto* I = cast<Instruction>(V);
962 unsigned Tmp;
963 switch (I->getOpcode()) {
964 case Instruction::ZExt: // zext(zext(x)) -> zext(x).
965 case Instruction::SExt: // zext(sext(x)) -> sext(x).
966 case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
967 return true;
968 case Instruction::And:
969 case Instruction::Or:
970 case Instruction::Xor:
971 case Instruction::Add:
972 case Instruction::Sub:
973 case Instruction::Mul:
974 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
975 !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
976 return false;
977 // These can all be promoted if neither operand has 'bits to clear'.
978 if (BitsToClear == 0 && Tmp == 0)
979 return true;
980
981 // If the operation is an AND/OR/XOR and the bits to clear are zero in the
982 // other side, BitsToClear is ok.
983 if (Tmp == 0 && I->isBitwiseLogicOp()) {
984 // We use MaskedValueIsZero here for generality, but the case we care
985 // about the most is constant RHS.
986 unsigned VSize = V->getType()->getScalarSizeInBits();
987 if (IC.MaskedValueIsZero(I->getOperand(1),
988 APInt::getHighBitsSet(VSize, BitsToClear),
989 0, CxtI)) {
990 // If this is an And instruction and all of the BitsToClear are
991 // known to be zero we can reset BitsToClear.
992 if (I->getOpcode() == Instruction::And)
993 BitsToClear = 0;
994 return true;
995 }
996 }
997
998 // Otherwise, we don't know how to analyze this BitsToClear case yet.
999 return false;
1000
1001 case Instruction::Shl: {
1002 // We can promote shl(x, cst) if we can promote x. Since shl overwrites the
1003 // upper bits we can reduce BitsToClear by the shift amount.
1004 const APInt* Amt;
1005 if (match(I->getOperand(1), m_APInt(Amt))) {
1006 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
1007 return false;
1008 uint64_t ShiftAmt = Amt->getZExtValue();
1009 BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
1010 return true;
1011 }
1012 return false;
1013 }
1014 case Instruction::LShr: {
1015 // We can promote lshr(x, cst) if we can promote x. This requires the
1016 // ultimate 'and' to clear out the high zero bits we're clearing out though.
1017 const APInt* Amt;
1018 if (match(I->getOperand(1), m_APInt(Amt))) {
1019 if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
1020 return false;
1021 BitsToClear += Amt->getZExtValue();
1022 if (BitsToClear > V->getType()->getScalarSizeInBits())
1023 BitsToClear = V->getType()->getScalarSizeInBits();
1024 return true;
1025 }
1026 // Cannot promote variable LSHR.
1027 return false;
1028 }
1029 case Instruction::Select:
1030 if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
1031 !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
1032 // TODO: If important, we could handle the case when the BitsToClear are
1033 // known zero in the disagreeing side.
1034 Tmp != BitsToClear)
1035 return false;
1036 return true;
1037
1038 case Instruction::PHI: {
1039 // We can change a phi if we can change all operands. Note that we never
1040 // get into trouble with cyclic PHIs here because we only consider
1041 // instructions with a single use.
1042 PHINode* PN = cast<PHINode>(I);
1043 if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
1044 return false;
1045 for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
1046 if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
1047 // TODO: If important, we could handle the case when the BitsToClear
1048 // are known zero in the disagreeing input.
1049 Tmp != BitsToClear)
1050 return false;
1051 return true;
1052 }
1053 default:
1054 // TODO: Can handle more cases here.
1055 return false;
1056 }
1057 }
1058
1059 Instruction* InstCombiner::visitZExt(ZExtInst& CI) {
1060 // If this zero extend is only used by a truncate, let the truncate be
1061 // eliminated before we try to optimize this zext.
1062 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
1063 return nullptr;
1064
1065   // If one of the common conversions will work, do it.
1066 if (Instruction * Result = commonCastTransforms(CI))
1067 return Result;
1068
1069 Value* Src = CI.getOperand(0);
1070 Type* SrcTy = Src->getType(), * DestTy = CI.getType();
1071
1072 // Attempt to extend the entire input expression tree to the destination
1073 // type. Only do this if the dest type is a simple type, don't convert the
1074 // expression tree to something weird like i93 unless the source is also
1075 // strange.
1076 unsigned BitsToClear;
1077 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
1078 canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
1079 IGC_ASSERT_MESSAGE(BitsToClear <= SrcTy->getScalarSizeInBits(), "Can't clear more bits than in SrcTy");
1080
1081 // Okay, we can transform this! Insert the new expression now.
1082 LLVM_DEBUG(
1083 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1084 " to avoid zero extend: "
1085 << CI << '\n');
1086 Value* Res = EvaluateInDifferentType(Src, DestTy, false);
1087 IGC_ASSERT(Res->getType() == DestTy);
1088
1089 // Preserve debug values referring to Src if the zext is its last use.
1090 if (auto * SrcOp = dyn_cast<Instruction>(Src))
1091 if (SrcOp->hasOneUse())
1092 replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);
1093
1094 uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
1095 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
1096
1097 // If the high bits are already filled with zeros, just replace this
1098 // cast with the result.
1099 if (MaskedValueIsZero(Res,
1100 APInt::getHighBitsSet(DestBitSize,
1101 DestBitSize - SrcBitsKept),
1102 0, &CI))
1103 return replaceInstUsesWith(CI, Res);
1104
1105 // We need to emit an AND to clear the high bits.
1106 Constant* C = ConstantInt::get(Res->getType(),
1107 APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
1108 return BinaryOperator::CreateAnd(Res, C);
1109 }
1110
1111 // If this is a TRUNC followed by a ZEXT then we are dealing with integral
1112 // types and if the sizes are just right we can convert this into a logical
1113 // 'and' which will be much cheaper than the pair of casts.
1114 if (TruncInst * CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
1115 // TODO: Subsume this into EvaluateInDifferentType.
1116
1117 // Get the sizes of the types involved. We know that the intermediate type
1118 // will be smaller than A or C, but don't know the relation between A and C.
1119 Value* A = CSrc->getOperand(0);
1120 unsigned SrcSize = A->getType()->getScalarSizeInBits();
1121 unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
1122 unsigned DstSize = CI.getType()->getScalarSizeInBits();
1123 // If we're actually extending zero bits, then if
1124 // SrcSize < DstSize: zext(a & mask)
1125 // SrcSize == DstSize: a & mask
1126 // SrcSize > DstSize: trunc(a) & mask
1127 if (SrcSize < DstSize) {
1128 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
1129 Constant* AndConst = ConstantInt::get(A->getType(), AndValue);
1130 Value* And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
1131 return new ZExtInst(And, CI.getType());
1132 }
1133
1134 if (SrcSize == DstSize) {
1135 APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
1136 return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
1137 AndValue));
1138 }
1139 if (SrcSize > DstSize) {
1140 Value* Trunc = Builder.CreateTrunc(A, CI.getType());
1141 APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
1142 return BinaryOperator::CreateAnd(Trunc,
1143 ConstantInt::get(Trunc->getType(),
1144 AndValue));
1145 }
1146 }
1147
1148 if (ICmpInst * ICI = dyn_cast<ICmpInst>(Src))
1149 return transformZExtICmp(ICI, CI);
1150
1151 BinaryOperator* SrcI = dyn_cast<BinaryOperator>(Src);
1152 if (SrcI && SrcI->getOpcode() == Instruction::Or) {
1153 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp) if at least one
1154     // of the (zext icmp) casts can be eliminated. If so, immediately perform the
1155     // corresponding elimination.
1156 ICmpInst* LHS = dyn_cast<ICmpInst>(SrcI->getOperand(0));
1157 ICmpInst* RHS = dyn_cast<ICmpInst>(SrcI->getOperand(1));
1158 if (LHS && RHS && LHS->hasOneUse() && RHS->hasOneUse() &&
1159 (transformZExtICmp(LHS, CI, false) ||
1160 transformZExtICmp(RHS, CI, false))) {
1161 // zext (or icmp, icmp) -> or (zext icmp), (zext icmp)
1162 Value* LCast = Builder.CreateZExt(LHS, CI.getType(), LHS->getName());
1163 Value* RCast = Builder.CreateZExt(RHS, CI.getType(), RHS->getName());
1164 BinaryOperator* Or = BinaryOperator::Create(Instruction::Or, LCast, RCast);
1165
1166 // Perform the elimination.
1167 if (auto * LZExt = dyn_cast<ZExtInst>(LCast))
1168 transformZExtICmp(LHS, *LZExt);
1169 if (auto * RZExt = dyn_cast<ZExtInst>(RCast))
1170 transformZExtICmp(RHS, *RZExt);
1171
1172 return Or;
1173 }
1174 }
1175
1176 // zext(trunc(X) & C) -> (X & zext(C)).
1177 Constant* C;
1178 Value* X;
1179 if (SrcI &&
1180 match(SrcI, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
1181 X->getType() == CI.getType())
1182 return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));
1183
1184 // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
1185 Value* And;
1186 if (SrcI && match(SrcI, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
1187 match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
1188 X->getType() == CI.getType()) {
1189 Constant* ZC = ConstantExpr::getZExt(C, CI.getType());
1190 return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
1191 }
1192
1193 return nullptr;
1194 }
1195
1196 /// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
1197 Instruction* InstCombiner::transformSExtICmp(ICmpInst* ICI, Instruction& CI) {
1198 Value* Op0 = ICI->getOperand(0), * Op1 = ICI->getOperand(1);
1199 ICmpInst::Predicate Pred = ICI->getPredicate();
1200
1201 // Don't bother if Op1 isn't of vector or integer type.
1202 if (!Op1->getType()->isIntOrIntVectorTy())
1203 return nullptr;
1204
1205 if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
1206 (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
1207 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative
1208 // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
1209 Value* Sh = ConstantInt::get(Op0->getType(),
1210 Op0->getType()->getScalarSizeInBits() - 1);
1211 Value* In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
1212 if (In->getType() != CI.getType())
1213 In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
1214
1215 if (Pred == ICmpInst::ICMP_SGT)
1216 In = Builder.CreateNot(In, In->getName() + ".not");
1217 return replaceInstUsesWith(CI, In);
1218 }
1219
1220 if (ConstantInt * Op1C = dyn_cast<ConstantInt>(Op1)) {
1221 // If we know that only one bit of the LHS of the icmp can be set and we
1222 // have an equality comparison with zero or a power of 2, we can transform
1223 // the icmp and sext into bitwise/integer operations.
1224 if (ICI->hasOneUse() &&
1225 ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())) {
1226 KnownBits Known = computeKnownBits(Op0, 0, &CI);
1227
1228 APInt KnownZeroMask(~Known.Zero);
1229 if (KnownZeroMask.isPowerOf2()) {
1230 Value* In = ICI->getOperand(0);
1231
1232 // If the icmp tests for a known zero bit we can constant fold it.
1233 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1234 Value* V = Pred == ICmpInst::ICMP_NE ?
1235 ConstantInt::getAllOnesValue(CI.getType()) :
1236 ConstantInt::getNullValue(CI.getType());
1237 return replaceInstUsesWith(CI, V);
1238 }
1239
1240 if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
1241 // sext ((x & 2^n) == 0) -> (x >> n) - 1
1242 // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
1243 unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
1244 // Perform a right shift to place the desired bit in the LSB.
1245 if (ShiftAmt)
1246 In = Builder.CreateLShr(In,
1247 ConstantInt::get(In->getType(), ShiftAmt));
1248
1249 // At this point "In" is either 1 or 0. Subtract 1 to turn
1250 // {1, 0} -> {0, -1}.
1251 In = Builder.CreateAdd(In,
1252 ConstantInt::getAllOnesValue(In->getType()),
1253 "sext");
1254 }
1255 else {
1256 // sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1
1257 // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
1258 unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
1259 // Perform a left shift to place the desired bit in the MSB.
1260 if (ShiftAmt)
1261 In = Builder.CreateShl(In,
1262 ConstantInt::get(In->getType(), ShiftAmt));
1263
1264 // Distribute the bit over the whole bit width.
1265 In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
1266 KnownZeroMask.getBitWidth() - 1), "sext");
1267 }
1268
1269 if (CI.getType() == In->getType())
1270 return replaceInstUsesWith(CI, In);
1271 return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
1272 }
1273 }
1274 }
1275
1276 return nullptr;
1277 }
1278
1279 /// Return true if we can take the specified value and return it as type Ty
1280 /// without inserting any new casts and without changing the value of the common
1281 /// low bits. This is used by code that tries to promote integer operations to
1282 /// a wider type, which will allow us to eliminate the extension.
1283 ///
1284 /// This function works on both vectors and scalars.
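/// For example, 'add i16 %x, %y' can instead be evaluated in i32 as
/// 'add i32 (sext %x), (sext %y)' while preserving the low 16 bits.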
1285 ///
1286 static bool canEvaluateSExtd(Value* V, Type* Ty) {
1287 IGC_ASSERT_MESSAGE(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits(), "Can't sign extend type to a smaller type");
1288 if (canAlwaysEvaluateInType(V, Ty))
1289 return true;
1290 if (canNotEvaluateInType(V, Ty))
1291 return false;
1292
1293 auto* I = cast<Instruction>(V);
1294 switch (I->getOpcode()) {
1295 case Instruction::SExt: // sext(sext(x)) -> sext(x)
1296 case Instruction::ZExt: // sext(zext(x)) -> zext(x)
1297 case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
1298 return true;
1299 case Instruction::And:
1300 case Instruction::Or:
1301 case Instruction::Xor:
1302 case Instruction::Add:
1303 case Instruction::Sub:
1304 case Instruction::Mul:
1305 // These operators can all arbitrarily be extended if their inputs can.
1306 return canEvaluateSExtd(I->getOperand(0), Ty) &&
1307 canEvaluateSExtd(I->getOperand(1), Ty);
1308
1309 //case Instruction::Shl: TODO
1310 //case Instruction::LShr: TODO
1311
1312 case Instruction::Select:
1313 return canEvaluateSExtd(I->getOperand(1), Ty) &&
1314 canEvaluateSExtd(I->getOperand(2), Ty);
1315
1316 case Instruction::PHI: {
1317 // We can change a phi if we can change all operands. Note that we never
1318 // get into trouble with cyclic PHIs here because we only consider
1319 // instructions with a single use.
1320 PHINode* PN = cast<PHINode>(I);
1321 for (Value* IncValue : PN->incoming_values())
1322 if (!canEvaluateSExtd(IncValue, Ty)) return false;
1323 return true;
1324 }
1325 default:
1326 // TODO: Can handle more cases here.
1327 break;
1328 }
1329
1330 return false;
1331 }
1332
1333 Instruction* InstCombiner::visitSExt(SExtInst& CI) {
1334 // If this sign extend is only used by a truncate, let the truncate be
1335 // eliminated before we try to optimize this sext.
1336 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
1337 return nullptr;
1338
1339 if (Instruction * I = commonCastTransforms(CI))
1340 return I;
1341
1342 Value* Src = CI.getOperand(0);
1343 Type* SrcTy = Src->getType(), * DestTy = CI.getType();
1344
1345 // If we know that the value being extended is positive, we can use a zext
1346 // instead.
1347 KnownBits Known = computeKnownBits(Src, 0, &CI);
1348 if (Known.isNonNegative()) {
1349 Value* ZExt = Builder.CreateZExt(Src, DestTy);
1350 return replaceInstUsesWith(CI, ZExt);
1351 }
1352
1353 // Attempt to extend the entire input expression tree to the destination
1354 // type. Only do this if the dest type is a simple type, don't convert the
1355 // expression tree to something weird like i93 unless the source is also
1356 // strange.
1357 if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
1358 canEvaluateSExtd(Src, DestTy)) {
1359 // Okay, we can transform this! Insert the new expression now.
1360 LLVM_DEBUG(
1361 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1362 " to avoid sign extend: "
1363 << CI << '\n');
1364 Value* Res = EvaluateInDifferentType(Src, DestTy, true);
1365 IGC_ASSERT(Res->getType() == DestTy);
1366
1367 uint32_t SrcBitSize = SrcTy->getScalarSizeInBits();
1368 uint32_t DestBitSize = DestTy->getScalarSizeInBits();
1369
1370 // If the high bits are already filled with sign bit, just replace this
1371 // cast with the result.
1372 if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
1373 return replaceInstUsesWith(CI, Res);
1374
1375 // We need to emit a shl + ashr to do the sign extend.
1376 Value* ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1377 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
1378 ShAmt);
1379 }
1380
1381 // If the input is a trunc from the destination type, then turn sext(trunc(x))
1382 // into shifts.
1383 Value* X;
1384 if (match(Src, m_OneUse(m_Trunc(m_Value(X)))) && X->getType() == DestTy) {
1385 // sext(trunc(X)) --> ashr(shl(X, C), C)
1386 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
1387 unsigned DestBitSize = DestTy->getScalarSizeInBits();
1388 Constant* ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1389 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
1390 }
1391
1392 if (ICmpInst * ICI = dyn_cast<ICmpInst>(Src))
1393 return transformSExtICmp(ICI, CI);
1394
1395 // If the input is a shl/ashr pair of the same constant, then this is a sign
1396 // extension from a smaller value. If we could trust arbitrary bitwidth
1397 // integers, we could turn this into a truncate to the smaller bit and then
1398 // use a sext for the whole extension. Since we don't, look deeper and check
1399 // for a truncate. If the source and dest are the same type, eliminate the
1400 // trunc and extend and just do shifts. For example, turn:
1401 // %a = trunc i32 %i to i8
1402 // %b = shl i8 %a, 6
1403 // %c = ashr i8 %b, 6
1404 // %d = sext i8 %c to i32
1405 // into:
1406 // %a = shl i32 %i, 30
1407 // %d = ashr i32 %a, 30
1408 Value* A = nullptr;
1409 // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
1410 ConstantInt* BA = nullptr, * CA = nullptr;
1411 if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_ConstantInt(BA)),
1412 m_ConstantInt(CA))) &&
1413 BA == CA && A->getType() == CI.getType()) {
1414 unsigned MidSize = Src->getType()->getScalarSizeInBits();
1415 unsigned SrcDstSize = CI.getType()->getScalarSizeInBits();
1416 unsigned ShAmt = CA->getZExtValue() + SrcDstSize - MidSize;
1417 Constant* ShAmtV = ConstantInt::get(CI.getType(), ShAmt);
1418 A = Builder.CreateShl(A, ShAmtV, CI.getName());
1419 return BinaryOperator::CreateAShr(A, ShAmtV);
1420 }
1421
1422 return nullptr;
1423 }
1424
1425
1426 /// Return true if the specified floating-point constant fits in the specified
1427 /// FP type without changing its value.
1428 static bool fitsInFPType(ConstantFP* CFP, const fltSemantics& Sem) {
1429 bool losesInfo;
1430 APFloat F = CFP->getValueAPF();
1431 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
1432 return !losesInfo;
1433 }
1434
1435 static Type* shrinkFPConstant(ConstantFP* CFP) {
1436 if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
1437 return nullptr; // No constant folding of this.
1438 // See if the value can be truncated to half and then reextended.
1439 if (fitsInFPType(CFP, APFloat::IEEEhalf()))
1440 return Type::getHalfTy(CFP->getContext());
1441 // See if the value can be truncated to float and then reextended.
1442 if (fitsInFPType(CFP, APFloat::IEEEsingle()))
1443 return Type::getFloatTy(CFP->getContext());
1444 if (CFP->getType()->isDoubleTy())
1445 return nullptr; // Won't shrink.
1446 if (fitsInFPType(CFP, APFloat::IEEEdouble()))
1447 return Type::getDoubleTy(CFP->getContext());
1448 // Don't try to shrink to various long double types.
1449 return nullptr;
1450 }
1451
1452 // Determine if this is a vector of ConstantFPs and if so, return the minimal
1453 // type we can safely truncate all elements to.
1454 // TODO: Make these support undef elements.
1455 static Type* shrinkFPConstantVector(Value* V) {
1456 auto* CV = dyn_cast<Constant>(V);
1457 if (!CV || !CV->getType()->isVectorTy())
1458 return nullptr;
1459
1460 Type* MinType = nullptr;
1461
1462 unsigned NumElts = CV->getType()->getVectorNumElements();
1463 for (unsigned i = 0; i != NumElts; ++i) {
1464 auto* CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
1465 if (!CFP)
1466 return nullptr;
1467
1468 Type* T = shrinkFPConstant(CFP);
1469 if (!T)
1470 return nullptr;
1471
1472 // If we haven't found a type yet or this type has a larger mantissa than
1473 // our previous type, this is our new minimal type.
1474 if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
1475 MinType = T;
1476 }
1477
1478 // Make a vector type from the minimal type.
1479 return VectorType::get(MinType, NumElts);
1480 }
1481
1482 /// Find the minimum FP type we can safely truncate to.
1483 static Type* getMinimumFPType(Value* V) {
1484 if (auto * FPExt = dyn_cast<FPExtInst>(V))
1485 return FPExt->getOperand(0)->getType();
1486
1487 // If this value is a constant, return the constant in the smallest FP type
1488 // that can accurately represent it. This allows us to turn
1489 // (float)((double)X+2.0) into x+2.0f.
1490 if (auto * CFP = dyn_cast<ConstantFP>(V))
1491 if (Type * T = shrinkFPConstant(CFP))
1492 return T;
1493
1494 // Try to shrink a vector of FP constants.
1495 if (Type * T = shrinkFPConstantVector(V))
1496 return T;
1497
1498 return V->getType();
1499 }
1500
1501 Instruction* InstCombiner::visitFPTrunc(FPTruncInst& FPT) {
1502 if (Instruction * I = commonCastTransforms(FPT))
1503 return I;
1504
1505 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1506 // simplify this expression to avoid one or more of the trunc/extend
1507 // operations if we can do so without changing the numerical results.
1508 //
1509 // The exact manner in which the widths of the operands interact to limit
1510 // what we can and cannot do safely varies from operation to operation, and
1511 // is explained below in the various case statements.
1512 Type* Ty = FPT.getType();
1513 BinaryOperator* OpI = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1514 if (OpI && OpI->hasOneUse()) {
1515 Type* LHSMinType = getMinimumFPType(OpI->getOperand(0));
1516 Type* RHSMinType = getMinimumFPType(OpI->getOperand(1));
1517 unsigned OpWidth = OpI->getType()->getFPMantissaWidth();
1518 unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1519 unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1520 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1521 unsigned DstWidth = Ty->getFPMantissaWidth();
1522 switch (OpI->getOpcode()) {
1523 default: break;
1524 case Instruction::FAdd:
1525 case Instruction::FSub:
1526 // For addition and subtraction, the infinitely precise result can
1527 // essentially be arbitrarily wide; proving that double rounding
1528 // will not occur because the result of OpI is exact (as we will for
1529 // FMul, for example) is hopeless. However, we *can* nonetheless
1530 // frequently know that double rounding cannot occur (or that it is
1531 // innocuous) by taking advantage of the specific structure of
1532 // infinitely-precise results that admit double rounding.
1533 //
1534 // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1535 // to represent both sources, we can guarantee that the double
1536 // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1537 // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1538 // for proof of this fact).
1539 //
1540 // Note: Figueroa does not consider the case where DstFormat !=
1541 // SrcFormat. It's possible (likely even!) that this analysis
1542 // could be tightened for those cases, but they are rare (the main
1543 // case of interest here is (float)((double)float + float)).
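//
// As a concrete instance of this bound, (float)((double)a + (double)b)
// with float operands a and b has OpWidth = 53 >= 2*24+1 = 49 and
// DstWidth = 24 >= SrcWidth = 24, so the addition may be done in float.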
1544 if (OpWidth >= 2 * DstWidth + 1 && DstWidth >= SrcWidth) {
1545 Value* LHS = Builder.CreateFPTrunc(OpI->getOperand(0), Ty);
1546 Value* RHS = Builder.CreateFPTrunc(OpI->getOperand(1), Ty);
1547 Instruction* RI = BinaryOperator::Create(OpI->getOpcode(), LHS, RHS);
1548 RI->copyFastMathFlags(OpI);
1549 return RI;
1550 }
1551 break;
1552 case Instruction::FMul:
1553 // For multiplication, the infinitely precise result has at most
1554 // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1555 // that such a value can be exactly represented, then no double
1556 // rounding can possibly occur; we can safely perform the operation
1557 // in the destination format if it can represent both sources.
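// For instance, (float)((double)a * (double)b) with float operands has
// OpWidth = 53 >= 24 + 24 = 48, so the product is exact in double and the
// multiply can instead be performed in float.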
1558 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1559 Value* LHS = Builder.CreateFPTrunc(OpI->getOperand(0), Ty);
1560 Value* RHS = Builder.CreateFPTrunc(OpI->getOperand(1), Ty);
1561 return BinaryOperator::CreateFMulFMF(LHS, RHS, OpI);
1562 }
1563 break;
1564 case Instruction::FDiv:
1565 // For division, we again use the bound from Figueroa's
1566 // dissertation. I am entirely certain that this bound can be
1567 // tightened in the unbalanced operand case by an analysis based on
1568 // the diophantine rational approximation bound, but the well-known
1569 // condition used here is a good conservative first pass.
1570 // TODO: Tighten bound via rigorous analysis of the unbalanced case.
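// Concretely, (float)((double)a / (double)b) with float operands satisfies
// OpWidth = 53 >= 2 * 24 = 48, so the division can be performed in float.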
1571 if (OpWidth >= 2 * DstWidth && DstWidth >= SrcWidth) {
1572 Value* LHS = Builder.CreateFPTrunc(OpI->getOperand(0), Ty);
1573 Value* RHS = Builder.CreateFPTrunc(OpI->getOperand(1), Ty);
1574 return BinaryOperator::CreateFDivFMF(LHS, RHS, OpI);
1575 }
1576 break;
1577 case Instruction::FRem: {
1578 // Remainder is straightforward. Remainder is always exact, so the
1579 // type of OpI doesn't enter into things at all. We simply evaluate
1580 // in whichever source type is larger, then convert to the
1581 // destination type.
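// For example, fptrunc (frem (fpext float %a to double),
//                            (fpext half %b to double)) to half
// can be evaluated as an frem in float (the larger source type) followed
// by a conversion of the result to half.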
1582 if (SrcWidth == OpWidth)
1583 break;
1584 Value* LHS, * RHS;
1585 if (LHSWidth == SrcWidth) {
1586 LHS = Builder.CreateFPTrunc(OpI->getOperand(0), LHSMinType);
1587 RHS = Builder.CreateFPTrunc(OpI->getOperand(1), LHSMinType);
1588 }
1589 else {
1590 LHS = Builder.CreateFPTrunc(OpI->getOperand(0), RHSMinType);
1591 RHS = Builder.CreateFPTrunc(OpI->getOperand(1), RHSMinType);
1592 }
1593
1594 Value* ExactResult = Builder.CreateFRemFMF(LHS, RHS, OpI);
1595 return CastInst::CreateFPCast(ExactResult, Ty);
1596 }
1597 }
1598
1599 // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1600 if (BinaryOperator::isFNeg(OpI)) {
1601 Value* InnerTrunc = Builder.CreateFPTrunc(OpI->getOperand(1), Ty);
1602 return BinaryOperator::CreateFNegFMF(InnerTrunc, OpI);
1603 }
1604 }
1605
1606 if (auto * II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
1607 switch (II->getIntrinsicID()) {
1608 default: break;
1609 case Intrinsic::ceil:
1610 case Intrinsic::fabs:
1611 case Intrinsic::floor:
1612 case Intrinsic::nearbyint:
1613 case Intrinsic::rint:
1614 case Intrinsic::round:
1615 case Intrinsic::trunc: {
1616 Value* Src = II->getArgOperand(0);
1617 if (!Src->hasOneUse())
1618 break;
1619
1620 // Except for fabs, this transformation requires the input of the unary FP
1621 // operation to be itself an fpext from the type to which we're
1622 // truncating.
1623 if (II->getIntrinsicID() != Intrinsic::fabs) {
1624 FPExtInst* FPExtSrc = dyn_cast<FPExtInst>(Src);
1625 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1626 break;
1627 }
1628
1629 // Do unary FP operation on smaller type.
1630 // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1631 Value* InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1632 Function* Overload = Intrinsic::getDeclaration(FPT.getModule(),
1633 II->getIntrinsicID(), Ty);
1634 SmallVector<OperandBundleDef, 1> OpBundles;
1635 II->getOperandBundlesAsDefs(OpBundles);
1636 CallInst* NewCI = CallInst::Create(Overload, { InnerTrunc }, OpBundles,
1637 II->getName());
1638 NewCI->copyFastMathFlags(II);
1639 return NewCI;
1640 }
1641 }
1642 }
1643
1644 if (Instruction * I = shrinkInsertElt(FPT, Builder))
1645 return I;
1646
1647 return nullptr;
1648 }
1649
1650 Instruction* InstCombiner::visitFPExt(CastInst& CI) {
1651 return commonCastTransforms(CI);
1652 }
1653
1654 // fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1655 // This is safe if the intermediate type has enough bits in its mantissa to
1656 // accurately represent all values of X. For example, this won't work with
1657 // i64 -> float -> i64.
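//
// For example, fptosi (sitofp i16 %x to float) to i32 can become
// sext i16 %x to i32, since float's 24-bit significand represents every
// i16 value exactly.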
1658 Instruction* InstCombiner::FoldItoFPtoI(Instruction& FI) {
1659 if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1660 return nullptr;
1661 Instruction* OpI = cast<Instruction>(FI.getOperand(0));
1662
1663 Value* SrcI = OpI->getOperand(0);
1664 Type* FITy = FI.getType();
1665 Type* OpITy = OpI->getType();
1666 Type* SrcTy = SrcI->getType();
1667 bool IsInputSigned = isa<SIToFPInst>(OpI);
1668 bool IsOutputSigned = isa<FPToSIInst>(FI);
1669
1670 // We can safely assume the conversion won't overflow the output range,
1671 // because (for example) (uint8_t)18293.f is undefined behavior.
1672
1673 // Since we can assume the conversion won't overflow, our decision as to
1674 // whether the input will fit in the float should depend on the minimum
1675 // of the input range and output range.
1676
1677 // This means this is also safe for a signed input and unsigned output, since
1678 // a negative input would lead to undefined behavior.
1679 int InputSize = (int)SrcTy->getScalarSizeInBits() - IsInputSigned;
1680 int OutputSize = (int)FITy->getScalarSizeInBits() - IsOutputSigned;
1681 int ActualSize = std::min(InputSize, OutputSize);
1682
1683 if (ActualSize <= OpITy->getFPMantissaWidth()) {
1684 if (FITy->getScalarSizeInBits() > SrcTy->getScalarSizeInBits()) {
1685 if (IsInputSigned && IsOutputSigned)
1686 return new SExtInst(SrcI, FITy);
1687 return new ZExtInst(SrcI, FITy);
1688 }
1689 if (FITy->getScalarSizeInBits() < SrcTy->getScalarSizeInBits())
1690 return new TruncInst(SrcI, FITy);
1691 if (SrcTy == FITy)
1692 return replaceInstUsesWith(FI, SrcI);
1693 return new BitCastInst(SrcI, FITy);
1694 }
1695 return nullptr;
1696 }
1697
1698 Instruction* InstCombiner::visitFPToUI(FPToUIInst& FI) {
1699 Instruction* OpI = dyn_cast<Instruction>(FI.getOperand(0));
1700 if (!OpI)
1701 return commonCastTransforms(FI);
1702
1703 if (Instruction * I = FoldItoFPtoI(FI))
1704 return I;
1705
1706 return commonCastTransforms(FI);
1707 }
1708
1709 Instruction* InstCombiner::visitFPToSI(FPToSIInst& FI) {
1710 Instruction* OpI = dyn_cast<Instruction>(FI.getOperand(0));
1711 if (!OpI)
1712 return commonCastTransforms(FI);
1713
1714 if (Instruction * I = FoldItoFPtoI(FI))
1715 return I;
1716
1717 return commonCastTransforms(FI);
1718 }
1719
1720 Instruction* InstCombiner::visitUIToFP(CastInst& CI) {
1721 return commonCastTransforms(CI);
1722 }
1723
1724 Instruction* InstCombiner::visitSIToFP(CastInst& CI) {
1725 return commonCastTransforms(CI);
1726 }
1727
1728 Instruction* InstCombiner::visitIntToPtr(IntToPtrInst& CI) {
1729 // If the source integer type is not the intptr_t type for this target, do a
1730 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
1731 // cast to be exposed to other transforms.
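// For example, on a target with 64-bit pointers:
//   inttoptr i32 %x to i8*
// becomes:
//   %w = zext i32 %x to i64
//   inttoptr i64 %w to i8*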
1732 unsigned AS = CI.getAddressSpace();
1733 if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
1734 DL.getPointerSizeInBits(AS)) {
1735 Type* Ty = DL.getIntPtrType(CI.getContext(), AS);
1736 if (CI.getType()->isVectorTy()) // Handle vectors of pointers.
1737 Ty = VectorType::get(Ty, CI.getType()->getVectorNumElements());
1738
1739 Value* P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
1740 return new IntToPtrInst(P, CI.getType());
1741 }
1742
1743 if (Instruction * I = commonCastTransforms(CI))
1744 return I;
1745
1746 return nullptr;
1747 }
1748
1749 /// Implement the transforms for cast of pointer (bitcast/ptrtoint)
1750 Instruction* InstCombiner::commonPointerCastTransforms(CastInst& CI) {
1751 Value* Src = CI.getOperand(0);
1752
1753 if (GetElementPtrInst * GEP = dyn_cast<GetElementPtrInst>(Src)) {
1754 // If casting the result of a getelementptr instruction with no offset, turn
1755 // this into a cast of the original pointer!
1756 if (GEP->hasAllZeroIndices() &&
1757 // If CI is an addrspacecast and GEP changes the pointer type, merging
1758 // GEP into CI would undo canonicalizing addrspacecast with different
1759 // pointer types, causing infinite loops.
1760 (!isa<AddrSpaceCastInst>(CI) ||
1761 GEP->getType() == GEP->getPointerOperandType())) {
1762 // Changing the cast operand is usually not a good idea but it is safe
1763 // here because the pointer operand is being replaced with another
1764 // pointer operand so the opcode doesn't need to change.
1765 Worklist.Add(GEP);
1766 CI.setOperand(0, GEP->getOperand(0));
1767 return &CI;
1768 }
1769 }
1770
1771 return commonCastTransforms(CI);
1772 }
1773
1774 Instruction* InstCombiner::visitPtrToInt(PtrToIntInst& CI) {
1775 // If the destination integer type is not the intptr_t type for this target,
1776 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
1777 // to be exposed to other transforms.
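// For example, on a target with 64-bit pointers:
//   ptrtoint i8* %p to i32
// becomes:
//   %w = ptrtoint i8* %p to i64
//   trunc i64 %w to i32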
1778
1779 Type* Ty = CI.getType();
1780 unsigned AS = CI.getPointerAddressSpace();
1781
1782 if (Ty->getScalarSizeInBits() == DL.getIndexSizeInBits(AS))
1783 return commonPointerCastTransforms(CI);
1784
1785 Type* PtrTy = DL.getIntPtrType(CI.getContext(), AS);
1786 if (Ty->isVectorTy()) // Handle vectors of pointers.
1787 PtrTy = VectorType::get(PtrTy, Ty->getVectorNumElements());
1788
1789 Value* P = Builder.CreatePtrToInt(CI.getOperand(0), PtrTy);
1790 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
1791 }
1792
1793 /// This input value (which is known to have vector type) is being zero extended
1794 /// or truncated to the specified vector type.
1795 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
1796 ///
1797 /// The source and destination vector types may have different element types.
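///
/// For example, widening <2 x i16> to <4 x i16> can be expressed as
///   shufflevector <2 x i16> %v, <2 x i16> zeroinitializer,
///                 <4 x i32> <i32 0, i32 1, i32 2, i32 2>
/// where the trailing lanes read element 0 of the zero vector.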
1798 static Instruction* optimizeVectorResize(Value* InVal, VectorType* DestTy,
1799 InstCombiner& IC) {
1800 // We can only do this optimization if the output is a multiple of the input
1801 // element size, or the input is a multiple of the output element size.
1802 // Convert the input type to have the same element type as the output.
1803 VectorType* SrcTy = cast<VectorType>(InVal->getType());
1804
1805 if (SrcTy->getElementType() != DestTy->getElementType()) {
1806 // The input types don't need to be identical, but for now they must be the
1807 // same size. There is no specific reason we couldn't handle things like
1808 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
1809 // there yet.
1810 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
1811 DestTy->getElementType()->getPrimitiveSizeInBits())
1812 return nullptr;
1813
1814 SrcTy = VectorType::get(DestTy->getElementType(), SrcTy->getNumElements());
1815 InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
1816 }
1817
1818 // Now that the element types match, get the shuffle mask and RHS of the
1819 // shuffle to use, which depends on whether we're increasing or decreasing the
1820 // size of the input.
1821 SmallVector<uint32_t, 16> ShuffleMask;
1822 Value* V2;
1823
1824 if (SrcTy->getNumElements() > DestTy->getNumElements()) {
1825 // If we're shrinking the number of elements, just shuffle in the low
1826 // elements from the input and use undef as the second shuffle input.
1827 V2 = UndefValue::get(SrcTy);
1828 for (unsigned i = 0, e = DestTy->getNumElements(); i != e; ++i)
1829 ShuffleMask.push_back(i);
1830
1831 }
1832 else {
1833 // If we're increasing the number of elements, shuffle in all of the
1834 // elements from InVal and fill the rest of the result elements with zeros
1835 // from a constant zero.
1836 V2 = Constant::getNullValue(SrcTy);
1837 unsigned SrcElts = SrcTy->getNumElements();
1838 for (unsigned i = 0, e = SrcElts; i != e; ++i)
1839 ShuffleMask.push_back(i);
1840
1841 // The excess elements reference the first element of the zero input.
1842 for (unsigned i = 0, e = DestTy->getNumElements() - SrcElts; i != e; ++i)
1843 ShuffleMask.push_back(SrcElts);
1844 }
1845
1846 return new ShuffleVectorInst(InVal, V2,
1847 ConstantDataVector::get(V2->getContext(),
1848 ShuffleMask));
1849 }
1850
1851 static bool isMultipleOfTypeSize(unsigned Value, Type* Ty) {
1852 return Value % Ty->getPrimitiveSizeInBits() == 0;
1853 }
1854
1855 static unsigned getTypeSizeIndex(unsigned Value, Type* Ty) {
1856 return Value / Ty->getPrimitiveSizeInBits();
1857 }
1858
1859 /// V is a value which is inserted into a vector of VecEltTy.
1860 /// Look through the value to see if we can decompose it into
1861 /// insertions into the vector. See the example in the comment for
1862 /// optimizeIntegerToVectorInsertions for the pattern this handles.
1863 /// The type of V is always a non-zero multiple of VecEltTy's size.
1864 /// Shift is the number of bits between the lsb of V and the lsb of
1865 /// the vector.
1866 ///
1867 /// This returns false if the pattern can't be matched or true if it can,
1868 /// filling in Elements with the elements found here.
1869 static bool collectInsertionElements(Value* V, unsigned Shift,
1870 SmallVectorImpl<Value*>& Elements,
1871 Type* VecEltTy, bool isBigEndian) {
1872 IGC_ASSERT_MESSAGE(isMultipleOfTypeSize(Shift, VecEltTy), "Shift should be a multiple of the element type size");
1873
1874 // Undef values never contribute useful bits to the result.
1875 if (isa<UndefValue>(V)) return true;
1876
1877 // If we got down to a value of the right type, we win; try inserting it into
1878 // the right element.
1879 if (V->getType() == VecEltTy) {
1880 // Inserting null doesn't actually insert any elements.
1881 if (Constant * C = dyn_cast<Constant>(V))
1882 if (C->isNullValue())
1883 return true;
1884
1885 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
1886 if (isBigEndian)
1887 ElementIndex = Elements.size() - ElementIndex - 1;
1888
1889 // Fail if multiple elements are inserted into this slot.
1890 if (Elements[ElementIndex])
1891 return false;
1892
1893 Elements[ElementIndex] = V;
1894 return true;
1895 }
1896
1897 if (Constant * C = dyn_cast<Constant>(V)) {
1898 // Figure out the # elements this provides, and bitcast it or slice it up
1899 // as required.
1900 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
1901 VecEltTy);
1902 // If the constant is the size of a vector element, we just need to bitcast
1903 // it to the right type so it gets properly inserted.
1904 if (NumElts == 1)
1905 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
1906 Shift, Elements, VecEltTy, isBigEndian);
1907
1908 // Okay, this is a constant that covers multiple elements. Slice it up into
1909 // pieces and insert each element-sized piece into the vector.
1910 if (!isa<IntegerType>(C->getType()))
1911 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
1912 C->getType()->getPrimitiveSizeInBits()));
1913 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
1914 Type* ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
1915
1916 for (unsigned i = 0; i != NumElts; ++i) {
1917 unsigned ShiftI = Shift + i * ElementSize;
1918 Constant* Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
1919 ShiftI));
1920 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
1921 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
1922 isBigEndian))
1923 return false;
1924 }
1925 return true;
1926 }
1927
1928 if (!V->hasOneUse()) return false;
1929
1930 Instruction* I = dyn_cast<Instruction>(V);
1931 if (!I) return false;
1932 switch (I->getOpcode()) {
1933 default: return false; // Unhandled case.
1934 case Instruction::BitCast:
1935 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1936 isBigEndian);
1937 case Instruction::ZExt:
1938 if (!isMultipleOfTypeSize(
1939 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
1940 VecEltTy))
1941 return false;
1942 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1943 isBigEndian);
1944 case Instruction::Or:
1945 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1946 isBigEndian) &&
1947 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
1948 isBigEndian);
1949 case Instruction::Shl: {
1950 // Must be shifting by a constant that is a multiple of the element size.
1951 ConstantInt* CI = dyn_cast<ConstantInt>(I->getOperand(1));
1952 if (!CI) return false;
1953 Shift += CI->getZExtValue();
1954 if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
1955 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
1956 isBigEndian);
1957 }
1958
1959 }
1960 }
1961
1962
1963 /// If the input is an 'or' instruction, we may be doing shifts and ors to
1964 /// assemble the elements of the vector manually.
1965 /// Try to rip the code out and replace it with insertelements. This is to
1966 /// optimize code like this:
1967 ///
1968 /// %tmp37 = bitcast float %inc to i32
1969 /// %tmp38 = zext i32 %tmp37 to i64
1970 /// %tmp31 = bitcast float %inc5 to i32
1971 /// %tmp32 = zext i32 %tmp31 to i64
1972 /// %tmp33 = shl i64 %tmp32, 32
1973 /// %ins35 = or i64 %tmp33, %tmp38
1974 /// %tmp43 = bitcast i64 %ins35 to <2 x float>
1975 ///
1976 /// Into two insertelements that do "buildvector{%inc, %inc5}".
1977 static Value* optimizeIntegerToVectorInsertions(BitCastInst& CI,
1978 InstCombiner& IC) {
1979 VectorType* DestVecTy = cast<VectorType>(CI.getType());
1980 Value* IntInput = CI.getOperand(0);
1981
1982 SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
1983 if (!collectInsertionElements(IntInput, 0, Elements,
1984 DestVecTy->getElementType(),
1985 IC.getDataLayout().isBigEndian()))
1986 return nullptr;
1987
1988 // If we succeeded, we know that all of the elements are specified by Elements
1989 // or are zero if Elements has a null entry. Recast this as a set of
1990 // insertions.
1991 Value* Result = Constant::getNullValue(CI.getType());
1992 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
1993 if (!Elements[i]) continue; // Unset element.
1994
1995 Result = IC.Builder.CreateInsertElement(Result, Elements[i],
1996 IC.Builder.getInt32(i));
1997 }
1998
1999 return Result;
2000 }
2001
2002 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2003 /// vector followed by extract element. The backend tends to handle bitcasts of
2004 /// vectors better than bitcasts of scalars because vector registers are
2005 /// usually not type-specific like scalar integer or scalar floating-point.
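///
/// For example:
///   %e = extractelement <2 x i32> %v, i32 1
///   %r = bitcast i32 %e to float
/// becomes:
///   %bc = bitcast <2 x i32> %v to <2 x float>
///   %r  = extractelement <2 x float> %bc, i32 1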
2006 static Instruction* canonicalizeBitCastExtElt(BitCastInst& BitCast,
2007 InstCombiner& IC) {
2008 // TODO: Create and use a pattern matcher for ExtractElementInst.
2009 auto* ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
2010 if (!ExtElt || !ExtElt->hasOneUse())
2011 return nullptr;
2012
2013 // The bitcast must be to a vectorizable type; otherwise, we can't make a new
2014 // type to extract from.
2015 Type* DestType = BitCast.getType();
2016 if (!VectorType::isValidElementType(DestType))
2017 return nullptr;
2018
2019 unsigned NumElts = ExtElt->getVectorOperandType()->getNumElements();
2020 auto* NewVecType = VectorType::get(DestType, NumElts);
2021 auto* NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
2022 NewVecType, "bc");
2023 return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
2024 }
2025
2026 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
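///
/// For example:
///   %bc = bitcast <4 x i32> %x to <2 x i64>
///   %a  = and <2 x i64> %bc, %y
///   %r  = bitcast <2 x i64> %a to <4 x i32>
/// becomes:
///   %yc = bitcast <2 x i64> %y to <4 x i32>
///   %r  = and <4 x i32> %x, %yc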
2027 static Instruction* foldBitCastBitwiseLogic(BitCastInst& BitCast,
2028 InstCombiner::BuilderTy& Builder) {
2029 Type* DestTy = BitCast.getType();
2030 BinaryOperator* BO;
2031 if (!DestTy->isIntOrIntVectorTy() ||
2032 !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2033 !BO->isBitwiseLogicOp())
2034 return nullptr;
2035
2036 // FIXME: This transform is restricted to vector types to avoid backend
2037 // problems caused by creating potentially illegal operations. If a fix-up is
2038 // added to handle that situation, we can remove this check.
2039 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2040 return nullptr;
2041
2042 Value* X;
2043 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
2044 X->getType() == DestTy && !isa<Constant>(X)) {
2045 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
2046 Value* CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
2047 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
2048 }
2049
2050 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
2051 X->getType() == DestTy && !isa<Constant>(X)) {
2052 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
2053 Value* CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2054 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
2055 }
2056
2057 // Canonicalize vector bitcasts to come before vector bitwise logic with a
2058 // constant. This eases recognition of special constants for later ops.
2059 // Example:
2060 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
2061 Constant* C;
2062 if (match(BO->getOperand(1), m_Constant(C))) {
2063 // bitcast (logic X, C) --> logic (bitcast X, C')
2064 Value* CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2065 Value* CastedC = ConstantExpr::getBitCast(C, DestTy);
2066 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
2067 }
2068
2069 return nullptr;
2070 }
2071
2072 /// Change the type of a select if we can eliminate a bitcast.
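///
/// For example:
///   %tc = bitcast <4 x i32> %x to <2 x i64>
///   %s  = select i1 %c, <2 x i64> %tc, <2 x i64> %y
///   %r  = bitcast <2 x i64> %s to <4 x i32>
/// becomes:
///   %yc = bitcast <2 x i64> %y to <4 x i32>
///   %r  = select i1 %c, <4 x i32> %x, <4 x i32> %yc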
2073 static Instruction* foldBitCastSelect(BitCastInst& BitCast,
2074 InstCombiner::BuilderTy& Builder) {
2075 Value* Cond, * TVal, * FVal;
2076 if (!match(BitCast.getOperand(0),
2077 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
2078 return nullptr;
2079
2080 // A vector select must maintain the same number of elements in its operands.
2081 Type* CondTy = Cond->getType();
2082 Type* DestTy = BitCast.getType();
2083 if (CondTy->isVectorTy()) {
2084 if (!DestTy->isVectorTy())
2085 return nullptr;
2086 if (DestTy->getVectorNumElements() != CondTy->getVectorNumElements())
2087 return nullptr;
2088 }
2089
2090 // FIXME: This transform is restricted from changing the select between
2091 // scalars and vectors to avoid backend problems caused by creating
2092 // potentially illegal operations. If a fix-up is added to handle that
2093 // situation, we can remove this check.
2094 if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2095 return nullptr;
2096
2097 auto* Sel = cast<Instruction>(BitCast.getOperand(0));
2098 Value* X;
2099 if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2100 !isa<Constant>(X)) {
2101 // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2102 Value* CastedVal = Builder.CreateBitCast(FVal, DestTy);
2103 return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2104 }
2105
2106 if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2107 !isa<Constant>(X)) {
2108 // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2109 Value* CastedVal = Builder.CreateBitCast(TVal, DestTy);
2110 return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2111 }
2112
2113 return nullptr;
2114 }
2115
2116 /// Check if all users of CI are StoreInsts.
2117 static bool hasStoreUsersOnly(CastInst& CI) {
2118 for (User* U : CI.users()) {
2119 if (!isa<StoreInst>(U))
2120 return false;
2121 }
2122 return true;
2123 }
2124
2125 /// This function handles following case
2126 ///
2127 /// A -> B cast
2128 /// PHI
2129 /// B -> A cast
2130 ///
2131 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2132 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
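///
/// For example:
///   %b = bitcast double %a to i64           ; A -> B
///   %p = phi i64 [ %b, %bb0 ], [ %q, %bb1 ]
///   %r = bitcast i64 %p to double           ; B -> A
/// can be rewritten around new PHI nodes of type double, provided the other
/// incoming values are also A->B casts, constants, or simple loads.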
2133 Instruction* InstCombiner::optimizeBitCastFromPhi(CastInst& CI, PHINode* PN) {
2134 // Temporarily disable the optimization of bitcasts through phi nodes, as it
2135 // creates a significant number of PHI nodes under certain patterns. Since IGC
2136 // has no general register coalescing, these PHI nodes won't be coalesced,
2137 // which results in huge register pressure and slows down lots of benchmarks
2138 // significantly.
2139 return nullptr;
2140 // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2141 if (hasStoreUsersOnly(CI))
2142 return nullptr;
2143
2144 Value* Src = CI.getOperand(0);
2145 Type* SrcTy = Src->getType(); // Type B
2146 Type* DestTy = CI.getType(); // Type A
2147
2148 SmallVector<PHINode*, 4> PhiWorklist;
2149 SmallSetVector<PHINode*, 4> OldPhiNodes;
2150
2151 // Find all of the A->B casts and PHI nodes.
2152 // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2153 // OldPhiNodes is used to track all known PHI nodes; before adding a new
2154 // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2155 PhiWorklist.push_back(PN);
2156 OldPhiNodes.insert(PN);
2157 while (!PhiWorklist.empty()) {
2158 auto* OldPN = PhiWorklist.pop_back_val();
2159 for (Value* IncValue : OldPN->incoming_values()) {
2160 if (isa<Constant>(IncValue))
2161 continue;
2162
2163 if (auto * LI = dyn_cast<LoadInst>(IncValue)) {
2164 // If there is a sequence of one or more load instructions, where each loaded
2165 // value is used as the address of a later load instruction, a bitcast is
2166 // necessary to change the value type, so don't optimize it. For
2167 // simplicity, we give up if the load address comes from another load.
2168 Value* Addr = LI->getOperand(0);
2169 if (Addr == &CI || isa<LoadInst>(Addr))
2170 return nullptr;
2171 if (LI->hasOneUse() && LI->isSimple())
2172 continue;
2173 // If a LoadInst has more than one use, changing the type of the loaded
2174 // value may create another bitcast.
2175 return nullptr;
2176 }
2177
2178 if (auto * PNode = dyn_cast<PHINode>(IncValue)) {
2179 if (OldPhiNodes.insert(PNode))
2180 PhiWorklist.push_back(PNode);
2181 continue;
2182 }
2183
2184 auto* BCI = dyn_cast<BitCastInst>(IncValue);
2185 // We can't handle other instructions.
2186 if (!BCI)
2187 return nullptr;
2188
2189 // Verify it's an A->B cast.
2190 Type* TyA = BCI->getOperand(0)->getType();
2191 Type* TyB = BCI->getType();
2192 if (TyA != DestTy || TyB != SrcTy)
2193 return nullptr;
2194 }
2195 }
2196
2197 // For each old PHI node, create a corresponding new PHI node with type A.
2198 SmallDenseMap<PHINode*, PHINode*> NewPNodes;
2199 for (auto* OldPN : OldPhiNodes) {
2200 Builder.SetInsertPoint(OldPN);
2201 PHINode* NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2202 NewPNodes[OldPN] = NewPN;
2203 }
2204
2205 // Fill in the operands of new PHI nodes.
2206 for (auto* OldPN : OldPhiNodes) {
2207 PHINode* NewPN = NewPNodes[OldPN];
2208 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2209 Value* V = OldPN->getOperand(j);
2210 Value* NewV = nullptr;
2211 if (auto * C = dyn_cast<Constant>(V)) {
2212 NewV = ConstantExpr::getBitCast(C, DestTy);
2213 }
2214 else if (auto * LI = dyn_cast<LoadInst>(V)) {
2215 Builder.SetInsertPoint(LI->getNextNode());
2216 NewV = Builder.CreateBitCast(LI, DestTy);
2217 Worklist.Add(LI);
2218 }
2219 else if (auto * BCI = dyn_cast<BitCastInst>(V)) {
2220 NewV = BCI->getOperand(0);
2221 }
2222 else if (auto * PrevPN = dyn_cast<PHINode>(V)) {
2223 NewV = NewPNodes[PrevPN];
2224 }
2225 IGC_ASSERT(NewV);
2226 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2227 }
2228 }
2229
2230 // If there is a store with type B, change it to type A.
2231 for (User* U : PN->users()) {
2232 auto* SI = dyn_cast<StoreInst>(U);
2233 if (SI && SI->isSimple() && SI->getOperand(0) == PN) {
2234 Builder.SetInsertPoint(SI);
2235 auto* NewBC =
2236 cast<BitCastInst>(Builder.CreateBitCast(NewPNodes[PN], SrcTy));
2237 SI->setOperand(0, NewBC);
2238 Worklist.Add(SI);
2239 IGC_ASSERT(hasStoreUsersOnly(*NewBC));
2240 }
2241 }
2242
2243 return replaceInstUsesWith(CI, NewPNodes[PN]);
2244 }
2245
2246 Instruction* InstCombiner::visitBitCast(BitCastInst& CI) {
2247 // If the operands are integer typed then apply the integer transforms,
2248 // otherwise just apply the common ones.
2249 Value* Src = CI.getOperand(0);
2250 Type* SrcTy = Src->getType();
2251 Type* DestTy = CI.getType();
2252
2253 // Get rid of casts from one type to the same type. These are useless and can
2254 // be replaced by the operand.
2255 if (DestTy == Src->getType())
2256 return replaceInstUsesWith(CI, Src);
2257
2258 if (PointerType * DstPTy = dyn_cast<PointerType>(DestTy)) {
2259 PointerType* SrcPTy = cast<PointerType>(SrcTy);
2260 Type* DstElTy = DstPTy->getElementType();
2261 Type* SrcElTy = SrcPTy->getElementType();
2262
2263 // Casting pointers between the same type but different address spaces
2264 // is an addrspacecast rather than a bitcast.
2265 if ((DstElTy == SrcElTy) &&
2266 (DstPTy->getAddressSpace() != SrcPTy->getAddressSpace()))
2267 return new AddrSpaceCastInst(Src, DestTy);
2268
2269 // If we are casting an alloca to a pointer to a type of the same
2270 // size, rewrite the allocation instruction to allocate the "right" type.
2271 // There is no need to modify malloc calls because it is their bitcast that
2272 // needs to be cleaned up.
2273 if (AllocaInst * AI = dyn_cast<AllocaInst>(Src))
2274 if (Instruction * V = PromoteCastOfAllocation(CI, *AI))
2275 return V;
2276
2277 // When the type pointed to is not sized, the cast cannot be
2278 // turned into a gep.
2279 Type* PointeeType =
2280 cast<PointerType>(Src->getType()->getScalarType())->getElementType();
2281 if (!PointeeType->isSized())
2282 return nullptr;
2283
2284 // If the source and destination are pointers, and this cast is equivalent
2285 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
2286 // This can enhance SROA and other transforms that want type-safe pointers.
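// For example, bitcast [4 x float]* %p to float* becomes
//   getelementptr inbounds [4 x float], [4 x float]* %p, i32 0, i32 0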
2287 unsigned NumZeros = 0;
2288 while (SrcElTy != DstElTy &&
2289 isa<CompositeType>(SrcElTy) && !SrcElTy->isPointerTy() &&
2290 SrcElTy->getNumContainedTypes() /* not "{}" */) {
2291 SrcElTy = cast<CompositeType>(SrcElTy)->getTypeAtIndex(0U);
2292 ++NumZeros;
2293 }
2294
2295 // If we found a path from the src to dest, create the getelementptr now.
2296 if (SrcElTy == DstElTy) {
2297 SmallVector<Value*, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
2298 return GetElementPtrInst::CreateInBounds(Src, Idxs);
2299 }
2300 }
2301
2302 if (VectorType * DestVTy = dyn_cast<VectorType>(DestTy)) {
2303 if (DestVTy->getNumElements() == 1 && !SrcTy->isVectorTy()) {
2304 Value* Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
2305 return InsertElementInst::Create(UndefValue::get(DestTy), Elem,
2306 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2307 // FIXME: Canonicalize bitcast(insertelement) -> insertelement(bitcast)
2308 }
2309
2310 if (isa<IntegerType>(SrcTy)) {
2311 // If this is a cast from an integer to vector, check to see if the input
2312 // is a trunc or zext of a bitcast from vector. If so, we can replace all
2313 // the casts with a shuffle and (potentially) a bitcast.
2314 if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
2315 CastInst* SrcCast = cast<CastInst>(Src);
2316 if (BitCastInst * BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
2317 if (isa<VectorType>(BCIn->getOperand(0)->getType()))
2318 if (Instruction * I = optimizeVectorResize(BCIn->getOperand(0),
2319 cast<VectorType>(DestTy), *this))
2320 return I;
2321 }
2322
2323 // If the input is an 'or' instruction, we may be doing shifts and ors to
2324 // assemble the elements of the vector manually. Try to rip the code out
2325 // and replace it with insertelements.
2326 if (Value * V = optimizeIntegerToVectorInsertions(CI, *this))
2327 return replaceInstUsesWith(CI, V);
2328 }
2329 }
2330
2331 if (VectorType * SrcVTy = dyn_cast<VectorType>(SrcTy)) {
2332 if (SrcVTy->getNumElements() == 1) {
2333 // If our destination is not a vector, then make this a straight
2334 // scalar-scalar cast.
2335 if (!DestTy->isVectorTy()) {
2336 Value* Elem =
2337 Builder.CreateExtractElement(Src,
2338 Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
2339 return CastInst::Create(Instruction::BitCast, Elem, DestTy);
2340 }
2341
2342 // Otherwise, see if our source is an insert. If so, then use the scalar
2343 // component directly.
2344 if (InsertElementInst * IEI =
2345 dyn_cast<InsertElementInst>(CI.getOperand(0)))
2346 return CastInst::Create(Instruction::BitCast, IEI->getOperand(1),
2347 DestTy);
2348 }
2349 }
2350
2351 if (ShuffleVectorInst * SVI = dyn_cast<ShuffleVectorInst>(Src)) {
2352 // Okay, we have (bitcast (shuffle ..)). Check to see if this is
2353 // a bitcast to a vector with the same # elts.
2354 if (SVI->hasOneUse() && DestTy->isVectorTy() &&
2355 DestTy->getVectorNumElements() == SVI->getType()->getNumElements() &&
2356 SVI->getType()->getNumElements() ==
2357 SVI->getOperand(0)->getType()->getVectorNumElements()) {
2358 BitCastInst* Tmp;
2359 // If either of the operands is a cast from CI.getType(), then
2360 // evaluating the shuffle in the casted destination's type will allow
2361 // us to eliminate at least one cast.
2362 if (((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(0))) &&
2363 Tmp->getOperand(0)->getType() == DestTy) ||
2364 ((Tmp = dyn_cast<BitCastInst>(SVI->getOperand(1))) &&
2365 Tmp->getOperand(0)->getType() == DestTy)) {
2366 Value* LHS = Builder.CreateBitCast(SVI->getOperand(0), DestTy);
2367 Value* RHS = Builder.CreateBitCast(SVI->getOperand(1), DestTy);
2368 // Return a new shuffle vector. Use the same element ID's, as we
2369 // know the vector types match #elts.
2370 return new ShuffleVectorInst(LHS, RHS, SVI->getOperand(2));
2371 }
2372 }
2373 }
2374
2375 // Handle the A->B->A cast, and there is an intervening PHI node.
2376 if (PHINode * PN = dyn_cast<PHINode>(Src))
2377 if (Instruction * I = optimizeBitCastFromPhi(CI, PN))
2378 return I;
2379
2380 if (Instruction * I = canonicalizeBitCastExtElt(CI, *this))
2381 return I;
2382
2383 if (Instruction * I = foldBitCastBitwiseLogic(CI, Builder))
2384 return I;
2385
2386 if (Instruction * I = foldBitCastSelect(CI, Builder))
2387 return I;
2388
2389 if (SrcTy->isPointerTy())
2390 return commonPointerCastTransforms(CI);
2391 return commonCastTransforms(CI);
2392 }
2393
2394 Instruction* InstCombiner::visitAddrSpaceCast(AddrSpaceCastInst& CI) {
2395 // If the destination pointer element type is not the same as the source's,
2396 // first do a bitcast to a pointer with the destination's element type, and
2397 // then the addrspacecast. This allows the cast to be exposed to other transforms.
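// For example:
//   addrspacecast i32 addrspace(1)* %p to float addrspace(3)*
// becomes:
//   %b = bitcast i32 addrspace(1)* %p to float addrspace(1)*
//   addrspacecast float addrspace(1)* %b to float addrspace(3)*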
2398 Value* Src = CI.getOperand(0);
2399 PointerType* SrcTy = cast<PointerType>(Src->getType()->getScalarType());
2400 PointerType* DestTy = cast<PointerType>(CI.getType()->getScalarType());
2401
2402 Type* DestElemTy = DestTy->getElementType();
2403 if (SrcTy->getElementType() != DestElemTy) {
2404 Type* MidTy = PointerType::get(DestElemTy, SrcTy->getAddressSpace());
2405 if (VectorType * VT = dyn_cast<VectorType>(CI.getType())) {
2406 // Handle vectors of pointers.
2407 MidTy = VectorType::get(MidTy, VT->getNumElements());
2408 }
2409
2410 Value* NewBitCast = Builder.CreateBitCast(Src, MidTy);
2411 return new AddrSpaceCastInst(NewBitCast, CI.getType());
2412 }
2413
2414 return commonPointerCastTransforms(CI);
2415 }
2416 #include "common/LLVMWarningsPop.hpp"
2417