//===- InstCombineCasts.cpp -----------------------------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the visit functions for cast operations.
//
//===----------------------------------------------------------------------===//

#include "InstCombineInternal.h"
#include "llvm/ADT/SetVector.h"
#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/DIBuilder.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Transforms/InstCombine/InstCombiner.h"
#include <numeric>
using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "instcombine"

/// Analyze 'Val', seeing if it is a simple linear expression.
/// If so, decompose it, returning some value X, such that Val is
/// X*Scale+Offset.
///
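/// For example (illustrative; the arithmetic must carry no-wrap flags, since
/// the code below refuses to look past anything that might overflow):
///   %t = mul nuw i32 %X, 4
///   %v = add nuw i32 %t, 8
/// decomposes as X = %X, Scale = 4, Offset = 8.
///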
static Value *decomposeSimpleLinearExpr(Value *Val, unsigned &Scale,
                                        uint64_t &Offset) {
  if (ConstantInt *CI = dyn_cast<ConstantInt>(Val)) {
    Offset = CI->getZExtValue();
    Scale = 0;
    return ConstantInt::get(Val->getType(), 0);
  }

  if (BinaryOperator *I = dyn_cast<BinaryOperator>(Val)) {
    // Cannot look past anything that might overflow.
    OverflowingBinaryOperator *OBI = dyn_cast<OverflowingBinaryOperator>(Val);
    if (OBI && !OBI->hasNoUnsignedWrap() && !OBI->hasNoSignedWrap()) {
      Scale = 1;
      Offset = 0;
      return Val;
    }

    if (ConstantInt *RHS = dyn_cast<ConstantInt>(I->getOperand(1))) {
      if (I->getOpcode() == Instruction::Shl) {
        // This is a value scaled by '1 << the shift amt'.
        Scale = UINT64_C(1) << RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Mul) {
        // This value is scaled by 'RHS'.
        Scale = RHS->getZExtValue();
        Offset = 0;
        return I->getOperand(0);
      }

      if (I->getOpcode() == Instruction::Add) {
        // We have X+C. Check to see if we really have (X*C2)+C1,
        // where C1 is divisible by C2.
        unsigned SubScale;
        Value *SubVal =
            decomposeSimpleLinearExpr(I->getOperand(0), SubScale, Offset);
        Offset += RHS->getZExtValue();
        Scale = SubScale;
        return SubVal;
      }
    }
  }

  // Otherwise, we can't look past this.
  Scale = 1;
  Offset = 0;
  return Val;
}

/// If we find a cast of an allocation instruction, try to eliminate the cast by
/// moving the type information into the alloc.
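/// For example (illustrative), a 4 x i32 allocation viewed through an i64
/// pointer:
///   %a = alloca i32, i32 4
///   %c = bitcast i32* %a to i64*
/// can be rewritten so the cast folds away:
///   %a = alloca i64, i32 2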
Instruction *InstCombinerImpl::PromoteCastOfAllocation(BitCastInst &CI,
                                                       AllocaInst &AI) {
  PointerType *PTy = cast<PointerType>(CI.getType());

  IRBuilderBase::InsertPointGuard Guard(Builder);
  Builder.SetInsertPoint(&AI);

  // Get the type really allocated and the type casted to.
  Type *AllocElTy = AI.getAllocatedType();
  Type *CastElTy = PTy->getElementType();
  if (!AllocElTy->isSized() || !CastElTy->isSized()) return nullptr;

  // This optimisation does not work for cases where the cast type
  // is scalable and the allocated type is not. This is because we need to
  // know how many times the casted type fits into the allocated type.
  // For the opposite case where the allocated type is scalable and the
  // cast type is not, this leads to poor code quality due to the
  // introduction of 'vscale' into the calculations. It seems better to
  // bail out for this case too until we've done a proper cost-benefit
  // analysis.
  bool AllocIsScalable = isa<ScalableVectorType>(AllocElTy);
  bool CastIsScalable = isa<ScalableVectorType>(CastElTy);
  if (AllocIsScalable != CastIsScalable) return nullptr;

  Align AllocElTyAlign = DL.getABITypeAlign(AllocElTy);
  Align CastElTyAlign = DL.getABITypeAlign(CastElTy);
  if (CastElTyAlign < AllocElTyAlign) return nullptr;

  // If the allocation has multiple uses, only promote it if we are strictly
  // increasing the alignment of the resultant allocation. If we keep it the
  // same, we open the door to infinite loops of various kinds.
  if (!AI.hasOneUse() && CastElTyAlign == AllocElTyAlign) return nullptr;

  // The alloc and cast types should be either both fixed or both scalable.
  uint64_t AllocElTySize = DL.getTypeAllocSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTySize = DL.getTypeAllocSize(CastElTy).getKnownMinSize();
  if (CastElTySize == 0 || AllocElTySize == 0) return nullptr;

  // If the allocation has multiple uses, only promote it if we're not
  // shrinking the amount of memory being allocated.
  uint64_t AllocElTyStoreSize = DL.getTypeStoreSize(AllocElTy).getKnownMinSize();
  uint64_t CastElTyStoreSize = DL.getTypeStoreSize(CastElTy).getKnownMinSize();
  if (!AI.hasOneUse() && CastElTyStoreSize < AllocElTyStoreSize) return nullptr;

  // See if we can satisfy the modulus by pulling a scale out of the array
  // size argument.
  unsigned ArraySizeScale;
  uint64_t ArrayOffset;
  Value *NumElements = // See if the array size is a decomposable linear expr.
      decomposeSimpleLinearExpr(AI.getOperand(0), ArraySizeScale, ArrayOffset);

  // If we can now satisfy the modulus by using a non-1 scale, we really can
  // do the xform.
  if ((AllocElTySize*ArraySizeScale) % CastElTySize != 0 ||
      (AllocElTySize*ArrayOffset   ) % CastElTySize != 0) return nullptr;

  // We don't currently support arrays of scalable types.
  assert(!AllocIsScalable || (ArrayOffset == 1 && ArraySizeScale == 0));

  unsigned Scale = (AllocElTySize*ArraySizeScale)/CastElTySize;
  Value *Amt = nullptr;
  if (Scale == 1) {
    Amt = NumElements;
  } else {
    Amt = ConstantInt::get(AI.getArraySize()->getType(), Scale);
    // Insert before the alloca, not before the cast.
    Amt = Builder.CreateMul(Amt, NumElements);
  }

  if (uint64_t Offset = (AllocElTySize*ArrayOffset)/CastElTySize) {
    Value *Off = ConstantInt::get(AI.getArraySize()->getType(),
                                  Offset, true);
    Amt = Builder.CreateAdd(Amt, Off);
  }

  AllocaInst *New = Builder.CreateAlloca(CastElTy, Amt);
  New->setAlignment(AI.getAlign());
  New->takeName(&AI);
  New->setUsedWithInAlloca(AI.isUsedWithInAlloca());

  // If the allocation has multiple real uses, insert a cast and change all
  // things that used it to use the new cast. This will also hack on CI, but it
  // will die soon.
  if (!AI.hasOneUse()) {
    // New is the allocation instruction, pointer typed. AI is the original
    // allocation instruction, also pointer typed. Thus, the cast to use is
    // BitCast.
    Value *NewCast = Builder.CreateBitCast(New, AI.getType(), "tmpcast");
    replaceInstUsesWith(AI, NewCast);
    eraseInstFromFunction(AI);
  }
  return replaceInstUsesWith(CI, New);
}

/// Given an expression that CanEvaluateTruncated or CanEvaluateSExtd returns
/// true for, actually insert the code to evaluate the expression.
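/// For example (illustrative), evaluating this tree in i16:
///   %z = zext i16 %s to i32
///   %a = and i32 %z, 255
///   %t = trunc i32 %a to i16
/// yields
///   %a16 = and i16 %s, 255
/// so both casts disappear.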
Value *InstCombinerImpl::EvaluateInDifferentType(Value *V, Type *Ty,
                                                 bool isSigned) {
  if (Constant *C = dyn_cast<Constant>(V)) {
    C = ConstantExpr::getIntegerCast(C, Ty, isSigned /*Sext or ZExt*/);
    // If we got a constantexpr back, try to simplify it with DL info.
    return ConstantFoldConstant(C, DL, &TLI);
  }

  // Otherwise, it must be an instruction.
  Instruction *I = cast<Instruction>(V);
  Instruction *Res = nullptr;
  unsigned Opc = I->getOpcode();
  switch (Opc) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::AShr:
  case Instruction::LShr:
  case Instruction::Shl:
  case Instruction::UDiv:
  case Instruction::URem: {
    Value *LHS = EvaluateInDifferentType(I->getOperand(0), Ty, isSigned);
    Value *RHS = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Res = BinaryOperator::Create((Instruction::BinaryOps)Opc, LHS, RHS);
    break;
  }
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
    // If the source type of the cast is the type we're trying for then we can
    // just return the source. There's no need to insert it because it is not
    // new.
    if (I->getOperand(0)->getType() == Ty)
      return I->getOperand(0);

    // Otherwise, must be the same type of cast, so just reinsert a new one.
    // This also handles the case of zext(trunc(x)) -> zext(x).
    Res = CastInst::CreateIntegerCast(I->getOperand(0), Ty,
                                      Opc == Instruction::SExt);
    break;
  case Instruction::Select: {
    Value *True = EvaluateInDifferentType(I->getOperand(1), Ty, isSigned);
    Value *False = EvaluateInDifferentType(I->getOperand(2), Ty, isSigned);
    Res = SelectInst::Create(I->getOperand(0), True, False);
    break;
  }
  case Instruction::PHI: {
    PHINode *OPN = cast<PHINode>(I);
    PHINode *NPN = PHINode::Create(Ty, OPN->getNumIncomingValues());
    for (unsigned i = 0, e = OPN->getNumIncomingValues(); i != e; ++i) {
      Value *V =
          EvaluateInDifferentType(OPN->getIncomingValue(i), Ty, isSigned);
      NPN->addIncoming(V, OPN->getIncomingBlock(i));
    }
    Res = NPN;
    break;
  }
  default:
    // TODO: Can handle more cases here.
    llvm_unreachable("Unreachable!");
  }

  Res->takeName(I);
  return InsertNewInstWith(Res, *I);
}

Instruction::CastOps
InstCombinerImpl::isEliminableCastPair(const CastInst *CI1,
                                       const CastInst *CI2) {
  Type *SrcTy = CI1->getSrcTy();
  Type *MidTy = CI1->getDestTy();
  Type *DstTy = CI2->getDestTy();

  Instruction::CastOps firstOp = CI1->getOpcode();
  Instruction::CastOps secondOp = CI2->getOpcode();
  Type *SrcIntPtrTy =
      SrcTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(SrcTy) : nullptr;
  Type *MidIntPtrTy =
      MidTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(MidTy) : nullptr;
  Type *DstIntPtrTy =
      DstTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(DstTy) : nullptr;
  unsigned Res = CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy,
                                                DstTy, SrcIntPtrTy, MidIntPtrTy,
                                                DstIntPtrTy);

  // We don't want to form an inttoptr or ptrtoint that converts to an integer
  // type that differs from the pointer size.
  if ((Res == Instruction::IntToPtr && SrcTy != DstIntPtrTy) ||
      (Res == Instruction::PtrToInt && DstTy != SrcIntPtrTy))
    Res = 0;

  return Instruction::CastOps(Res);
}

/// Implement the transforms common to all CastInst visitors.
Instruction *InstCombinerImpl::commonCastTransforms(CastInst &CI) {
  Value *Src = CI.getOperand(0);
  Type *Ty = CI.getType();

  // Try to eliminate a cast of a cast.
  if (auto *CSrc = dyn_cast<CastInst>(Src)) { // A->B->C cast
    if (Instruction::CastOps NewOpc = isEliminableCastPair(CSrc, &CI)) {
      // The first cast (CSrc) is eliminable so we need to fix up or replace
      // the second cast (CI). CSrc will then have a good chance of being dead.
      auto *Res = CastInst::Create(NewOpc, CSrc->getOperand(0), Ty);
      // Point debug users of the dying cast to the new one.
      if (CSrc->hasOneUse())
        replaceAllDbgUsesWith(*CSrc, *Res, CI, DT);
      return Res;
    }
  }

  if (auto *Sel = dyn_cast<SelectInst>(Src)) {
    // We are casting a select. Try to fold the cast into the select if the
    // select does not have a compare instruction with matching operand types
    // or the select is likely better done in a narrow type.
    // Creating a select with operands that are different sizes than its
    // condition may inhibit other folds and lead to worse codegen.
    auto *Cmp = dyn_cast<CmpInst>(Sel->getCondition());
    if (!Cmp || Cmp->getOperand(0)->getType() != Sel->getType() ||
        (CI.getOpcode() == Instruction::Trunc &&
         shouldChangeType(CI.getSrcTy(), CI.getType()))) {
      if (Instruction *NV = FoldOpIntoSelect(CI, Sel)) {
        replaceAllDbgUsesWith(*Sel, *NV, CI, DT);
        return NV;
      }
    }
  }

  // If we are casting a PHI, then fold the cast into the PHI.
  if (auto *PN = dyn_cast<PHINode>(Src)) {
    // Don't do this if it would create a PHI node with an illegal type from a
    // legal type.
    if (!Src->getType()->isIntegerTy() || !CI.getType()->isIntegerTy() ||
        shouldChangeType(CI.getSrcTy(), CI.getType()))
      if (Instruction *NV = foldOpIntoPhi(CI, PN))
        return NV;
  }

  // Canonicalize a unary shuffle after the cast if neither operation changes
  // the size or element size of the input vector.
  // TODO: We could allow size-changing ops if that doesn't harm codegen.
  // cast (shuffle X, Mask) --> shuffle (cast X), Mask
  Value *X;
  ArrayRef<int> Mask;
  if (match(Src, m_OneUse(m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))) {
    // TODO: Allow scalable vectors?
    auto *SrcTy = dyn_cast<FixedVectorType>(X->getType());
    auto *DestTy = dyn_cast<FixedVectorType>(Ty);
    if (SrcTy && DestTy &&
        SrcTy->getNumElements() == DestTy->getNumElements() &&
        SrcTy->getPrimitiveSizeInBits() == DestTy->getPrimitiveSizeInBits()) {
      Value *CastX = Builder.CreateCast(CI.getOpcode(), X, DestTy);
      return new ShuffleVectorInst(CastX, Mask);
    }
  }

  return nullptr;
}

/// Constants and extensions/truncates from the destination type are always
/// free to be evaluated in that type. This is a helper for canEvaluate*.
static bool canAlwaysEvaluateInType(Value *V, Type *Ty) {
  if (isa<Constant>(V))
    return true;
  Value *X;
  if ((match(V, m_ZExtOrSExt(m_Value(X))) || match(V, m_Trunc(m_Value(X)))) &&
      X->getType() == Ty)
    return true;

  return false;
}

/// Filter out values that we cannot evaluate in the destination type for free.
/// This is a helper for canEvaluate*.
static bool canNotEvaluateInType(Value *V, Type *Ty) {
  assert(!isa<Constant>(V) && "Constant should already be handled.");
  if (!isa<Instruction>(V))
    return true;
  // We don't extend or shrink something that has multiple uses -- doing so
  // would require duplicating the instruction which isn't profitable.
  if (!V->hasOneUse())
    return true;

  return false;
}

/// Return true if we can evaluate the specified expression tree as type Ty
/// instead of its larger type, and arrive with the same value.
/// This is used by code that tries to eliminate truncates.
///
/// Ty will always be a type smaller than that of V. We should return true if
/// trunc(V) can be computed by computing V in the smaller type. If V is an
/// instruction, then trunc(inst(x,y)) can be computed as
/// inst(trunc(x),trunc(y)), which only makes sense if x and y can be
/// efficiently truncated.
///
/// This function works on both vectors and scalars.
///
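/// For example (illustrative), with Ty = i8:
///   %w = zext i8 %x to i32
///   %a = add i32 %w, 42
/// can be evaluated in i8 as (add i8 %x, 42), because the low 8 bits of an
/// add depend only on the low 8 bits of its operands.
///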
static bool canEvaluateTruncated(Value *V, Type *Ty, InstCombinerImpl &IC,
                                 Instruction *CxtI) {
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  Type *OrigTy = V->getType();
  switch (I->getOpcode()) {
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
    // These operators can all arbitrarily be extended or truncated.
    return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
           canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);

  case Instruction::UDiv:
  case Instruction::URem: {
    // UDiv and URem can be truncated if all the truncated bits are zero.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    assert(BitWidth < OrigBitWidth && "Unexpected bitwidths!");
    APInt Mask = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (IC.MaskedValueIsZero(I->getOperand(0), Mask, 0, CxtI) &&
        IC.MaskedValueIsZero(I->getOperand(1), Mask, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::Shl: {
    // If we are truncating the result of this SHL, and if it's a shift of an
    // in-range amount, we can always perform a SHL in a smaller type.
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    if (AmtKnownBits.getMaxValue().ult(BitWidth))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::LShr: {
    // If this is a truncate of a logical shr, we can truncate it to a smaller
    // lshr iff we know that the bits we would otherwise be shifting in are
    // already zeros.
    // TODO: It is enough to check that the bits we would be shifting in are
    // zero - use AmtKnownBits.getMaxValue().
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    APInt ShiftedBits = APInt::getBitsSetFrom(OrigBitWidth, BitWidth);
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        IC.MaskedValueIsZero(I->getOperand(0), ShiftedBits, 0, CxtI)) {
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    }
    break;
  }
  case Instruction::AShr: {
    // If this is a truncate of an arithmetic shr, we can truncate it to a
    // smaller ashr iff we know that all the bits from the sign bit of the
    // truncated type up to the sign bit of the original type are copies of
    // the sign bit.
    // TODO: It is enough to check that the bits we would be shifting in are
    // similar to the sign bit of the truncated type.
    uint32_t OrigBitWidth = OrigTy->getScalarSizeInBits();
    uint32_t BitWidth = Ty->getScalarSizeInBits();
    KnownBits AmtKnownBits =
        llvm::computeKnownBits(I->getOperand(1), IC.getDataLayout());
    unsigned ShiftedBits = OrigBitWidth - BitWidth;
    if (AmtKnownBits.getMaxValue().ult(BitWidth) &&
        ShiftedBits < IC.ComputeNumSignBits(I->getOperand(0), 0, CxtI))
      return canEvaluateTruncated(I->getOperand(0), Ty, IC, CxtI) &&
             canEvaluateTruncated(I->getOperand(1), Ty, IC, CxtI);
    break;
  }
  case Instruction::Trunc:
    // trunc(trunc(x)) -> trunc(x)
    return true;
  case Instruction::ZExt:
  case Instruction::SExt:
    // trunc(ext(x)) -> ext(x) if the source type is smaller than the new dest
    // trunc(ext(x)) -> trunc(x) if the source type is larger than the new dest
    return true;
  case Instruction::Select: {
    SelectInst *SI = cast<SelectInst>(I);
    return canEvaluateTruncated(SI->getTrueValue(), Ty, IC, CxtI) &&
           canEvaluateTruncated(SI->getFalseValue(), Ty, IC, CxtI);
  }
  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    for (Value *IncValue : PN->incoming_values())
      if (!canEvaluateTruncated(IncValue, Ty, IC, CxtI))
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    break;
  }

  return false;
}

/// Given a vector that is bitcast to an integer, optionally logically
/// right-shifted, and truncated, convert it to an extractelement.
/// Example (big endian):
///   trunc (lshr (bitcast <4 x i32> %X to i128), 32) to i32
///   --->
///   extractelement <4 x i32> %X, 1
static Instruction *foldVecTruncToExtElt(TruncInst &Trunc,
                                         InstCombinerImpl &IC) {
  Value *TruncOp = Trunc.getOperand(0);
  Type *DestType = Trunc.getType();
  if (!TruncOp->hasOneUse() || !isa<IntegerType>(DestType))
    return nullptr;

  Value *VecInput = nullptr;
  ConstantInt *ShiftVal = nullptr;
  if (!match(TruncOp, m_CombineOr(m_BitCast(m_Value(VecInput)),
                                  m_LShr(m_BitCast(m_Value(VecInput)),
                                         m_ConstantInt(ShiftVal)))) ||
      !isa<VectorType>(VecInput->getType()))
    return nullptr;

  VectorType *VecType = cast<VectorType>(VecInput->getType());
  unsigned VecWidth = VecType->getPrimitiveSizeInBits();
  unsigned DestWidth = DestType->getPrimitiveSizeInBits();
  unsigned ShiftAmount = ShiftVal ? ShiftVal->getZExtValue() : 0;

  if ((VecWidth % DestWidth != 0) || (ShiftAmount % DestWidth != 0))
    return nullptr;

  // If the element type of the vector doesn't match the result type,
  // bitcast it to a vector type that we can extract from.
  unsigned NumVecElts = VecWidth / DestWidth;
  if (VecType->getElementType() != DestType) {
    VecType = FixedVectorType::get(DestType, NumVecElts);
    VecInput = IC.Builder.CreateBitCast(VecInput, VecType, "bc");
  }

  unsigned Elt = ShiftAmount / DestWidth;
  if (IC.getDataLayout().isBigEndian())
    Elt = NumVecElts - 1 - Elt;

  return ExtractElementInst::Create(VecInput, IC.Builder.getInt32(Elt));
}

/// Funnel/Rotate left/right may occur in a wider type than necessary because of
/// type promotion rules. Try to narrow the inputs and convert to funnel shift.
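/// For example (illustrative rotate of an i8 value %v promoted to i32):
///   %z = zext i8 %v to i32
///   %or = or i32 (shl i32 %z, %amt), (lshr i32 %z, (sub i32 8, %amt))
///   %r = trunc i32 %or to i8
/// can become
///   %r = call i8 @llvm.fshl.i8(i8 %v, i8 %v, i8 (trunc %amt))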
Instruction *InstCombinerImpl::narrowFunnelShift(TruncInst &Trunc) {
  assert((isa<VectorType>(Trunc.getSrcTy()) ||
          shouldChangeType(Trunc.getSrcTy(), Trunc.getType())) &&
         "Don't narrow to an illegal scalar type");

  // Bail out on strange types. It is possible to handle some of these patterns
  // even with non-power-of-2 sizes, but it is not a likely scenario.
  Type *DestTy = Trunc.getType();
  unsigned NarrowWidth = DestTy->getScalarSizeInBits();
  unsigned WideWidth = Trunc.getSrcTy()->getScalarSizeInBits();
  if (!isPowerOf2_32(NarrowWidth))
    return nullptr;

  // First, find an or'd pair of opposite shifts:
  // trunc (or (lshr ShVal0, ShAmt0), (shl ShVal1, ShAmt1))
  BinaryOperator *Or0, *Or1;
  if (!match(Trunc.getOperand(0), m_OneUse(m_Or(m_BinOp(Or0), m_BinOp(Or1)))))
    return nullptr;

  Value *ShVal0, *ShVal1, *ShAmt0, *ShAmt1;
  if (!match(Or0, m_OneUse(m_LogicalShift(m_Value(ShVal0), m_Value(ShAmt0)))) ||
      !match(Or1, m_OneUse(m_LogicalShift(m_Value(ShVal1), m_Value(ShAmt1)))) ||
      Or0->getOpcode() == Or1->getOpcode())
    return nullptr;

  // Canonicalize to or(shl(ShVal0, ShAmt0), lshr(ShVal1, ShAmt1)).
  if (Or0->getOpcode() == BinaryOperator::LShr) {
    std::swap(Or0, Or1);
    std::swap(ShVal0, ShVal1);
    std::swap(ShAmt0, ShAmt1);
  }
  assert(Or0->getOpcode() == BinaryOperator::Shl &&
         Or1->getOpcode() == BinaryOperator::LShr &&
         "Illegal or(shift,shift) pair");

  // Match the shift amount operands for a funnel/rotate pattern. This always
  // matches a subtraction on the R operand.
  auto matchShiftAmount = [&](Value *L, Value *R, unsigned Width) -> Value * {
    // The shift amounts may add up to the narrow bit width:
    // (shl ShVal0, L) | (lshr ShVal1, Width - L)
    // If this is a funnel shift (different operands are shifted), then the
    // shift amount cannot over-shift (create poison) in the narrow type.
    unsigned MaxShiftAmountWidth = Log2_32(NarrowWidth);
    APInt HiBitMask = ~APInt::getLowBitsSet(WideWidth, MaxShiftAmountWidth);
    if (ShVal0 == ShVal1 || MaskedValueIsZero(L, HiBitMask))
      if (match(R, m_OneUse(m_Sub(m_SpecificInt(Width), m_Specific(L)))))
        return L;

    // The following patterns currently only work for rotation patterns.
    // TODO: Add more general funnel-shift compatible patterns.
    if (ShVal0 != ShVal1)
      return nullptr;

    // The shift amount may be masked with negation:
    // (shl ShVal0, (X & (Width - 1))) | (lshr ShVal1, ((-X) & (Width - 1)))
    Value *X;
    unsigned Mask = Width - 1;
    if (match(L, m_And(m_Value(X), m_SpecificInt(Mask))) &&
        match(R, m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask))))
      return X;

    // Same as above, but the shift amount may be extended after masking:
    if (match(L, m_ZExt(m_And(m_Value(X), m_SpecificInt(Mask)))) &&
        match(R, m_ZExt(m_And(m_Neg(m_Specific(X)), m_SpecificInt(Mask)))))
      return X;

    return nullptr;
  };

  Value *ShAmt = matchShiftAmount(ShAmt0, ShAmt1, NarrowWidth);
  bool IsFshl = true; // Sub on LSHR.
  if (!ShAmt) {
    ShAmt = matchShiftAmount(ShAmt1, ShAmt0, NarrowWidth);
    IsFshl = false; // Sub on SHL.
  }
  if (!ShAmt)
    return nullptr;

  // The right-shifted value must have high zeros in the wide type (for example
  // from 'zext', 'and' or 'shift'). High bits of the left-shifted value are
  // truncated, so those do not matter.
  APInt HiBitMask = APInt::getHighBitsSet(WideWidth, WideWidth - NarrowWidth);
  if (!MaskedValueIsZero(ShVal1, HiBitMask, 0, &Trunc))
    return nullptr;

  // We have an unnecessarily wide rotate!
  // trunc (or (shl ShVal0, ShAmt), (lshr ShVal1, BitWidth - ShAmt))
  // Narrow the inputs and convert to funnel shift intrinsic:
  // llvm.fshl.i8(trunc(ShVal), trunc(ShVal), trunc(ShAmt))
  Value *NarrowShAmt = Builder.CreateTrunc(ShAmt, DestTy);
  Value *X, *Y;
  X = Y = Builder.CreateTrunc(ShVal0, DestTy);
  if (ShVal0 != ShVal1)
    Y = Builder.CreateTrunc(ShVal1, DestTy);
  Intrinsic::ID IID = IsFshl ? Intrinsic::fshl : Intrinsic::fshr;
  Function *F = Intrinsic::getDeclaration(Trunc.getModule(), IID, DestTy);
  return CallInst::Create(F, {X, Y, NarrowShAmt});
}

/// Try to narrow the width of math or bitwise logic instructions by pulling a
/// truncate ahead of binary operators.
/// TODO: Transforms for truncated shifts should be moved into here.
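/// For example (illustrative):
///   trunc (add i64 %x, 15) to i32 --> add i32 (trunc i64 %x to i32), 15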
Instruction *InstCombinerImpl::narrowBinOp(TruncInst &Trunc) {
  Type *SrcTy = Trunc.getSrcTy();
  Type *DestTy = Trunc.getType();
  if (!isa<VectorType>(SrcTy) && !shouldChangeType(SrcTy, DestTy))
    return nullptr;

  BinaryOperator *BinOp;
  if (!match(Trunc.getOperand(0), m_OneUse(m_BinOp(BinOp))))
    return nullptr;

  Value *BinOp0 = BinOp->getOperand(0);
  Value *BinOp1 = BinOp->getOperand(1);
  switch (BinOp->getOpcode()) {
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul: {
    Constant *C;
    if (match(BinOp0, m_Constant(C))) {
      // trunc (binop C, X) --> binop (trunc C', X)
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowC, TruncX);
    }
    if (match(BinOp1, m_Constant(C))) {
      // trunc (binop X, C) --> binop (trunc X, C')
      Constant *NarrowC = ConstantExpr::getTrunc(C, DestTy);
      Value *TruncX = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), TruncX, NarrowC);
    }
    Value *X;
    if (match(BinOp0, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop (ext X), Y) --> binop X, (trunc Y)
      Value *NarrowOp1 = Builder.CreateTrunc(BinOp1, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), X, NarrowOp1);
    }
    if (match(BinOp1, m_ZExtOrSExt(m_Value(X))) && X->getType() == DestTy) {
      // trunc (binop Y, (ext X)) --> binop (trunc Y), X
      Value *NarrowOp0 = Builder.CreateTrunc(BinOp0, DestTy);
      return BinaryOperator::Create(BinOp->getOpcode(), NarrowOp0, X);
    }
    break;
  }

  default: break;
  }

  if (Instruction *NarrowOr = narrowFunnelShift(Trunc))
    return NarrowOr;

  return nullptr;
}

/// Try to narrow the width of a splat shuffle. This could be generalized to any
/// shuffle with a constant operand, but we limit the transform to avoid
/// creating a shuffle type that targets may not be able to lower effectively.
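/// For example (illustrative):
///   trunc (shuffle <4 x i32> %x, undef, zeroinitializer) to <4 x i8>
///   --> shuffle (trunc %x to <4 x i8>), poison, zeroinitializer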
static Instruction *shrinkSplatShuffle(TruncInst &Trunc,
                                       InstCombiner::BuilderTy &Builder) {
  auto *Shuf = dyn_cast<ShuffleVectorInst>(Trunc.getOperand(0));
  if (Shuf && Shuf->hasOneUse() && match(Shuf->getOperand(1), m_Undef()) &&
      is_splat(Shuf->getShuffleMask()) &&
      Shuf->getType() == Shuf->getOperand(0)->getType()) {
    // trunc (shuf X, Undef, SplatMask) --> shuf (trunc X), Poison, SplatMask
    // trunc (shuf X, Poison, SplatMask) --> shuf (trunc X), Poison, SplatMask
    Value *NarrowOp = Builder.CreateTrunc(Shuf->getOperand(0), Trunc.getType());
    return new ShuffleVectorInst(NarrowOp, Shuf->getShuffleMask());
  }

  return nullptr;
}

/// Try to narrow the width of an insert element. This could be generalized for
/// any vector constant, but we limit the transform to insertion into undef to
/// avoid potential backend problems from unsupported insertion widths. This
/// could also be extended to handle the case of inserting a scalar constant
/// into a vector variable.
static Instruction *shrinkInsertElt(CastInst &Trunc,
                                    InstCombiner::BuilderTy &Builder) {
  Instruction::CastOps Opcode = Trunc.getOpcode();
  assert((Opcode == Instruction::Trunc || Opcode == Instruction::FPTrunc) &&
         "Unexpected instruction for shrinking");

  auto *InsElt = dyn_cast<InsertElementInst>(Trunc.getOperand(0));
  if (!InsElt || !InsElt->hasOneUse())
    return nullptr;

  Type *DestTy = Trunc.getType();
  Type *DestScalarTy = DestTy->getScalarType();
  Value *VecOp = InsElt->getOperand(0);
  Value *ScalarOp = InsElt->getOperand(1);
  Value *Index = InsElt->getOperand(2);

  if (match(VecOp, m_Undef())) {
    // trunc (inselt undef, X, Index) --> inselt undef, (trunc X), Index
    // fptrunc (inselt undef, X, Index) --> inselt undef, (fptrunc X), Index
    UndefValue *NarrowUndef = UndefValue::get(DestTy);
    Value *NarrowOp = Builder.CreateCast(Opcode, ScalarOp, DestScalarTy);
    return InsertElementInst::Create(NarrowUndef, NarrowOp, Index);
  }

  return nullptr;
}

Instruction *InstCombinerImpl::visitTrunc(TruncInst &Trunc) {
  if (Instruction *Result = commonCastTransforms(Trunc))
    return Result;

  Value *Src = Trunc.getOperand(0);
  Type *DestTy = Trunc.getType(), *SrcTy = Src->getType();
  unsigned DestWidth = DestTy->getScalarSizeInBits();
  unsigned SrcWidth = SrcTy->getScalarSizeInBits();

  // Attempt to truncate the entire input expression tree to the destination
  // type. Only do this if the dest type is a simple type, don't convert the
  // expression tree to something weird like i93 unless the source is also
  // strange.
  if ((DestTy->isVectorTy() || shouldChangeType(SrcTy, DestTy)) &&
      canEvaluateTruncated(Src, DestTy, *this, &Trunc)) {

    // If this cast is a truncate, evaluating in a different type always
    // eliminates the cast, so it is always a win.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid cast: "
               << Trunc << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);
    return replaceInstUsesWith(Trunc, Res);
  }

  // For integer types, check if we can shorten the entire input expression to
  // DestWidth * 2, which won't allow removing the truncate, but reducing the
  // width may enable further optimizations, e.g. allowing for larger
  // vectorization factors.
  if (auto *DestITy = dyn_cast<IntegerType>(DestTy)) {
    if (DestWidth * 2 < SrcWidth) {
      auto *NewDestTy = DestITy->getExtendedType();
      if (shouldChangeType(SrcTy, NewDestTy) &&
          canEvaluateTruncated(Src, NewDestTy, *this, &Trunc)) {
        LLVM_DEBUG(
            dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                      " to reduce the width of the operand of"
                   << Trunc << '\n');
        Value *Res = EvaluateInDifferentType(Src, NewDestTy, false);
        return new TruncInst(Res, DestTy);
      }
    }
  }

  // Test if the trunc is the user of a select which is part of a
  // minimum or maximum operation. If so, don't do any more simplification.
  // Even simplifying demanded bits can break the canonical form of a
  // min/max.
  Value *LHS, *RHS;
  if (SelectInst *Sel = dyn_cast<SelectInst>(Src))
    if (matchSelectPattern(Sel, LHS, RHS).Flavor != SPF_UNKNOWN)
      return nullptr;

  // See if we can simplify any instructions used by the input whose sole
  // purpose is to compute bits we don't care about.
  if (SimplifyDemandedInstructionBits(Trunc))
    return &Trunc;

  if (DestWidth == 1) {
    Value *Zero = Constant::getNullValue(SrcTy);
    if (DestTy->isIntegerTy()) {
      // Canonicalize trunc x to i1 -> icmp ne (and x, 1), 0 (scalar only).
      // TODO: We canonicalize to more instructions here because we are probably
      // lacking equivalent analysis for trunc relative to icmp. There may also
      // be codegen concerns. If those trunc limitations were removed, we could
      // remove this transform.
      Value *And = Builder.CreateAnd(Src, ConstantInt::get(SrcTy, 1));
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }

    // For vectors, we do not canonicalize all truncs to icmp, so optimize
    // patterns that would be covered within visitICmpInst.
    Value *X;
    Constant *C;
    if (match(Src, m_OneUse(m_LShr(m_Value(X), m_Constant(C))))) {
      // trunc (lshr X, C) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
    if (match(Src, m_OneUse(m_c_Or(m_LShr(m_Value(X), m_Constant(C)),
                                   m_Deferred(X))))) {
      // trunc (or (lshr X, C), X) to i1 --> icmp ne (and X, C'), 0
      Constant *One = ConstantInt::get(SrcTy, APInt(SrcWidth, 1));
      Constant *MaskC = ConstantExpr::getShl(One, C);
      MaskC = ConstantExpr::getOr(MaskC, One);
      Value *And = Builder.CreateAnd(X, MaskC);
      return new ICmpInst(ICmpInst::ICMP_NE, And, Zero);
    }
  }

  Value *A, *B;
  Constant *C;
  if (match(Src, m_LShr(m_SExt(m_Value(A)), m_Constant(C)))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    unsigned MaxShiftAmt = SrcWidth - std::max(DestWidth, AWidth);
    auto *OldSh = cast<Instruction>(Src);
    bool IsExact = OldSh->isExact();

    // If the shift is small enough, all zero bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      // trunc (lshr (sext A), C) --> ashr A, C
      if (A->getType() == DestTy) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, DestWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        ShAmt = Constant::mergeUndefsWith(ShAmt, C);
        return IsExact ? BinaryOperator::CreateExactAShr(A, ShAmt)
                       : BinaryOperator::CreateAShr(A, ShAmt);
      }
      // The types are mismatched, so create a cast after shifting:
      // trunc (lshr (sext A), C) --> sext/trunc (ashr A, C)
      if (Src->hasOneUse()) {
        Constant *MaxAmt = ConstantInt::get(SrcTy, AWidth - 1, false);
        Constant *ShAmt = ConstantExpr::getUMin(C, MaxAmt);
        ShAmt = ConstantExpr::getTrunc(ShAmt, A->getType());
        Value *Shift = Builder.CreateAShr(A, ShAmt, "", IsExact);
        return CastInst::CreateIntegerCast(Shift, DestTy, true);
      }
    }
    // TODO: Mask high bits with 'and'.
  }

  // trunc (*shr (trunc A), C) --> trunc(*shr A, C)
  if (match(Src, m_OneUse(m_Shr(m_Trunc(m_Value(A)), m_Constant(C))))) {
    unsigned MaxShiftAmt = SrcWidth - DestWidth;

    // If the shift is small enough, all zero/sign bits created by the shift are
    // removed by the trunc.
    if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULE,
                                    APInt(SrcWidth, MaxShiftAmt)))) {
      auto *OldShift = cast<Instruction>(Src);
      bool IsExact = OldShift->isExact();
      auto *ShAmt = ConstantExpr::getIntegerCast(C, A->getType(), true);
      ShAmt = Constant::mergeUndefsWith(ShAmt, C);
      Value *Shift =
          OldShift->getOpcode() == Instruction::AShr
              ? Builder.CreateAShr(A, ShAmt, OldShift->getName(), IsExact)
              : Builder.CreateLShr(A, ShAmt, OldShift->getName(), IsExact);
      return CastInst::CreateTruncOrBitCast(Shift, DestTy);
    }
  }

  if (Instruction *I = narrowBinOp(Trunc))
    return I;

  if (Instruction *I = shrinkSplatShuffle(Trunc, Builder))
    return I;

  if (Instruction *I = shrinkInsertElt(Trunc, Builder))
    return I;

  if (Src->hasOneUse() &&
      (isa<VectorType>(SrcTy) || shouldChangeType(SrcTy, DestTy))) {
    // Transform "trunc (shl X, cst)" -> "shl (trunc X), cst" so long as the
    // dest type is native and cst < dest size.
    if (match(Src, m_Shl(m_Value(A), m_Constant(C))) &&
        !match(A, m_Shr(m_Value(), m_Constant()))) {
      // Skip shifts of shifts by constants: transforming those would undo a
      // combine in FoldShiftByConstant, and they form the 'extend in register'
      // pattern.
      APInt Threshold = APInt(C->getType()->getScalarSizeInBits(), DestWidth);
      if (match(C, m_SpecificInt_ICMP(ICmpInst::ICMP_ULT, Threshold))) {
        Value *NewTrunc = Builder.CreateTrunc(A, DestTy, A->getName() + ".tr");
        return BinaryOperator::Create(Instruction::Shl, NewTrunc,
                                      ConstantExpr::getTrunc(C, DestTy));
      }
    }
  }

  if (Instruction *I = foldVecTruncToExtElt(Trunc, *this))
    return I;

  // Whenever an element is extracted from a vector, and then truncated,
  // canonicalize by converting it to a bitcast followed by an
  // extractelement.
  //
  // Example (little endian):
  //   trunc (extractelement <4 x i64> %X, 0) to i32
  //   --->
  //   extractelement <8 x i32> (bitcast <4 x i64> %X to <8 x i32>), i32 0
  Value *VecOp;
  ConstantInt *Cst;
  if (match(Src, m_OneUse(m_ExtractElt(m_Value(VecOp), m_ConstantInt(Cst))))) {
    auto *VecOpTy = cast<VectorType>(VecOp->getType());
    auto VecElts = VecOpTy->getElementCount();

    // A poorly fitting destination size would result in an invalid cast.
    if (SrcWidth % DestWidth == 0) {
      uint64_t TruncRatio = SrcWidth / DestWidth;
      uint64_t BitCastNumElts = VecElts.getKnownMinValue() * TruncRatio;
      uint64_t VecOpIdx = Cst->getZExtValue();
      uint64_t NewIdx = DL.isBigEndian() ? (VecOpIdx + 1) * TruncRatio - 1
                                         : VecOpIdx * TruncRatio;
      assert(BitCastNumElts <= std::numeric_limits<uint32_t>::max() &&
             "overflow 32-bits");

      auto *BitCastTo =
          VectorType::get(DestTy, BitCastNumElts, VecElts.isScalable());
      Value *BitCast = Builder.CreateBitCast(VecOp, BitCastTo);
      return ExtractElementInst::Create(BitCast, Builder.getInt32(NewIdx));
    }
  }

  // trunc (ctlz_i32(zext(A), B)) --> add(ctlz_i16(A, B), C)
  if (match(Src, m_OneUse(m_Intrinsic<Intrinsic::ctlz>(m_ZExt(m_Value(A)),
                                                       m_Value(B))))) {
    unsigned AWidth = A->getType()->getScalarSizeInBits();
    if (AWidth == DestWidth && AWidth > Log2_32(SrcWidth)) {
      Value *WidthDiff = ConstantInt::get(A->getType(), SrcWidth - AWidth);
      Value *NarrowCtlz =
          Builder.CreateIntrinsic(Intrinsic::ctlz, {Trunc.getType()}, {A, B});
      return BinaryOperator::CreateAdd(NarrowCtlz, WidthDiff);
    }
  }

  if (match(Src, m_VScale(DL))) {
    if (Trunc.getFunction() &&
        Trunc.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      unsigned MaxVScale = Trunc.getFunction()
                               ->getFnAttribute(Attribute::VScaleRange)
                               .getVScaleRangeArgs()
                               .second;
      if (MaxVScale > 0 && Log2_32(MaxVScale) < DestWidth) {
        Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
        return replaceInstUsesWith(Trunc, VScale);
      }
    }
  }

  return nullptr;
}

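/// Transform (zext icmp) to bitwise / integer operations in order to
/// eliminate the icmp. For example (illustrative):
///   zext (icmp slt i32 %x, 0) to i32 --> lshr i32 %x, 31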
Instruction *InstCombinerImpl::transformZExtICmp(ICmpInst *Cmp, ZExtInst &Zext) {
  // If we are just checking for an icmp eq of a single bit and zext'ing it
  // to an integer, then shift the bit to the appropriate place and then
  // cast to integer to avoid the comparison.
  const APInt *Op1CV;
  if (match(Cmp->getOperand(1), m_APInt(Op1CV))) {

    // zext (x <s 0) to i32 --> x>>u31       true if signbit set.
    // zext (x >s -1) to i32 --> (x>>u31)^1  true if signbit clear.
    if ((Cmp->getPredicate() == ICmpInst::ICMP_SLT && Op1CV->isZero()) ||
        (Cmp->getPredicate() == ICmpInst::ICMP_SGT && Op1CV->isAllOnes())) {
      Value *In = Cmp->getOperand(0);
      Value *Sh = ConstantInt::get(In->getType(),
                                   In->getType()->getScalarSizeInBits() - 1);
      In = Builder.CreateLShr(In, Sh, In->getName() + ".lobit");
      if (In->getType() != Zext.getType())
        In = Builder.CreateIntCast(In, Zext.getType(), false /*ZExt*/);

      if (Cmp->getPredicate() == ICmpInst::ICMP_SGT) {
        Constant *One = ConstantInt::get(In->getType(), 1);
        In = Builder.CreateXor(In, One, In->getName() + ".not");
      }

      return replaceInstUsesWith(Zext, In);
    }

    // zext (X == 0) to i32 --> X^1      iff X has only the low bit set.
    // zext (X == 0) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    // zext (X == 1) to i32 --> X        iff X has only the low bit set.
    // zext (X == 2) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 0) to i32 --> X        iff X has only the low bit set.
    // zext (X != 0) to i32 --> X>>1     iff X has only the 2nd bit set.
    // zext (X != 1) to i32 --> X^1      iff X has only the low bit set.
    // zext (X != 2) to i32 --> (X>>1)^1 iff X has only the 2nd bit set.
    if ((Op1CV->isZero() || Op1CV->isPowerOf2()) &&
        // This only works for EQ and NE
        Cmp->isEquality()) {
      // If Op1C is some other power of two, convert:
      KnownBits Known = computeKnownBits(Cmp->getOperand(0), 0, &Zext);

      APInt KnownZeroMask(~Known.Zero);
      if (KnownZeroMask.isPowerOf2()) { // Exactly 1 possible 1?
        bool isNE = Cmp->getPredicate() == ICmpInst::ICMP_NE;
        if (!Op1CV->isZero() && (*Op1CV != KnownZeroMask)) {
          // (X&4) == 2 --> false
          // (X&4) != 2 --> true
          Constant *Res = ConstantInt::get(Zext.getType(), isNE);
          return replaceInstUsesWith(Zext, Res);
        }

        uint32_t ShAmt = KnownZeroMask.logBase2();
        Value *In = Cmp->getOperand(0);
        if (ShAmt) {
          // Perform a logical shr by shiftamt.
          // Insert the shift to put the result in the low bit.
          In = Builder.CreateLShr(In, ConstantInt::get(In->getType(), ShAmt),
                                  In->getName() + ".lobit");
        }

        if (!Op1CV->isZero() == isNE) { // Toggle the low bit.
          Constant *One = ConstantInt::get(In->getType(), 1);
          In = Builder.CreateXor(In, One);
        }

        if (Zext.getType() == In->getType())
          return replaceInstUsesWith(Zext, In);

        Value *IntCast = Builder.CreateIntCast(In, Zext.getType(), false);
        return replaceInstUsesWith(Zext, IntCast);
      }
    }
  }

  if (Cmp->isEquality() && Zext.getType() == Cmp->getOperand(0)->getType()) {
    // Test if a bit is clear/set using a shifted-one mask:
    // zext (icmp eq (and X, (1 << ShAmt)), 0) --> and (lshr (not X), ShAmt), 1
    // zext (icmp ne (and X, (1 << ShAmt)), 0) --> and (lshr X, ShAmt), 1
    Value *X, *ShAmt;
    if (Cmp->hasOneUse() && match(Cmp->getOperand(1), m_ZeroInt()) &&
        match(Cmp->getOperand(0),
              m_OneUse(m_c_And(m_Shl(m_One(), m_Value(ShAmt)), m_Value(X))))) {
      if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
        X = Builder.CreateNot(X);
      Value *Lshr = Builder.CreateLShr(X, ShAmt);
      Value *And1 = Builder.CreateAnd(Lshr, ConstantInt::get(X->getType(), 1));
      return replaceInstUsesWith(Zext, And1);
    }

    // icmp ne A, B is equal to xor A, B when A and B have only one bit that
    // isn't known. It is also profitable to transform icmp eq into
    // not(xor(A, B)) because that may lead to additional simplifications.
    if (IntegerType *ITy = dyn_cast<IntegerType>(Zext.getType())) {
      Value *LHS = Cmp->getOperand(0);
      Value *RHS = Cmp->getOperand(1);

      KnownBits KnownLHS = computeKnownBits(LHS, 0, &Zext);
      KnownBits KnownRHS = computeKnownBits(RHS, 0, &Zext);

      if (KnownLHS.Zero == KnownRHS.Zero && KnownLHS.One == KnownRHS.One) {
        APInt KnownBits = KnownLHS.Zero | KnownLHS.One;
        APInt UnknownBit = ~KnownBits;
        if (UnknownBit.countPopulation() == 1) {
          Value *Result = Builder.CreateXor(LHS, RHS);

          // Mask off any bits that are set and won't be shifted away.
          if (KnownLHS.One.uge(UnknownBit))
            Result = Builder.CreateAnd(Result,
                                       ConstantInt::get(ITy, UnknownBit));

          // Shift the bit we're testing down to the lsb.
          Result = Builder.CreateLShr(
              Result, ConstantInt::get(ITy, UnknownBit.countTrailingZeros()));

          if (Cmp->getPredicate() == ICmpInst::ICMP_EQ)
            Result = Builder.CreateXor(Result, ConstantInt::get(ITy, 1));
          Result->takeName(Cmp);
          return replaceInstUsesWith(Zext, Result);
        }
      }
    }
  }

  return nullptr;
}

/// Determine if the specified value can be computed in the specified wider type
/// and produce the same low bits. If not, return false.
///
/// If this function returns true, it can also return a non-zero number of bits
/// (in BitsToClear) which indicates that the value it computes is correct for
/// the zero extend, but that the additional BitsToClear bits need to be zero'd
/// out. For example, to promote something like:
///
///   %B = trunc i64 %A to i32
///   %C = lshr i32 %B, 8
///   %E = zext i32 %C to i64
///
/// CanEvaluateZExtd for the 'lshr' will return true, and BitsToClear will be
/// set to 8 to indicate that the promoted value needs to have bits 24-31
/// cleared in addition to bits 32-63. Since an 'and' will be generated to
/// clear the top bits anyway, doing this has no extra cost.
///
/// This function works on both vectors and scalars.
static bool canEvaluateZExtd(Value *V, Type *Ty, unsigned &BitsToClear,
                             InstCombinerImpl &IC, Instruction *CxtI) {
  BitsToClear = 0;
  if (canAlwaysEvaluateInType(V, Ty))
    return true;
  if (canNotEvaluateInType(V, Ty))
    return false;

  auto *I = cast<Instruction>(V);
  unsigned Tmp;
  switch (I->getOpcode()) {
  case Instruction::ZExt:  // zext(zext(x)) -> zext(x).
  case Instruction::SExt:  // zext(sext(x)) -> sext(x).
  case Instruction::Trunc: // zext(trunc(x)) -> trunc(x) or zext(x)
    return true;
  case Instruction::And:
  case Instruction::Or:
  case Instruction::Xor:
  case Instruction::Add:
  case Instruction::Sub:
  case Instruction::Mul:
    if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI))
      return false;
    // These can all be promoted if neither operand has 'bits to clear'.
    if (BitsToClear == 0 && Tmp == 0)
      return true;

    // If the operation is an AND/OR/XOR and the bits to clear are zero in the
    // other side, BitsToClear is ok.
    if (Tmp == 0 && I->isBitwiseLogicOp()) {
      // We use MaskedValueIsZero here for generality, but the case we care
      // about the most is constant RHS.
      unsigned VSize = V->getType()->getScalarSizeInBits();
      if (IC.MaskedValueIsZero(I->getOperand(1),
                               APInt::getHighBitsSet(VSize, BitsToClear),
                               0, CxtI)) {
        // If this is an And instruction and all of the BitsToClear are
        // known to be zero we can reset BitsToClear.
        if (I->getOpcode() == Instruction::And)
          BitsToClear = 0;
        return true;
      }
    }

    // Otherwise, we don't know how to analyze this BitsToClear case yet.
    return false;

  case Instruction::Shl: {
    // We can promote shl(x, cst) if we can promote x. Since shl overwrites the
    // upper bits we can reduce BitsToClear by the shift amount.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      uint64_t ShiftAmt = Amt->getZExtValue();
      BitsToClear = ShiftAmt < BitsToClear ? BitsToClear - ShiftAmt : 0;
      return true;
    }
    return false;
  }
  case Instruction::LShr: {
    // We can promote lshr(x, cst) if we can promote x. This requires the
    // ultimate 'and' to clear out the high zero bits we're clearing out though.
    const APInt *Amt;
    if (match(I->getOperand(1), m_APInt(Amt))) {
      if (!canEvaluateZExtd(I->getOperand(0), Ty, BitsToClear, IC, CxtI))
        return false;
      BitsToClear += Amt->getZExtValue();
      if (BitsToClear > V->getType()->getScalarSizeInBits())
        BitsToClear = V->getType()->getScalarSizeInBits();
      return true;
    }
    // Cannot promote variable LSHR.
    return false;
  }
  case Instruction::Select:
    if (!canEvaluateZExtd(I->getOperand(1), Ty, Tmp, IC, CxtI) ||
        !canEvaluateZExtd(I->getOperand(2), Ty, BitsToClear, IC, CxtI) ||
        // TODO: If important, we could handle the case when the BitsToClear are
        // known zero in the disagreeing side.
        Tmp != BitsToClear)
      return false;
    return true;

  case Instruction::PHI: {
    // We can change a phi if we can change all operands. Note that we never
    // get into trouble with cyclic PHIs here because we only consider
    // instructions with a single use.
    PHINode *PN = cast<PHINode>(I);
    if (!canEvaluateZExtd(PN->getIncomingValue(0), Ty, BitsToClear, IC, CxtI))
      return false;
    for (unsigned i = 1, e = PN->getNumIncomingValues(); i != e; ++i)
      if (!canEvaluateZExtd(PN->getIncomingValue(i), Ty, Tmp, IC, CxtI) ||
          // TODO: If important, we could handle the case when the BitsToClear
          // are known zero in the disagreeing input.
          Tmp != BitsToClear)
        return false;
    return true;
  }
  default:
    // TODO: Can handle more cases here.
    return false;
  }
}

Instruction *InstCombinerImpl::visitZExt(ZExtInst &CI) {
  // If this zero extend is only used by a truncate, let the truncate be
  // eliminated before we try to optimize this zext.
  if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
    return nullptr;

  // If one of the common conversions will work, do it.
  if (Instruction *Result = commonCastTransforms(CI))
    return Result;

  Value *Src = CI.getOperand(0);
  Type *SrcTy = Src->getType(), *DestTy = CI.getType();

  // Try to extend the entire expression tree to the wide destination type.
  unsigned BitsToClear;
  if (shouldChangeType(SrcTy, DestTy) &&
      canEvaluateZExtd(Src, DestTy, BitsToClear, *this, &CI)) {
    assert(BitsToClear <= SrcTy->getScalarSizeInBits() &&
           "Can't clear more bits than in SrcTy");

    // Okay, we can transform this! Insert the new expression now.
    LLVM_DEBUG(
        dbgs() << "ICE: EvaluateInDifferentType converting expression type"
                  " to avoid zero extend: "
               << CI << '\n');
    Value *Res = EvaluateInDifferentType(Src, DestTy, false);
    assert(Res->getType() == DestTy);

    // Preserve debug values referring to Src if the zext is its last use.
    if (auto *SrcOp = dyn_cast<Instruction>(Src))
      if (SrcOp->hasOneUse())
        replaceAllDbgUsesWith(*SrcOp, *Res, CI, DT);

    uint32_t SrcBitsKept = SrcTy->getScalarSizeInBits() - BitsToClear;
    uint32_t DestBitSize = DestTy->getScalarSizeInBits();

    // If the high bits are already filled with zeros, just replace this
    // cast with the result.
    if (MaskedValueIsZero(Res,
                          APInt::getHighBitsSet(DestBitSize,
                                                DestBitSize - SrcBitsKept),
                          0, &CI))
      return replaceInstUsesWith(CI, Res);

    // We need to emit an AND to clear the high bits.
    Constant *C = ConstantInt::get(Res->getType(),
                                   APInt::getLowBitsSet(DestBitSize, SrcBitsKept));
    return BinaryOperator::CreateAnd(Res, C);
  }

  // If this is a TRUNC followed by a ZEXT then we are dealing with integral
  // types and if the sizes are just right we can convert this into a logical
  // 'and' which will be much cheaper than the pair of casts.
  if (TruncInst *CSrc = dyn_cast<TruncInst>(Src)) { // A->B->C cast
    // TODO: Subsume this into EvaluateInDifferentType.

    // Get the sizes of the types involved. We know that the intermediate type
    // will be smaller than A or C, but don't know the relation between A and C.
    Value *A = CSrc->getOperand(0);
    unsigned SrcSize = A->getType()->getScalarSizeInBits();
    unsigned MidSize = CSrc->getType()->getScalarSizeInBits();
    unsigned DstSize = CI.getType()->getScalarSizeInBits();
    // If we're actually extending zero bits, then if
    // SrcSize <  DstSize: zext(a & mask)
    // SrcSize == DstSize: a & mask
    // SrcSize  > DstSize: trunc(a) & mask
    if (SrcSize < DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      Constant *AndConst = ConstantInt::get(A->getType(), AndValue);
      Value *And = Builder.CreateAnd(A, AndConst, CSrc->getName() + ".mask");
      return new ZExtInst(And, CI.getType());
    }

    if (SrcSize == DstSize) {
      APInt AndValue(APInt::getLowBitsSet(SrcSize, MidSize));
      return BinaryOperator::CreateAnd(A, ConstantInt::get(A->getType(),
                                                           AndValue));
    }
    if (SrcSize > DstSize) {
      Value *Trunc = Builder.CreateTrunc(A, CI.getType());
      APInt AndValue(APInt::getLowBitsSet(DstSize, MidSize));
      return BinaryOperator::CreateAnd(Trunc,
                                       ConstantInt::get(Trunc->getType(),
                                                        AndValue));
    }
  }

  if (ICmpInst *Cmp = dyn_cast<ICmpInst>(Src))
    return transformZExtICmp(Cmp, CI);

  // zext(trunc(X) & C) -> (X & zext(C)).
  Constant *C;
  Value *X;
  if (match(Src, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Constant(C)))) &&
      X->getType() == CI.getType())
    return BinaryOperator::CreateAnd(X, ConstantExpr::getZExt(C, CI.getType()));

  // zext((trunc(X) & C) ^ C) -> ((X & zext(C)) ^ zext(C)).
  Value *And;
  if (match(Src, m_OneUse(m_Xor(m_Value(And), m_Constant(C)))) &&
      match(And, m_OneUse(m_And(m_Trunc(m_Value(X)), m_Specific(C)))) &&
      X->getType() == CI.getType()) {
    Constant *ZC = ConstantExpr::getZExt(C, CI.getType());
    return BinaryOperator::CreateXor(Builder.CreateAnd(X, ZC), ZC);
  }

  if (match(Src, m_VScale(DL))) {
    if (CI.getFunction() &&
        CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
      unsigned MaxVScale = CI.getFunction()
                               ->getFnAttribute(Attribute::VScaleRange)
                               .getVScaleRangeArgs()
                               .second;
      unsigned TypeWidth = Src->getType()->getScalarSizeInBits();
      if (MaxVScale > 0 && Log2_32(MaxVScale) < TypeWidth) {
        Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
        return replaceInstUsesWith(CI, VScale);
      }
    }
  }

  return nullptr;
}

/// Transform (sext icmp) to bitwise / integer operations to eliminate the icmp.
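/// For example (illustrative):
///   sext (icmp slt i32 %x, 0) to i32 --> ashr i32 %x, 31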
transformSExtICmp(ICmpInst * ICI,Instruction & CI)1356 Instruction *InstCombinerImpl::transformSExtICmp(ICmpInst *ICI,
1357 Instruction &CI) {
1358 Value *Op0 = ICI->getOperand(0), *Op1 = ICI->getOperand(1);
1359 ICmpInst::Predicate Pred = ICI->getPredicate();
1360
1361 // Don't bother if Op1 isn't of vector or integer type.
1362 if (!Op1->getType()->isIntOrIntVectorTy())
1363 return nullptr;
1364
1365 if ((Pred == ICmpInst::ICMP_SLT && match(Op1, m_ZeroInt())) ||
1366 (Pred == ICmpInst::ICMP_SGT && match(Op1, m_AllOnes()))) {
1367 // (x <s 0) ? -1 : 0 -> ashr x, 31 -> all ones if negative
1368 // (x >s -1) ? -1 : 0 -> not (ashr x, 31) -> all ones if positive
1369 Value *Sh = ConstantInt::get(Op0->getType(),
1370 Op0->getType()->getScalarSizeInBits() - 1);
1371 Value *In = Builder.CreateAShr(Op0, Sh, Op0->getName() + ".lobit");
1372 if (In->getType() != CI.getType())
1373 In = Builder.CreateIntCast(In, CI.getType(), true /*SExt*/);
1374
1375 if (Pred == ICmpInst::ICMP_SGT)
1376 In = Builder.CreateNot(In, In->getName() + ".not");
1377 return replaceInstUsesWith(CI, In);
1378 }
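  // Illustrative IR for the sign-bit test above (assuming i32 source and
  // destination types):
  //   %c = icmp slt i32 %x, 0
  //   %s = sext i1 %c to i32
  // becomes:
  //   %s = ashr i32 %x, 31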
1379
1380 if (ConstantInt *Op1C = dyn_cast<ConstantInt>(Op1)) {
1381 // If we know that only one bit of the LHS of the icmp can be set and we
1382 // have an equality comparison with zero or a power of 2, we can transform
1383 // the icmp and sext into bitwise/integer operations.
1384 if (ICI->hasOneUse() &&
1385 ICI->isEquality() && (Op1C->isZero() || Op1C->getValue().isPowerOf2())){
1386 KnownBits Known = computeKnownBits(Op0, 0, &CI);
1387
1388 APInt KnownZeroMask(~Known.Zero);
1389 if (KnownZeroMask.isPowerOf2()) {
1390 Value *In = ICI->getOperand(0);
1391
1392 // If the icmp tests for a known zero bit we can constant fold it.
1393 if (!Op1C->isZero() && Op1C->getValue() != KnownZeroMask) {
1394 Value *V = Pred == ICmpInst::ICMP_NE ?
1395 ConstantInt::getAllOnesValue(CI.getType()) :
1396 ConstantInt::getNullValue(CI.getType());
1397 return replaceInstUsesWith(CI, V);
1398 }
1399
1400 if (!Op1C->isZero() == (Pred == ICmpInst::ICMP_NE)) {
1401 // sext ((x & 2^n) == 0) -> (x >> n) - 1
1402 // sext ((x & 2^n) != 2^n) -> (x >> n) - 1
1403 unsigned ShiftAmt = KnownZeroMask.countTrailingZeros();
1404 // Perform a right shift to place the desired bit in the LSB.
1405 if (ShiftAmt)
1406 In = Builder.CreateLShr(In,
1407 ConstantInt::get(In->getType(), ShiftAmt));
1408
1409 // At this point "In" is either 1 or 0. Subtract 1 to turn
1410 // {1, 0} -> {0, -1}.
1411 In = Builder.CreateAdd(In,
1412 ConstantInt::getAllOnesValue(In->getType()),
1413 "sext");
1414 } else {
1415 // sext ((x & 2^n) != 0) -> (x << bitwidth-n) a>> bitwidth-1
1416 // sext ((x & 2^n) == 2^n) -> (x << bitwidth-n) a>> bitwidth-1
1417 unsigned ShiftAmt = KnownZeroMask.countLeadingZeros();
1418 // Perform a left shift to place the desired bit in the MSB.
1419 if (ShiftAmt)
1420 In = Builder.CreateShl(In,
1421 ConstantInt::get(In->getType(), ShiftAmt));
1422
1423 // Distribute the bit over the whole bit width.
1424 In = Builder.CreateAShr(In, ConstantInt::get(In->getType(),
1425 KnownZeroMask.getBitWidth() - 1), "sext");
1426 }
1427
1428 if (CI.getType() == In->getType())
1429 return replaceInstUsesWith(CI, In);
1430 return CastInst::CreateIntegerCast(In, CI.getType(), true/*SExt*/);
1431 }
1432 }
1433 }
1434
1435 return nullptr;
1436 }
1437
1438 /// Return true if we can take the specified value and return it as type Ty
1439 /// without inserting any new casts and without changing the value of the common
1440 /// low bits. This is used by code that tries to promote integer operations
1441 /// to a wider type when doing so will allow us to eliminate the extension.
1442 ///
1443 /// This function works on both vectors and scalars.
1444 ///
1445 static bool canEvaluateSExtd(Value *V, Type *Ty) {
1446 assert(V->getType()->getScalarSizeInBits() < Ty->getScalarSizeInBits() &&
1447 "Can't sign extend type to a smaller type");
1448 if (canAlwaysEvaluateInType(V, Ty))
1449 return true;
1450 if (canNotEvaluateInType(V, Ty))
1451 return false;
1452
1453 auto *I = cast<Instruction>(V);
1454 switch (I->getOpcode()) {
1455 case Instruction::SExt: // sext(sext(x)) -> sext(x)
1456 case Instruction::ZExt: // sext(zext(x)) -> zext(x)
1457 case Instruction::Trunc: // sext(trunc(x)) -> trunc(x) or sext(x)
1458 return true;
1459 case Instruction::And:
1460 case Instruction::Or:
1461 case Instruction::Xor:
1462 case Instruction::Add:
1463 case Instruction::Sub:
1464 case Instruction::Mul:
1465 // These operators can all arbitrarily be extended if their inputs can.
1466 return canEvaluateSExtd(I->getOperand(0), Ty) &&
1467 canEvaluateSExtd(I->getOperand(1), Ty);
1468
1469 //case Instruction::Shl: TODO
1470 //case Instruction::LShr: TODO
1471
1472 case Instruction::Select:
1473 return canEvaluateSExtd(I->getOperand(1), Ty) &&
1474 canEvaluateSExtd(I->getOperand(2), Ty);
1475
1476 case Instruction::PHI: {
1477 // We can change a phi if we can change all operands. Note that we never
1478 // get into trouble with cyclic PHIs here because we only consider
1479 // instructions with a single use.
1480 PHINode *PN = cast<PHINode>(I);
1481 for (Value *IncValue : PN->incoming_values())
1482 if (!canEvaluateSExtd(IncValue, Ty)) return false;
1483 return true;
1484 }
1485 default:
1486 // TODO: Can handle more cases here.
1487 break;
1488 }
1489
1490 return false;
1491 }
1492
1493 Instruction *InstCombinerImpl::visitSExt(SExtInst &CI) {
1494 // If this sign extend is only used by a truncate, let the truncate be
1495 // eliminated before we try to optimize this sext.
1496 if (CI.hasOneUse() && isa<TruncInst>(CI.user_back()))
1497 return nullptr;
1498
1499 if (Instruction *I = commonCastTransforms(CI))
1500 return I;
1501
1502 Value *Src = CI.getOperand(0);
1503 Type *SrcTy = Src->getType(), *DestTy = CI.getType();
1504 unsigned SrcBitSize = SrcTy->getScalarSizeInBits();
1505 unsigned DestBitSize = DestTy->getScalarSizeInBits();
1506
1507 // If we know that the value being extended is positive, we can use a zext
1508 // instead.
1509 KnownBits Known = computeKnownBits(Src, 0, &CI);
1510 if (Known.isNonNegative())
1511 return CastInst::Create(Instruction::ZExt, Src, DestTy);
1512
1513 // Try to extend the entire expression tree to the wide destination type.
1514 if (shouldChangeType(SrcTy, DestTy) && canEvaluateSExtd(Src, DestTy)) {
1515 // Okay, we can transform this! Insert the new expression now.
1516 LLVM_DEBUG(
1517 dbgs() << "ICE: EvaluateInDifferentType converting expression type"
1518 " to avoid sign extend: "
1519 << CI << '\n');
1520 Value *Res = EvaluateInDifferentType(Src, DestTy, true);
1521 assert(Res->getType() == DestTy);
1522
1523 // If the high bits are already filled with sign bit, just replace this
1524 // cast with the result.
1525 if (ComputeNumSignBits(Res, 0, &CI) > DestBitSize - SrcBitSize)
1526 return replaceInstUsesWith(CI, Res);
1527
1528 // We need to emit a shl + ashr to do the sign extend.
1529 Value *ShAmt = ConstantInt::get(DestTy, DestBitSize-SrcBitSize);
1530 return BinaryOperator::CreateAShr(Builder.CreateShl(Res, ShAmt, "sext"),
1531 ShAmt);
1532 }
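  // Illustrative IR for the shl+ashr case above (assuming an i8 expression
  // evaluated in i32, so the shift amount is 24):
  //   %wide = shl i32 %res, 24
  //   %sext = ashr i32 %wide, 24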
1533
1534 Value *X;
1535 if (match(Src, m_Trunc(m_Value(X)))) {
1536 // If the input has more sign bits than bits truncated, then convert
1537 // directly to final type.
1538 unsigned XBitSize = X->getType()->getScalarSizeInBits();
1539 if (ComputeNumSignBits(X, 0, &CI) > XBitSize - SrcBitSize)
1540 return CastInst::CreateIntegerCast(X, DestTy, /* isSigned */ true);
1541
1542 // If input is a trunc from the destination type, then convert into shifts.
1543 if (Src->hasOneUse() && X->getType() == DestTy) {
1544 // sext (trunc X) --> ashr (shl X, C), C
1545 Constant *ShAmt = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1546 return BinaryOperator::CreateAShr(Builder.CreateShl(X, ShAmt), ShAmt);
1547 }
1548
1549 // If we are replacing shifted-in high zero bits with sign bits, convert
1550 // the logic shift to arithmetic shift and eliminate the cast to
1551 // intermediate type:
1552 // sext (trunc (lshr Y, C)) --> sext/trunc (ashr Y, C)
1553 Value *Y;
1554 if (Src->hasOneUse() &&
1555 match(X, m_LShr(m_Value(Y),
1556 m_SpecificIntAllowUndef(XBitSize - SrcBitSize)))) {
1557 Value *Ashr = Builder.CreateAShr(Y, XBitSize - SrcBitSize);
1558 return CastInst::CreateIntegerCast(Ashr, DestTy, /* isSigned */ true);
1559 }
1560 }
1561
1562 if (ICmpInst *ICI = dyn_cast<ICmpInst>(Src))
1563 return transformSExtICmp(ICI, CI);
1564
1565 // If the input is a shl/ashr pair of a same constant, then this is a sign
1566 // extension from a smaller value. If we could trust arbitrary bitwidth
1567 // integers, we could turn this into a truncate to the smaller bitwidth and
1568 // then use a sext for the whole extension. Since we don't, look deeper and check
1569 // for a truncate. If the source and dest are the same type, eliminate the
1570 // trunc and extend and just do shifts. For example, turn:
1571 // %a = trunc i32 %i to i8
1572 // %b = shl i8 %a, C
1573 // %c = ashr i8 %b, C
1574 // %d = sext i8 %c to i32
1575 // into:
1576 // %a = shl i32 %i, 32-(8-C)
1577 // %d = ashr i32 %a, 32-(8-C)
1578 Value *A = nullptr;
1579 // TODO: Eventually this could be subsumed by EvaluateInDifferentType.
1580 Constant *BA = nullptr, *CA = nullptr;
1581 if (match(Src, m_AShr(m_Shl(m_Trunc(m_Value(A)), m_Constant(BA)),
1582 m_Constant(CA))) &&
1583 BA->isElementWiseEqual(CA) && A->getType() == DestTy) {
1584 Constant *WideCurrShAmt = ConstantExpr::getSExt(CA, DestTy);
1585 Constant *NumLowbitsLeft = ConstantExpr::getSub(
1586 ConstantInt::get(DestTy, SrcTy->getScalarSizeInBits()), WideCurrShAmt);
1587 Constant *NewShAmt = ConstantExpr::getSub(
1588 ConstantInt::get(DestTy, DestTy->getScalarSizeInBits()),
1589 NumLowbitsLeft);
1590 NewShAmt =
1591 Constant::mergeUndefsWith(Constant::mergeUndefsWith(NewShAmt, BA), CA);
1592 A = Builder.CreateShl(A, NewShAmt, CI.getName());
1593 return BinaryOperator::CreateAShr(A, NewShAmt);
1594 }
1595
1596 // Splatting a bit of constant-index across a value:
1597 // sext (ashr (trunc iN X to iM), M-1) to iN --> ashr (shl X, N-M), N-1
1598 // TODO: If the dest type is different, use a cast (adjust use check).
1599 if (match(Src, m_OneUse(m_AShr(m_Trunc(m_Value(X)),
1600 m_SpecificInt(SrcBitSize - 1)))) &&
1601 X->getType() == DestTy) {
1602 Constant *ShlAmtC = ConstantInt::get(DestTy, DestBitSize - SrcBitSize);
1603 Constant *AshrAmtC = ConstantInt::get(DestTy, DestBitSize - 1);
1604 Value *Shl = Builder.CreateShl(X, ShlAmtC);
1605 return BinaryOperator::CreateAShr(Shl, AshrAmtC);
1606 }
1607
1608 if (match(Src, m_VScale(DL))) {
1609 if (CI.getFunction() &&
1610 CI.getFunction()->hasFnAttribute(Attribute::VScaleRange)) {
1611 unsigned MaxVScale = CI.getFunction()
1612 ->getFnAttribute(Attribute::VScaleRange)
1613 .getVScaleRangeArgs()
1614 .second;
1615 if (MaxVScale > 0 && Log2_32(MaxVScale) < (SrcBitSize - 1)) {
1616 Value *VScale = Builder.CreateVScale(ConstantInt::get(DestTy, 1));
1617 return replaceInstUsesWith(CI, VScale);
1618 }
1619 }
1620 }
1621
1622 return nullptr;
1623 }
1624
1625 /// Return true if the specified floating-point constant fits in the given
1626 /// FP type without changing its value.
1627 static bool fitsInFPType(ConstantFP *CFP, const fltSemantics &Sem) {
1628 bool losesInfo;
1629 APFloat F = CFP->getValueAPF();
1630 (void)F.convert(Sem, APFloat::rmNearestTiesToEven, &losesInfo);
1631 return !losesInfo;
1632 }
1633
1634 static Type *shrinkFPConstant(ConstantFP *CFP) {
1635 if (CFP->getType() == Type::getPPC_FP128Ty(CFP->getContext()))
1636 return nullptr; // No constant folding of this.
1637 // See if the value can be truncated to half and then reextended.
1638 if (fitsInFPType(CFP, APFloat::IEEEhalf()))
1639 return Type::getHalfTy(CFP->getContext());
1640 // See if the value can be truncated to float and then reextended.
1641 if (fitsInFPType(CFP, APFloat::IEEEsingle()))
1642 return Type::getFloatTy(CFP->getContext());
1643 if (CFP->getType()->isDoubleTy())
1644 return nullptr; // Won't shrink.
1645 if (fitsInFPType(CFP, APFloat::IEEEdouble()))
1646 return Type::getDoubleTy(CFP->getContext());
1647 // Don't try to shrink to various long double types.
1648 return nullptr;
1649 }
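// For example, the double constant 2.0 round-trips through IEEEhalf without
// losing information, so shrinkFPConstant returns the half type for it, while
// 0.1 (not exactly representable in any binary format) does not shrink at all.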
1650
1651 // Determine if this is a vector of ConstantFPs and if so, return the minimal
1652 // type we can safely truncate all elements to.
1653 // TODO: Make these support undef elements.
1654 static Type *shrinkFPConstantVector(Value *V) {
1655 auto *CV = dyn_cast<Constant>(V);
1656 auto *CVVTy = dyn_cast<FixedVectorType>(V->getType());
1657 if (!CV || !CVVTy)
1658 return nullptr;
1659
1660 Type *MinType = nullptr;
1661
1662 unsigned NumElts = CVVTy->getNumElements();
1663
1664 // For fixed-width vectors we find the minimal type by looking
1665 // through the constant values of the vector.
1666 for (unsigned i = 0; i != NumElts; ++i) {
1667 auto *CFP = dyn_cast_or_null<ConstantFP>(CV->getAggregateElement(i));
1668 if (!CFP)
1669 return nullptr;
1670
1671 Type *T = shrinkFPConstant(CFP);
1672 if (!T)
1673 return nullptr;
1674
1675 // If we haven't found a type yet or this type has a larger mantissa than
1676 // our previous type, this is our new minimal type.
1677 if (!MinType || T->getFPMantissaWidth() > MinType->getFPMantissaWidth())
1678 MinType = T;
1679 }
1680
1681 // Make a vector type from the minimal type.
1682 return FixedVectorType::get(MinType, NumElts);
1683 }
1684
1685 /// Find the minimum FP type we can safely truncate to.
1686 static Type *getMinimumFPType(Value *V) {
1687 if (auto *FPExt = dyn_cast<FPExtInst>(V))
1688 return FPExt->getOperand(0)->getType();
1689
1690 // If this value is a constant, return the constant in the smallest FP type
1691 // that can accurately represent it. This allows us to turn
1692 // (float)((double)X+2.0) into x+2.0f.
1693 if (auto *CFP = dyn_cast<ConstantFP>(V))
1694 if (Type *T = shrinkFPConstant(CFP))
1695 return T;
1696
1697 // We can only correctly find a minimum type for a scalable vector when it is
1698 // a splat. For splats of constant values the fpext is wrapped up as a
1699 // ConstantExpr.
1700 if (auto *FPCExt = dyn_cast<ConstantExpr>(V))
1701 if (FPCExt->getOpcode() == Instruction::FPExt)
1702 return FPCExt->getOperand(0)->getType();
1703
1704 // Try to shrink a vector of FP constants. This returns nullptr on scalable
1705 // vectors.
1706 if (Type *T = shrinkFPConstantVector(V))
1707 return T;
1708
1709 return V->getType();
1710 }
1711
1712 /// Return true if the cast from integer to FP can be proven to be exact for all
1713 /// possible inputs (the conversion does not lose any precision).
1714 static bool isKnownExactCastIntToFP(CastInst &I) {
1715 CastInst::CastOps Opcode = I.getOpcode();
1716 assert((Opcode == CastInst::SIToFP || Opcode == CastInst::UIToFP) &&
1717 "Unexpected cast");
1718 Value *Src = I.getOperand(0);
1719 Type *SrcTy = Src->getType();
1720 Type *FPTy = I.getType();
1721 bool IsSigned = Opcode == Instruction::SIToFP;
1722 int SrcSize = (int)SrcTy->getScalarSizeInBits() - IsSigned;
1723
1724 // Easy case - if the source integer type has fewer bits than the FP mantissa,
1725 // then the cast must be exact.
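  // For example (illustrative): sitofp i16 --> float is always exact, since
  // 15 value bits fit in float's 24-bit significand, while sitofp i32 -->
  // float is not and must pass the checks below.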
1726 int DestNumSigBits = FPTy->getFPMantissaWidth();
1727 if (SrcSize <= DestNumSigBits)
1728 return true;
1729
1730 // Cast from FP to integer and back to FP is independent of the intermediate
1731 // integer width because of poison on overflow.
1732 Value *F;
1733 if (match(Src, m_FPToSI(m_Value(F))) || match(Src, m_FPToUI(m_Value(F)))) {
1734 // If this is uitofp (fptosi F), the source needs an extra bit to avoid
1735 // potential rounding of negative FP input values.
1736 int SrcNumSigBits = F->getType()->getFPMantissaWidth();
1737 if (!IsSigned && match(Src, m_FPToSI(m_Value())))
1738 SrcNumSigBits++;
1739
1740 // [su]itofp (fpto[su]i F) --> exact if the source type has less or equal
1741 // significant bits than the destination (and make sure neither type is
1742 // weird -- ppc_fp128).
1743 if (SrcNumSigBits > 0 && DestNumSigBits > 0 &&
1744 SrcNumSigBits <= DestNumSigBits)
1745 return true;
1746 }
1747
1748 // TODO:
1749 // Try harder to find if the source integer type has less significant bits.
1750 // For example, compute number of sign bits or compute low bit mask.
1751 return false;
1752 }
1753
1754 Instruction *InstCombinerImpl::visitFPTrunc(FPTruncInst &FPT) {
1755 if (Instruction *I = commonCastTransforms(FPT))
1756 return I;
1757
1758 // If we have fptrunc(OpI (fpextend x), (fpextend y)), we would like to
1759 // simplify this expression to avoid one or more of the trunc/extend
1760 // operations if we can do so without changing the numerical results.
1761 //
1762 // The exact manner in which the widths of the operands interact to limit
1763 // what we can and cannot do safely varies from operation to operation, and
1764 // is explained below in the various case statements.
1765 Type *Ty = FPT.getType();
1766 auto *BO = dyn_cast<BinaryOperator>(FPT.getOperand(0));
1767 if (BO && BO->hasOneUse()) {
1768 Type *LHSMinType = getMinimumFPType(BO->getOperand(0));
1769 Type *RHSMinType = getMinimumFPType(BO->getOperand(1));
1770 unsigned OpWidth = BO->getType()->getFPMantissaWidth();
1771 unsigned LHSWidth = LHSMinType->getFPMantissaWidth();
1772 unsigned RHSWidth = RHSMinType->getFPMantissaWidth();
1773 unsigned SrcWidth = std::max(LHSWidth, RHSWidth);
1774 unsigned DstWidth = Ty->getFPMantissaWidth();
1775 switch (BO->getOpcode()) {
1776 default: break;
1777 case Instruction::FAdd:
1778 case Instruction::FSub:
1779 // For addition and subtraction, the infinitely precise result can
1780 // essentially be arbitrarily wide; proving that double rounding
1781 // will not occur because the result of OpI is exact (as we will for
1782 // FMul, for example) is hopeless. However, we *can* nonetheless
1783 // frequently know that double rounding cannot occur (or that it is
1784 // innocuous) by taking advantage of the specific structure of
1785 // infinitely-precise results that admit double rounding.
1786 //
1787 // Specifically, if OpWidth >= 2*DstWidth+1 and DstWidth is sufficient
1788 // to represent both sources, we can guarantee that the double
1789 // rounding is innocuous (See p50 of Figueroa's 2000 PhD thesis,
1790 // "A Rigorous Framework for Fully Supporting the IEEE Standard ..."
1791 // for proof of this fact).
1792 //
1793 // Note: Figueroa does not consider the case where DstFormat !=
1794 // SrcFormat. It's possible (likely even!) that this analysis
1795 // could be tightened for those cases, but they are rare (the main
1796 // case of interest here is (float)((double)float + float)).
1797 if (OpWidth >= 2*DstWidth+1 && DstWidth >= SrcWidth) {
1798 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1799 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1800 Instruction *RI = BinaryOperator::Create(BO->getOpcode(), LHS, RHS);
1801 RI->copyFastMathFlags(BO);
1802 return RI;
1803 }
1804 break;
1805 case Instruction::FMul:
1806 // For multiplication, the infinitely precise result has at most
1807 // LHSWidth + RHSWidth significant bits; if OpWidth is sufficient
1808 // that such a value can be exactly represented, then no double
1809 // rounding can possibly occur; we can safely perform the operation
1810 // in the destination format if it can represent both sources.
1811 if (OpWidth >= LHSWidth + RHSWidth && DstWidth >= SrcWidth) {
1812 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1813 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1814 return BinaryOperator::CreateFMulFMF(LHS, RHS, BO);
1815 }
1816 break;
1817 case Instruction::FDiv:
1818 // For division, we again use the bound from Figueroa's
1819 // dissertation. I am entirely certain that this bound can be
1820 // tightened in the unbalanced operand case by an analysis based on
1821 // the diophantine rational approximation bound, but the well-known
1822 // condition used here is a good conservative first pass.
1823 // TODO: Tighten bound via rigorous analysis of the unbalanced case.
1824 if (OpWidth >= 2*DstWidth && DstWidth >= SrcWidth) {
1825 Value *LHS = Builder.CreateFPTrunc(BO->getOperand(0), Ty);
1826 Value *RHS = Builder.CreateFPTrunc(BO->getOperand(1), Ty);
1827 return BinaryOperator::CreateFDivFMF(LHS, RHS, BO);
1828 }
1829 break;
1830 case Instruction::FRem: {
1831 // Remainder is straightforward. It is always exact, so the
1832 // type of OpI doesn't enter into things at all. We simply evaluate
1833 // in whichever source type is larger, then convert to the
1834 // destination type.
1835 if (SrcWidth == OpWidth)
1836 break;
1837 Value *LHS, *RHS;
1838 if (LHSWidth == SrcWidth) {
1839 LHS = Builder.CreateFPTrunc(BO->getOperand(0), LHSMinType);
1840 RHS = Builder.CreateFPTrunc(BO->getOperand(1), LHSMinType);
1841 } else {
1842 LHS = Builder.CreateFPTrunc(BO->getOperand(0), RHSMinType);
1843 RHS = Builder.CreateFPTrunc(BO->getOperand(1), RHSMinType);
1844 }
1845
1846 Value *ExactResult = Builder.CreateFRemFMF(LHS, RHS, BO);
1847 return CastInst::CreateFPCast(ExactResult, Ty);
1848 }
1849 }
1850 }
1851
1852 // (fptrunc (fneg x)) -> (fneg (fptrunc x))
1853 Value *X;
1854 Instruction *Op = dyn_cast<Instruction>(FPT.getOperand(0));
1855 if (Op && Op->hasOneUse()) {
1856 // FIXME: The FMF should propagate from the fptrunc, not the source op.
1857 IRBuilder<>::FastMathFlagGuard FMFG(Builder);
1858 if (isa<FPMathOperator>(Op))
1859 Builder.setFastMathFlags(Op->getFastMathFlags());
1860
1861 if (match(Op, m_FNeg(m_Value(X)))) {
1862 Value *InnerTrunc = Builder.CreateFPTrunc(X, Ty);
1863
1864 return UnaryOperator::CreateFNegFMF(InnerTrunc, Op);
1865 }
1866
1867 // If we are truncating a select that has an extended operand, we can
1868 // narrow the other operand and do the select as a narrow op.
1869 Value *Cond, *X, *Y;
1870 if (match(Op, m_Select(m_Value(Cond), m_FPExt(m_Value(X)), m_Value(Y))) &&
1871 X->getType() == Ty) {
1872 // fptrunc (select Cond, (fpext X), Y) --> select Cond, X, (fptrunc Y)
1873 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1874 Value *Sel = Builder.CreateSelect(Cond, X, NarrowY, "narrow.sel", Op);
1875 return replaceInstUsesWith(FPT, Sel);
1876 }
1877 if (match(Op, m_Select(m_Value(Cond), m_Value(Y), m_FPExt(m_Value(X)))) &&
1878 X->getType() == Ty) {
1879 // fptrunc (select Cond, Y, (fpext X)) --> select Cond, (fptrunc Y), X
1880 Value *NarrowY = Builder.CreateFPTrunc(Y, Ty);
1881 Value *Sel = Builder.CreateSelect(Cond, NarrowY, X, "narrow.sel", Op);
1882 return replaceInstUsesWith(FPT, Sel);
1883 }
1884 }
1885
1886 if (auto *II = dyn_cast<IntrinsicInst>(FPT.getOperand(0))) {
1887 switch (II->getIntrinsicID()) {
1888 default: break;
1889 case Intrinsic::ceil:
1890 case Intrinsic::fabs:
1891 case Intrinsic::floor:
1892 case Intrinsic::nearbyint:
1893 case Intrinsic::rint:
1894 case Intrinsic::round:
1895 case Intrinsic::roundeven:
1896 case Intrinsic::trunc: {
1897 Value *Src = II->getArgOperand(0);
1898 if (!Src->hasOneUse())
1899 break;
1900
1901 // Except for fabs, this transformation requires the input of the unary FP
1902 // operation to be itself an fpext from the type to which we're
1903 // truncating.
1904 if (II->getIntrinsicID() != Intrinsic::fabs) {
1905 FPExtInst *FPExtSrc = dyn_cast<FPExtInst>(Src);
1906 if (!FPExtSrc || FPExtSrc->getSrcTy() != Ty)
1907 break;
1908 }
1909
1910 // Do unary FP operation on smaller type.
1911 // (fptrunc (fabs x)) -> (fabs (fptrunc x))
1912 Value *InnerTrunc = Builder.CreateFPTrunc(Src, Ty);
1913 Function *Overload = Intrinsic::getDeclaration(FPT.getModule(),
1914 II->getIntrinsicID(), Ty);
1915 SmallVector<OperandBundleDef, 1> OpBundles;
1916 II->getOperandBundlesAsDefs(OpBundles);
1917 CallInst *NewCI =
1918 CallInst::Create(Overload, {InnerTrunc}, OpBundles, II->getName());
1919 NewCI->copyFastMathFlags(II);
1920 return NewCI;
1921 }
1922 }
1923 }
1924
1925 if (Instruction *I = shrinkInsertElt(FPT, Builder))
1926 return I;
1927
1928 Value *Src = FPT.getOperand(0);
1929 if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1930 auto *FPCast = cast<CastInst>(Src);
1931 if (isKnownExactCastIntToFP(*FPCast))
1932 return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1933 }
1934
1935 return nullptr;
1936 }
1937
1938 Instruction *InstCombinerImpl::visitFPExt(CastInst &FPExt) {
1939 // If the source operand is a cast from integer to FP and known exact, then
1940 // cast the integer operand directly to the destination type.
1941 Type *Ty = FPExt.getType();
1942 Value *Src = FPExt.getOperand(0);
1943 if (isa<SIToFPInst>(Src) || isa<UIToFPInst>(Src)) {
1944 auto *FPCast = cast<CastInst>(Src);
1945 if (isKnownExactCastIntToFP(*FPCast))
1946 return CastInst::Create(FPCast->getOpcode(), FPCast->getOperand(0), Ty);
1947 }
1948
1949 return commonCastTransforms(FPExt);
1950 }
1951
1952 /// fpto{s/u}i({u/s}itofp(X)) --> X or zext(X) or sext(X) or trunc(X)
1953 /// This is safe if the intermediate type has enough bits in its mantissa to
1954 /// accurately represent all values of X. For example, this won't work with
1955 /// i64 -> float -> i64.
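///
/// A worked example of a widening fold (illustrative):
///   %f = sitofp i16 %x to float
///   %i = fptosi float %f to i32
/// --> %i = sext i16 %x to i32
/// This is exact because every i16 value fits in float's 24-bit significand.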
1956 Instruction *InstCombinerImpl::foldItoFPtoI(CastInst &FI) {
1957 if (!isa<UIToFPInst>(FI.getOperand(0)) && !isa<SIToFPInst>(FI.getOperand(0)))
1958 return nullptr;
1959
1960 auto *OpI = cast<CastInst>(FI.getOperand(0));
1961 Value *X = OpI->getOperand(0);
1962 Type *XType = X->getType();
1963 Type *DestType = FI.getType();
1964 bool IsOutputSigned = isa<FPToSIInst>(FI);
1965
1966 // Since we can assume the conversion won't overflow, our decision as to
1967 // whether the input will fit in the float should depend on the minimum
1968 // of the input range and output range.
1969
1970 // This means this is also safe for a signed input and unsigned output, since
1971 // a negative input would lead to undefined behavior.
1972 if (!isKnownExactCastIntToFP(*OpI)) {
1973 // The first cast may not round exactly based on the source integer width
1974 // and FP width, but the overflow UB rules can still allow this to fold.
1975 // If the destination type is narrow, that means the intermediate FP value
1976 // must be large enough to hold the source value exactly.
1977 // For example, (uint8_t)((float)(uint32_t)16777217) is undefined behavior.
1978 int OutputSize = (int)DestType->getScalarSizeInBits() - IsOutputSigned;
1979 if (OutputSize > OpI->getType()->getFPMantissaWidth())
1980 return nullptr;
1981 }
1982
1983 if (DestType->getScalarSizeInBits() > XType->getScalarSizeInBits()) {
1984 bool IsInputSigned = isa<SIToFPInst>(OpI);
1985 if (IsInputSigned && IsOutputSigned)
1986 return new SExtInst(X, DestType);
1987 return new ZExtInst(X, DestType);
1988 }
1989 if (DestType->getScalarSizeInBits() < XType->getScalarSizeInBits())
1990 return new TruncInst(X, DestType);
1991
1992 assert(XType == DestType && "Unexpected types for int to FP to int casts");
1993 return replaceInstUsesWith(FI, X);
1994 }
1995
1996 Instruction *InstCombinerImpl::visitFPToUI(FPToUIInst &FI) {
1997 if (Instruction *I = foldItoFPtoI(FI))
1998 return I;
1999
2000 return commonCastTransforms(FI);
2001 }
2002
2003 Instruction *InstCombinerImpl::visitFPToSI(FPToSIInst &FI) {
2004 if (Instruction *I = foldItoFPtoI(FI))
2005 return I;
2006
2007 return commonCastTransforms(FI);
2008 }
2009
2010 Instruction *InstCombinerImpl::visitUIToFP(CastInst &CI) {
2011 return commonCastTransforms(CI);
2012 }
2013
2014 Instruction *InstCombinerImpl::visitSIToFP(CastInst &CI) {
2015 return commonCastTransforms(CI);
2016 }
2017
2018 Instruction *InstCombinerImpl::visitIntToPtr(IntToPtrInst &CI) {
2019 // If the source integer type is not the intptr_t type for this target, do a
2020 // trunc or zext to the intptr_t type, then inttoptr of it. This allows the
2021 // cast to be exposed to other transforms.
2022 unsigned AS = CI.getAddressSpace();
2023 if (CI.getOperand(0)->getType()->getScalarSizeInBits() !=
2024 DL.getPointerSizeInBits(AS)) {
2025 Type *Ty = CI.getOperand(0)->getType()->getWithNewType(
2026 DL.getIntPtrType(CI.getContext(), AS));
2027 Value *P = Builder.CreateZExtOrTrunc(CI.getOperand(0), Ty);
2028 return new IntToPtrInst(P, CI.getType());
2029 }
2030
2031 if (Instruction *I = commonCastTransforms(CI))
2032 return I;
2033
2034 return nullptr;
2035 }
2036
2037 /// Implement the transforms for cast of pointer (bitcast/ptrtoint)
2038 Instruction *InstCombinerImpl::commonPointerCastTransforms(CastInst &CI) {
2039 Value *Src = CI.getOperand(0);
2040
2041 if (GetElementPtrInst *GEP = dyn_cast<GetElementPtrInst>(Src)) {
2042 // If casting the result of a getelementptr instruction with no offset, turn
2043 // this into a cast of the original pointer!
2044 if (GEP->hasAllZeroIndices() &&
2045 // If CI is an addrspacecast and GEP changes the pointer type, merging
2046 // GEP into CI would undo canonicalizing addrspacecast with different
2047 // pointer types, causing infinite loops.
2048 (!isa<AddrSpaceCastInst>(CI) ||
2049 GEP->getType() == GEP->getPointerOperandType())) {
2050 // Changing the cast operand is usually not a good idea but it is safe
2051 // here because the pointer operand is being replaced with another
2052 // pointer operand so the opcode doesn't need to change.
2053 return replaceOperand(CI, 0, GEP->getOperand(0));
2054 }
2055 }
2056
2057 return commonCastTransforms(CI);
2058 }
2059
2060 Instruction *InstCombinerImpl::visitPtrToInt(PtrToIntInst &CI) {
2061 // If the destination integer type is not the intptr_t type for this target,
2062 // do a ptrtoint to intptr_t then do a trunc or zext. This allows the cast
2063 // to be exposed to other transforms.
2064 Value *SrcOp = CI.getPointerOperand();
2065 Type *SrcTy = SrcOp->getType();
2066 Type *Ty = CI.getType();
2067 unsigned AS = CI.getPointerAddressSpace();
2068 unsigned TySize = Ty->getScalarSizeInBits();
2069 unsigned PtrSize = DL.getPointerSizeInBits(AS);
2070 if (TySize != PtrSize) {
2071 Type *IntPtrTy =
2072 SrcTy->getWithNewType(DL.getIntPtrType(CI.getContext(), AS));
2073 Value *P = Builder.CreatePtrToInt(SrcOp, IntPtrTy);
2074 return CastInst::CreateIntegerCast(P, Ty, /*isSigned=*/false);
2075 }
2076
2077 if (auto *GEP = dyn_cast<GetElementPtrInst>(SrcOp)) {
2078 // Fold ptrtoint(gep null, x) to multiply + constant if the GEP has one use.
2079 // While this can increase the number of instructions it doesn't actually
2080 // increase the overall complexity since the arithmetic is just part of
2081 // the GEP otherwise.
2082 if (GEP->hasOneUse() &&
2083 isa<ConstantPointerNull>(GEP->getPointerOperand())) {
2084 return replaceInstUsesWith(CI,
2085 Builder.CreateIntCast(EmitGEPOffset(GEP), Ty,
2086 /*isSigned=*/false));
2087 }
2088 }
2089
2090 Value *Vec, *Scalar, *Index;
2091 if (match(SrcOp, m_OneUse(m_InsertElt(m_IntToPtr(m_Value(Vec)),
2092 m_Value(Scalar), m_Value(Index)))) &&
2093 Vec->getType() == Ty) {
2094 assert(Vec->getType()->getScalarSizeInBits() == PtrSize && "Wrong type");
2095 // Convert the scalar to int followed by insert to eliminate one cast:
2096 // p2i (ins (i2p Vec), Scalar, Index) --> ins Vec, (p2i Scalar), Index
2097 Value *NewCast = Builder.CreatePtrToInt(Scalar, Ty->getScalarType());
2098 return InsertElementInst::Create(Vec, NewCast, Index);
2099 }
2100
2101 return commonPointerCastTransforms(CI);
2102 }
2103
2104 /// This input value (which is known to have vector type) is being zero extended
2105 /// or truncated to the specified vector type. Since the zext/trunc is done
2106 /// using an integer type, we have a (bitcast(cast(bitcast))) pattern, and
2107 /// endianness will impact which end of the vector is extended or
2108 /// truncated.
2109 ///
2110 /// A vector is always stored with index 0 at the lowest address, which
2111 /// corresponds to the most significant bits for a big endian stored integer and
2112 /// the least significant bits for little endian. A trunc/zext of an integer
2113 /// impacts the big end of the integer. Thus, we need to add/remove elements at
2114 /// the front of the vector for big endian targets, and the back of the vector
2115 /// for little endian targets.
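///
/// For example (illustrative), narrowing a <4 x i16> through i64 to <2 x i16>
/// keeps elements {0,1} on a little endian target but elements {2,3} on a big
/// endian target, because the dropped high bits of the integer sit at opposite
/// ends of the vector.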
2116 ///
2117 /// Try to replace it with a shuffle (and vector/vector bitcast) if possible.
2118 ///
2119 /// The source and destination vector types may have different element types.
2120 static Instruction *
2121 optimizeVectorResizeWithIntegerBitCasts(Value *InVal, VectorType *DestTy,
2122 InstCombinerImpl &IC) {
2123 // We can only do this optimization if the output is a multiple of the input
2124 // element size, or the input is a multiple of the output element size.
2125 // Convert the input type to have the same element type as the output.
2126 VectorType *SrcTy = cast<VectorType>(InVal->getType());
2127
2128 if (SrcTy->getElementType() != DestTy->getElementType()) {
2129 // The input types don't need to be identical, but for now they must be the
2130 // same size. There is no specific reason we couldn't handle things like
2131 // <4 x i16> -> <4 x i32> by bitcasting to <2 x i32> but haven't gotten
2132 // there yet.
2133 if (SrcTy->getElementType()->getPrimitiveSizeInBits() !=
2134 DestTy->getElementType()->getPrimitiveSizeInBits())
2135 return nullptr;
2136
2137 SrcTy =
2138 FixedVectorType::get(DestTy->getElementType(),
2139 cast<FixedVectorType>(SrcTy)->getNumElements());
2140 InVal = IC.Builder.CreateBitCast(InVal, SrcTy);
2141 }
2142
2143 bool IsBigEndian = IC.getDataLayout().isBigEndian();
2144 unsigned SrcElts = cast<FixedVectorType>(SrcTy)->getNumElements();
2145 unsigned DestElts = cast<FixedVectorType>(DestTy)->getNumElements();
2146
2147 assert(SrcElts != DestElts && "Element counts should be different.");
2148
2149 // Now that the element types match, get the shuffle mask and RHS of the
2150 // shuffle to use, which depends on whether we're increasing or decreasing the
2151 // size of the input.
2152 SmallVector<int, 16> ShuffleMaskStorage;
2153 ArrayRef<int> ShuffleMask;
2154 Value *V2;
2155
2156 // Produce an identity shuffle mask for the src vector.
2157 ShuffleMaskStorage.resize(SrcElts);
2158 std::iota(ShuffleMaskStorage.begin(), ShuffleMaskStorage.end(), 0);
2159
2160 if (SrcElts > DestElts) {
2161 // If we're shrinking the number of elements (rewriting an integer
2162 // truncate), just shuffle in the elements corresponding to the least
2163 // significant bits from the input and use poison as the second shuffle
2164 // input.
2165 V2 = PoisonValue::get(SrcTy);
2166 // Make sure the shuffle mask selects the "least significant bits" by
2167 // keeping elements from the back of the src vector for big endian, and
2168 // from the front for little endian.
2169 ShuffleMask = ShuffleMaskStorage;
2170 if (IsBigEndian)
2171 ShuffleMask = ShuffleMask.take_back(DestElts);
2172 else
2173 ShuffleMask = ShuffleMask.take_front(DestElts);
2174 } else {
2175 // If we're increasing the number of elements (rewriting an integer zext),
2176 // shuffle in all of the elements from InVal. Fill the rest of the result
2177 // elements with zeros from a constant zero.
2178 V2 = Constant::getNullValue(SrcTy);
2179 // Use first elt from V2 when indicating zero in the shuffle mask.
2180 uint32_t NullElt = SrcElts;
2181 // Extend with null values in the "most significant bits" by adding elements
2182 // in front of the src vector for big endian, and at the back for little
2183 // endian.
2184 unsigned DeltaElts = DestElts - SrcElts;
2185 if (IsBigEndian)
2186 ShuffleMaskStorage.insert(ShuffleMaskStorage.begin(), DeltaElts, NullElt);
2187 else
2188 ShuffleMaskStorage.append(DeltaElts, NullElt);
2189 ShuffleMask = ShuffleMaskStorage;
2190 }
2191
2192 return new ShuffleVectorInst(InVal, V2, ShuffleMask);
2193 }
2194
2195 static bool isMultipleOfTypeSize(unsigned Value, Type *Ty) {
2196 return Value % Ty->getPrimitiveSizeInBits() == 0;
2197 }
2198
2199 static unsigned getTypeSizeIndex(unsigned Value, Type *Ty) {
2200 return Value / Ty->getPrimitiveSizeInBits();
2201 }
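// For example, with VecEltTy = i32, isMultipleOfTypeSize(64, Ty) is true and
// getTypeSizeIndex(64, Ty) is 2: a value 64 bits above the lsb of the vector
// lands at element index 2.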
2202
2203 /// V is a value which is inserted into a vector of VecEltTy.
2204 /// Look through the value to see if we can decompose it into
2205 /// insertions into the vector. See the example in the comment for
2206 /// optimizeIntegerToVectorInsertions for the pattern this handles.
2207 /// The type of V is always a non-zero multiple of VecEltTy's size.
2208 /// Shift is the number of bits between the lsb of V and the lsb of
2209 /// the vector.
2210 ///
2211 /// This returns false if the pattern can't be matched or true if it can,
2212 /// filling in Elements with the elements found here.
2213 static bool collectInsertionElements(Value *V, unsigned Shift,
2214 SmallVectorImpl<Value *> &Elements,
2215 Type *VecEltTy, bool isBigEndian) {
2216 assert(isMultipleOfTypeSize(Shift, VecEltTy) &&
2217 "Shift should be a multiple of the element type size");
2218
2219 // Undef values never contribute useful bits to the result.
2220 if (isa<UndefValue>(V)) return true;
2221
2222 // If we got down to a value of the right type, we win; try inserting it into
2223 // the right element.
2224 if (V->getType() == VecEltTy) {
2225 // Inserting null doesn't actually insert any elements.
2226 if (Constant *C = dyn_cast<Constant>(V))
2227 if (C->isNullValue())
2228 return true;
2229
2230 unsigned ElementIndex = getTypeSizeIndex(Shift, VecEltTy);
2231 if (isBigEndian)
2232 ElementIndex = Elements.size() - ElementIndex - 1;
2233
2234 // Fail if multiple elements are inserted into this slot.
2235 if (Elements[ElementIndex])
2236 return false;
2237
2238 Elements[ElementIndex] = V;
2239 return true;
2240 }
2241
2242 if (Constant *C = dyn_cast<Constant>(V)) {
2243 // Figure out the # elements this provides, and bitcast it or slice it up
2244 // as required.
2245 unsigned NumElts = getTypeSizeIndex(C->getType()->getPrimitiveSizeInBits(),
2246 VecEltTy);
2247 // If the constant is the size of a vector element, we just need to bitcast
2248 // it to the right type so it gets properly inserted.
2249 if (NumElts == 1)
2250 return collectInsertionElements(ConstantExpr::getBitCast(C, VecEltTy),
2251 Shift, Elements, VecEltTy, isBigEndian);
2252
2253 // Okay, this is a constant that covers multiple elements. Slice it up into
2254 // pieces and insert each element-sized piece into the vector.
2255 if (!isa<IntegerType>(C->getType()))
2256 C = ConstantExpr::getBitCast(C, IntegerType::get(V->getContext(),
2257 C->getType()->getPrimitiveSizeInBits()));
2258 unsigned ElementSize = VecEltTy->getPrimitiveSizeInBits();
2259 Type *ElementIntTy = IntegerType::get(C->getContext(), ElementSize);
2260
2261 for (unsigned i = 0; i != NumElts; ++i) {
2262 unsigned ShiftI = Shift+i*ElementSize;
2263 Constant *Piece = ConstantExpr::getLShr(C, ConstantInt::get(C->getType(),
2264 ShiftI));
2265 Piece = ConstantExpr::getTrunc(Piece, ElementIntTy);
2266 if (!collectInsertionElements(Piece, ShiftI, Elements, VecEltTy,
2267 isBigEndian))
2268 return false;
2269 }
2270 return true;
2271 }
2272
2273 if (!V->hasOneUse()) return false;
2274
2275 Instruction *I = dyn_cast<Instruction>(V);
2276 if (!I) return false;
2277 switch (I->getOpcode()) {
2278 default: return false; // Unhandled case.
2279 case Instruction::BitCast:
2280 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2281 isBigEndian);
2282 case Instruction::ZExt:
2283 if (!isMultipleOfTypeSize(
2284 I->getOperand(0)->getType()->getPrimitiveSizeInBits(),
2285 VecEltTy))
2286 return false;
2287 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2288 isBigEndian);
2289 case Instruction::Or:
2290 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2291 isBigEndian) &&
2292 collectInsertionElements(I->getOperand(1), Shift, Elements, VecEltTy,
2293 isBigEndian);
2294 case Instruction::Shl: {
2295 // Must be shifting by a constant that is a multiple of the element size.
2296 ConstantInt *CI = dyn_cast<ConstantInt>(I->getOperand(1));
2297 if (!CI) return false;
2298 Shift += CI->getZExtValue();
2299 if (!isMultipleOfTypeSize(Shift, VecEltTy)) return false;
2300 return collectInsertionElements(I->getOperand(0), Shift, Elements, VecEltTy,
2301 isBigEndian);
2302 }
2303
2304 }
2305 }
2306
2307
2308 /// If the input is an 'or' instruction, we may be doing shifts and ors to
2309 /// assemble the elements of the vector manually.
2310 /// Try to rip the code out and replace it with insertelements. This is to
2311 /// optimize code like this:
2312 ///
2313 /// %tmp37 = bitcast float %inc to i32
2314 /// %tmp38 = zext i32 %tmp37 to i64
2315 /// %tmp31 = bitcast float %inc5 to i32
2316 /// %tmp32 = zext i32 %tmp31 to i64
2317 /// %tmp33 = shl i64 %tmp32, 32
2318 /// %ins35 = or i64 %tmp33, %tmp38
2319 /// %tmp43 = bitcast i64 %ins35 to <2 x float>
2320 ///
2321 /// Into two insertelements that do "buildvector{%inc, %inc5}".
2322 static Value *optimizeIntegerToVectorInsertions(BitCastInst &CI,
2323 InstCombinerImpl &IC) {
2324 auto *DestVecTy = cast<FixedVectorType>(CI.getType());
2325 Value *IntInput = CI.getOperand(0);
2326
2327 SmallVector<Value*, 8> Elements(DestVecTy->getNumElements());
2328 if (!collectInsertionElements(IntInput, 0, Elements,
2329 DestVecTy->getElementType(),
2330 IC.getDataLayout().isBigEndian()))
2331 return nullptr;
2332
2333 // If we succeeded, we know that all of the elements are specified by Elements
2334 // or are zero if Elements has a null entry. Recast this as a set of
2335 // insertions.
2336 Value *Result = Constant::getNullValue(CI.getType());
2337 for (unsigned i = 0, e = Elements.size(); i != e; ++i) {
2338 if (!Elements[i]) continue; // Unset element.
2339
2340 Result = IC.Builder.CreateInsertElement(Result, Elements[i],
2341 IC.Builder.getInt32(i));
2342 }
2343
2344 return Result;
2345 }
2346
2347 /// Canonicalize scalar bitcasts of extracted elements into a bitcast of the
2348 /// vector followed by extract element. The backend tends to handle bitcasts of
2349 /// vectors better than bitcasts of scalars because vector registers are
2350 /// usually not type-specific like scalar integer or scalar floating-point.
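///
/// Illustrative example:
///   %e = extractelement <2 x float> %v, i32 0
///   %b = bitcast float %e to i32
/// becomes:
///   %bc = bitcast <2 x float> %v to <2 x i32>
///   %b = extractelement <2 x i32> %bc, i32 0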
2351 static Instruction *canonicalizeBitCastExtElt(BitCastInst &BitCast,
2352 InstCombinerImpl &IC) {
2353 // TODO: Create and use a pattern matcher for ExtractElementInst.
2354 auto *ExtElt = dyn_cast<ExtractElementInst>(BitCast.getOperand(0));
2355 if (!ExtElt || !ExtElt->hasOneUse())
2356 return nullptr;
2357
2358 // The bitcast must be to a vectorizable type, otherwise we can't make a new
2359 // type to extract from.
2360 Type *DestType = BitCast.getType();
2361 if (!VectorType::isValidElementType(DestType))
2362 return nullptr;
2363
2364 auto *NewVecType = VectorType::get(DestType, ExtElt->getVectorOperandType());
2365 auto *NewBC = IC.Builder.CreateBitCast(ExtElt->getVectorOperand(),
2366 NewVecType, "bc");
2367 return ExtractElementInst::Create(NewBC, ExtElt->getIndexOperand());
2368 }
2369
2370 /// Change the type of a bitwise logic operation if we can eliminate a bitcast.
2371 static Instruction *foldBitCastBitwiseLogic(BitCastInst &BitCast,
2372 InstCombiner::BuilderTy &Builder) {
2373 Type *DestTy = BitCast.getType();
2374 BinaryOperator *BO;
2375 if (!DestTy->isIntOrIntVectorTy() ||
2376 !match(BitCast.getOperand(0), m_OneUse(m_BinOp(BO))) ||
2377 !BO->isBitwiseLogicOp())
2378 return nullptr;
2379
2380 // FIXME: This transform is restricted to vector types to avoid backend
2381 // problems caused by creating potentially illegal operations. If a fix-up is
2382 // added to handle that situation, we can remove this check.
2383 if (!DestTy->isVectorTy() || !BO->getType()->isVectorTy())
2384 return nullptr;
2385
2386 Value *X;
2387 if (match(BO->getOperand(0), m_OneUse(m_BitCast(m_Value(X)))) &&
2388 X->getType() == DestTy && !isa<Constant>(X)) {
2389 // bitcast(logic(bitcast(X), Y)) --> logic'(X, bitcast(Y))
2390 Value *CastedOp1 = Builder.CreateBitCast(BO->getOperand(1), DestTy);
2391 return BinaryOperator::Create(BO->getOpcode(), X, CastedOp1);
2392 }
2393
2394 if (match(BO->getOperand(1), m_OneUse(m_BitCast(m_Value(X)))) &&
2395 X->getType() == DestTy && !isa<Constant>(X)) {
2396 // bitcast(logic(Y, bitcast(X))) --> logic'(bitcast(Y), X)
2397 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2398 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, X);
2399 }
2400
2401 // Canonicalize vector bitcasts to come before vector bitwise logic with a
2402 // constant. This eases recognition of special constants for later ops.
2403 // Example:
2404 // icmp u/s (a ^ signmask), (b ^ signmask) --> icmp s/u a, b
2405 Constant *C;
2406 if (match(BO->getOperand(1), m_Constant(C))) {
2407 // bitcast (logic X, C) --> logic (bitcast X, C')
2408 Value *CastedOp0 = Builder.CreateBitCast(BO->getOperand(0), DestTy);
2409 Value *CastedC = Builder.CreateBitCast(C, DestTy);
2410 return BinaryOperator::Create(BO->getOpcode(), CastedOp0, CastedC);
2411 }
2412
2413 return nullptr;
2414 }
2415
2416 /// Change the type of a select if we can eliminate a bitcast.
2417 static Instruction *foldBitCastSelect(BitCastInst &BitCast,
2418 InstCombiner::BuilderTy &Builder) {
2419 Value *Cond, *TVal, *FVal;
2420 if (!match(BitCast.getOperand(0),
2421 m_OneUse(m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))))
2422 return nullptr;
2423
2424 // A vector select must maintain the same number of elements in its operands.
2425 Type *CondTy = Cond->getType();
2426 Type *DestTy = BitCast.getType();
2427 if (auto *CondVTy = dyn_cast<VectorType>(CondTy))
2428 if (!DestTy->isVectorTy() ||
2429 CondVTy->getElementCount() !=
2430 cast<VectorType>(DestTy)->getElementCount())
2431 return nullptr;
2432
2433 // FIXME: This transform is restricted from changing the select between
2434 // scalars and vectors to avoid backend problems caused by creating
2435 // potentially illegal operations. If a fix-up is added to handle that
2436 // situation, we can remove this check.
2437 if (DestTy->isVectorTy() != TVal->getType()->isVectorTy())
2438 return nullptr;
2439
2440 auto *Sel = cast<Instruction>(BitCast.getOperand(0));
2441 Value *X;
2442 if (match(TVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2443 !isa<Constant>(X)) {
2444 // bitcast(select(Cond, bitcast(X), Y)) --> select'(Cond, X, bitcast(Y))
2445 Value *CastedVal = Builder.CreateBitCast(FVal, DestTy);
2446 return SelectInst::Create(Cond, X, CastedVal, "", nullptr, Sel);
2447 }
2448
2449 if (match(FVal, m_OneUse(m_BitCast(m_Value(X)))) && X->getType() == DestTy &&
2450 !isa<Constant>(X)) {
2451 // bitcast(select(Cond, Y, bitcast(X))) --> select'(Cond, bitcast(Y), X)
2452 Value *CastedVal = Builder.CreateBitCast(TVal, DestTy);
2453 return SelectInst::Create(Cond, CastedVal, X, "", nullptr, Sel);
2454 }
2455
2456 return nullptr;
2457 }
2458
2459 /// Check if all users of CI are StoreInsts.
2460 static bool hasStoreUsersOnly(CastInst &CI) {
2461 for (User *U : CI.users()) {
2462 if (!isa<StoreInst>(U))
2463 return false;
2464 }
2465 return true;
2466 }
2467
2468 /// This function handles the following case:
2469 ///
2470 /// A -> B cast
2471 /// PHI
2472 /// B -> A cast
2473 ///
2474 /// All the related PHI nodes can be replaced by new PHI nodes with type A.
2475 /// The uses of \p CI can be changed to the new PHI node corresponding to \p PN.
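///
/// Illustrative example (with A = <2 x float> and B = i64):
///   %b = bitcast <2 x float> %x to i64        ; A -> B
///   %p = phi i64 [ %b, %entry ], [ %p2, %loop ]
///   %a = bitcast i64 %p to <2 x float>        ; B -> A
/// The PHI (and the casts around it) can be rewritten to operate directly on
/// <2 x float>.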
2476 Instruction *InstCombinerImpl::optimizeBitCastFromPhi(CastInst &CI,
2477 PHINode *PN) {
2478 // BitCast used by Store can be handled in InstCombineLoadStoreAlloca.cpp.
2479 if (hasStoreUsersOnly(CI))
2480 return nullptr;
2481
2482 Value *Src = CI.getOperand(0);
2483 Type *SrcTy = Src->getType(); // Type B
2484 Type *DestTy = CI.getType(); // Type A
2485
2486 SmallVector<PHINode *, 4> PhiWorklist;
2487 SmallSetVector<PHINode *, 4> OldPhiNodes;
2488
2489 // Find all of the A->B casts and PHI nodes.
2490 // We need to inspect all related PHI nodes, but PHIs can be cyclic, so
2491 // OldPhiNodes is used to track all known PHI nodes, before adding a new
2492 // PHI to PhiWorklist, it is checked against and added to OldPhiNodes first.
2493 PhiWorklist.push_back(PN);
2494 OldPhiNodes.insert(PN);
2495 while (!PhiWorklist.empty()) {
2496 auto *OldPN = PhiWorklist.pop_back_val();
2497 for (Value *IncValue : OldPN->incoming_values()) {
2498 if (isa<Constant>(IncValue))
2499 continue;
2500
2501 if (auto *LI = dyn_cast<LoadInst>(IncValue)) {
2502 // If there is a sequence of one or more load instructions where each
2503 // loaded value is used as the address of a later load, a bitcast is
2504 // necessary to change the value type, so don't optimize it. For
2505 // simplicity we give up if the load address comes from another load.
2506 Value *Addr = LI->getOperand(0);
2507 if (Addr == &CI || isa<LoadInst>(Addr))
2508 return nullptr;
2509 // Don't transform "load <256 x i32>, <256 x i32>*" to
2510 // "load x86_amx, x86_amx*", because x86_amx* is invalid.
2511 // TODO: Remove this check when bitcast between vector and x86_amx
2512 // is replaced with a specific intrinsic.
2513 if (DestTy->isX86_AMXTy())
2514 return nullptr;
2515 if (LI->hasOneUse() && LI->isSimple())
2516 continue;
2517 // If a LoadInst has more than one use, changing the type of loaded
2518 // value may create another bitcast.
2519 return nullptr;
2520 }
2521
2522 if (auto *PNode = dyn_cast<PHINode>(IncValue)) {
2523 if (OldPhiNodes.insert(PNode))
2524 PhiWorklist.push_back(PNode);
2525 continue;
2526 }
2527
2528 auto *BCI = dyn_cast<BitCastInst>(IncValue);
2529 // We can't handle other instructions.
2530 if (!BCI)
2531 return nullptr;
2532
2533 // Verify it's a A->B cast.
2534 Type *TyA = BCI->getOperand(0)->getType();
2535 Type *TyB = BCI->getType();
2536 if (TyA != DestTy || TyB != SrcTy)
2537 return nullptr;
2538 }
2539 }
2540
2541 // Check that each user of each old PHI node is something that we can
2542 // rewrite, so that all of the old PHI nodes can be cleaned up afterwards.
2543 for (auto *OldPN : OldPhiNodes) {
2544 for (User *V : OldPN->users()) {
2545 if (auto *SI = dyn_cast<StoreInst>(V)) {
2546 if (!SI->isSimple() || SI->getOperand(0) != OldPN)
2547 return nullptr;
2548 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2549 // Verify it's a B->A cast.
2550 Type *TyB = BCI->getOperand(0)->getType();
2551 Type *TyA = BCI->getType();
2552 if (TyA != DestTy || TyB != SrcTy)
2553 return nullptr;
2554 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2555 // As long as the user is another old PHI node, then even if we don't
2556 // rewrite it, the PHI web we're considering won't have any users
2557 // outside itself, so it'll be dead.
2558 if (OldPhiNodes.count(PHI) == 0)
2559 return nullptr;
2560 } else {
2561 return nullptr;
2562 }
2563 }
2564 }
2565
2566 // For each old PHI node, create a corresponding new PHI node with a type A.
2567 SmallDenseMap<PHINode *, PHINode *> NewPNodes;
2568 for (auto *OldPN : OldPhiNodes) {
2569 Builder.SetInsertPoint(OldPN);
2570 PHINode *NewPN = Builder.CreatePHI(DestTy, OldPN->getNumOperands());
2571 NewPNodes[OldPN] = NewPN;
2572 }
2573
2574 // Fill in the operands of new PHI nodes.
2575 for (auto *OldPN : OldPhiNodes) {
2576 PHINode *NewPN = NewPNodes[OldPN];
2577 for (unsigned j = 0, e = OldPN->getNumOperands(); j != e; ++j) {
2578 Value *V = OldPN->getOperand(j);
2579 Value *NewV = nullptr;
2580 if (auto *C = dyn_cast<Constant>(V)) {
2581 NewV = ConstantExpr::getBitCast(C, DestTy);
2582 } else if (auto *LI = dyn_cast<LoadInst>(V)) {
2583 // Explicitly perform load combine to make sure no opposing transform
2584 // can remove the bitcast in the meantime and trigger an infinite loop.
2585 Builder.SetInsertPoint(LI);
2586 NewV = combineLoadToNewType(*LI, DestTy);
2587 // Remove the old load and its use in the old phi, which itself becomes
2588 // dead once the whole transform finishes.
2589 replaceInstUsesWith(*LI, PoisonValue::get(LI->getType()));
2590 eraseInstFromFunction(*LI);
2591 } else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2592 NewV = BCI->getOperand(0);
2593 } else if (auto *PrevPN = dyn_cast<PHINode>(V)) {
2594 NewV = NewPNodes[PrevPN];
2595 }
2596 assert(NewV);
2597 NewPN->addIncoming(NewV, OldPN->getIncomingBlock(j));
2598 }
2599 }
2600
2601 // Traverse all accumulated PHI nodes and process their users,
2602 // which are Stores and BitCasts. Without this processing,
2603 // NewPHI nodes could be replicated and lead to extra
2604 // moves generated after DeSSA.
2605 // If there is a store with type B, change it to type A.
2606
2607
2608 // Replace users of BitCast B->A with NewPHI. This will help
2609 // later to get rid of a closure formed by the OldPHI nodes.
2610 Instruction *RetVal = nullptr;
2611 for (auto *OldPN : OldPhiNodes) {
2612 PHINode *NewPN = NewPNodes[OldPN];
2613 for (User *V : make_early_inc_range(OldPN->users())) {
2614 if (auto *SI = dyn_cast<StoreInst>(V)) {
2615 assert(SI->isSimple() && SI->getOperand(0) == OldPN);
2616 Builder.SetInsertPoint(SI);
2617 auto *NewBC =
2618 cast<BitCastInst>(Builder.CreateBitCast(NewPN, SrcTy));
2619 SI->setOperand(0, NewBC);
2620 Worklist.push(SI);
2621 assert(hasStoreUsersOnly(*NewBC));
2622 }
2623 else if (auto *BCI = dyn_cast<BitCastInst>(V)) {
2624 Type *TyB = BCI->getOperand(0)->getType();
2625 Type *TyA = BCI->getType();
2626 assert(TyA == DestTy && TyB == SrcTy);
2627 (void) TyA;
2628 (void) TyB;
2629 Instruction *I = replaceInstUsesWith(*BCI, NewPN);
2630 if (BCI == &CI)
2631 RetVal = I;
2632 } else if (auto *PHI = dyn_cast<PHINode>(V)) {
2633 assert(OldPhiNodes.contains(PHI));
2634 (void) PHI;
2635 } else {
2636 llvm_unreachable("all uses should be handled");
2637 }
2638 }
2639 }
2640
2641 return RetVal;
2642 }
2643
2644 static Instruction *convertBitCastToGEP(BitCastInst &CI, IRBuilderBase &Builder,
2645 const DataLayout &DL) {
2646 Value *Src = CI.getOperand(0);
2647 PointerType *SrcPTy = cast<PointerType>(Src->getType());
2648 PointerType *DstPTy = cast<PointerType>(CI.getType());
2649
2650 // Bitcasts involving opaque pointers cannot be converted into a GEP.
2651 if (SrcPTy->isOpaque() || DstPTy->isOpaque())
2652 return nullptr;
2653
2654 Type *DstElTy = DstPTy->getElementType();
2655 Type *SrcElTy = SrcPTy->getElementType();
2656
2657 // When the type pointed to is not sized, the cast cannot be
2658 // turned into a gep.
2659 if (!SrcElTy->isSized())
2660 return nullptr;
2661
2662 // If the source and destination are pointers, and this cast is equivalent
2663 // to a getelementptr X, 0, 0, 0... turn it into the appropriate gep.
2664 // This can enhance SROA and other transforms that want type-safe pointers.
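  //
  // Illustrative example:
  //   bitcast [10 x i8]* %p to i8*
  // becomes:
  //   getelementptr [10 x i8], [10 x i8]* %p, i32 0, i32 0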
2665 unsigned NumZeros = 0;
2666 while (SrcElTy && SrcElTy != DstElTy) {
2667 SrcElTy = GetElementPtrInst::getTypeAtIndex(SrcElTy, (uint64_t)0);
2668 ++NumZeros;
2669 }
2670
2671 // If we found a path from the src to dest, create the getelementptr now.
2672 if (SrcElTy == DstElTy) {
2673 SmallVector<Value *, 8> Idxs(NumZeros + 1, Builder.getInt32(0));
2674 GetElementPtrInst *GEP =
2675 GetElementPtrInst::Create(SrcPTy->getElementType(), Src, Idxs);
2676
2677 // If the source pointer is dereferenceable, then assume it points to an
2678 // allocated object and apply "inbounds" to the GEP.
2679 bool CanBeNull, CanBeFreed;
2680 if (Src->getPointerDereferenceableBytes(DL, CanBeNull, CanBeFreed)) {
2681 // In a non-default address space (not 0), a null pointer can not be
2682 // assumed inbounds, so ignore that case (dereferenceable_or_null).
2683 // The reason is that 'null' is not treated differently in these address
2684 // spaces, and we consequently ignore the 'gep inbounds' special case
2685 // for 'null' which allows 'inbounds' on 'null' if the indices are
2686 // zeros.
2687 if (SrcPTy->getAddressSpace() == 0 || !CanBeNull)
2688 GEP->setIsInBounds();
2689 }
2690 return GEP;
2691 }
2692 return nullptr;
2693 }
2694
2695 Instruction *InstCombinerImpl::visitBitCast(BitCastInst &CI) {
2696 // If the operands are integer typed then apply the integer transforms,
2697 // otherwise just apply the common ones.
2698 Value *Src = CI.getOperand(0);
2699 Type *SrcTy = Src->getType();
2700 Type *DestTy = CI.getType();
2701
2702 // Get rid of casts from one type to the same type. These are useless and can
2703 // be replaced by the operand.
2704 if (DestTy == Src->getType())
2705 return replaceInstUsesWith(CI, Src);
2706
2707 if (isa<PointerType>(SrcTy) && isa<PointerType>(DestTy)) {
2708 // If we are casting a alloca to a pointer to a type of the same
2709 // size, rewrite the allocation instruction to allocate the "right" type.
2710 // There is no need to modify malloc calls because it is their bitcast that
2711 // needs to be cleaned up.
2712 if (AllocaInst *AI = dyn_cast<AllocaInst>(Src))
2713 if (Instruction *V = PromoteCastOfAllocation(CI, *AI))
2714 return V;
2715
2716 if (Instruction *I = convertBitCastToGEP(CI, Builder, DL))
2717 return I;
2718 }

  if (FixedVectorType *DestVTy = dyn_cast<FixedVectorType>(DestTy)) {
    // Beware: messing with this target-specific oddity may cause trouble.
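    // An illustrative sketch of the fold below (hypothetical IR):
    //   %v = bitcast x86_mmx %m to <1 x i64>
    // becomes
    //   %e = bitcast x86_mmx %m to i64
    //   %v = insertelement <1 x i64> poison, i64 %e, i32 0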
    if (DestVTy->getNumElements() == 1 && SrcTy->isX86_MMXTy()) {
      Value *Elem = Builder.CreateBitCast(Src, DestVTy->getElementType());
      return InsertElementInst::Create(PoisonValue::get(DestTy), Elem,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
    }

    if (isa<IntegerType>(SrcTy)) {
      // If this is a cast from an integer to vector, check to see if the input
      // is a trunc or zext of a bitcast from vector. If so, we can replace all
      // the casts with a shuffle and (potentially) a bitcast.
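      // An illustrative sketch of the pattern (hypothetical IR; the exact
      // shuffle chosen depends on endianness and element sizes):
      //   %i = bitcast <4 x i32> %v to i128
      //   %t = trunc i128 %i to i64
      //   %r = bitcast i64 %t to <2 x i32>
      // can be folded to a shufflevector that selects one half of %v.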
      if (isa<TruncInst>(Src) || isa<ZExtInst>(Src)) {
        CastInst *SrcCast = cast<CastInst>(Src);
        if (BitCastInst *BCIn = dyn_cast<BitCastInst>(SrcCast->getOperand(0)))
          if (isa<VectorType>(BCIn->getOperand(0)->getType()))
            if (Instruction *I = optimizeVectorResizeWithIntegerBitCasts(
                    BCIn->getOperand(0), cast<VectorType>(DestTy), *this))
              return I;
      }

      // If the input is an 'or' instruction, we may be doing shifts and ors to
      // assemble the elements of the vector manually. Try to rip the code out
      // and replace it with insertelements.
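      // An illustrative sketch (hypothetical IR for i32 --> <2 x i16> on a
      // little-endian target):
      //   %lo = zext i16 %a to i32
      //   %bz = zext i16 %b to i32
      //   %hi = shl i32 %bz, 16
      //   %or = or i32 %lo, %hi
      //   %v  = bitcast i32 %or to <2 x i16>
      // can become two insertelements of %a and %b into a <2 x i16> vector.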
      if (Value *V = optimizeIntegerToVectorInsertions(CI, *this))
        return replaceInstUsesWith(CI, V);
    }
  }

  if (FixedVectorType *SrcVTy = dyn_cast<FixedVectorType>(SrcTy)) {
    if (SrcVTy->getNumElements() == 1) {
      // If our destination is not a vector, then make this a straight
      // scalar-scalar cast.
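      // An illustrative sketch (hypothetical IR):
      //   %d = bitcast <1 x i64> %v to double
      // becomes
      //   %e = extractelement <1 x i64> %v, i32 0
      //   %d = bitcast i64 %e to double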
      if (!DestTy->isVectorTy()) {
        Value *Elem =
          Builder.CreateExtractElement(Src,
                     Constant::getNullValue(Type::getInt32Ty(CI.getContext())));
        return CastInst::Create(Instruction::BitCast, Elem, DestTy);
      }

      // Otherwise, see if our source is an insert. If so, then use the scalar
      // component directly:
      // bitcast (inselt <1 x elt> V, X, 0) to <n x m> --> bitcast X to <n x m>
      if (auto *InsElt = dyn_cast<InsertElementInst>(Src))
        return new BitCastInst(InsElt->getOperand(1), DestTy);
    }
  }

  if (auto *Shuf = dyn_cast<ShuffleVectorInst>(Src)) {
    // Okay, we have (bitcast (shuffle ..)). Check to see if this is
    // a bitcast to a vector with the same number of elements.
    Value *ShufOp0 = Shuf->getOperand(0);
    Value *ShufOp1 = Shuf->getOperand(1);
    auto ShufElts = cast<VectorType>(Shuf->getType())->getElementCount();
    auto SrcVecElts = cast<VectorType>(ShufOp0->getType())->getElementCount();
    if (Shuf->hasOneUse() && DestTy->isVectorTy() &&
        cast<VectorType>(DestTy)->getElementCount() == ShufElts &&
        ShufElts == SrcVecElts) {
      BitCastInst *Tmp;
      // If either of the operands is a cast from CI.getType(), then
      // evaluating the shuffle in the casted destination's type will allow
      // us to eliminate at least one cast.
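      // An illustrative sketch (hypothetical IR, with %x of type <4 x float>
      // and DestTy == <4 x float>):
      //   %op0 = bitcast <4 x float> %x to <4 x i32>
      //   %s   = shufflevector <4 x i32> %op0, <4 x i32> %op1, <mask>
      //   %r   = bitcast <4 x i32> %s to <4 x float>
      // becomes a shufflevector performed directly on <4 x float> operands.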
      if (((Tmp = dyn_cast<BitCastInst>(ShufOp0)) &&
           Tmp->getOperand(0)->getType() == DestTy) ||
          ((Tmp = dyn_cast<BitCastInst>(ShufOp1)) &&
           Tmp->getOperand(0)->getType() == DestTy)) {
        Value *LHS = Builder.CreateBitCast(ShufOp0, DestTy);
        Value *RHS = Builder.CreateBitCast(ShufOp1, DestTy);
        // Return a new shuffle vector. Use the same element IDs, as we
        // know the vector types match in their number of elements.
        return new ShuffleVectorInst(LHS, RHS, Shuf->getShuffleMask());
      }
    }

    // A bitcasted-to-scalar and byte-reversing shuffle is better recognized as
    // a byte-swap:
    // bitcast <N x i8> (shuf X, undef, <N, N-1,...0>) --> bswap (bitcast X)
    // TODO: We should match the related pattern for bitreverse.
    if (DestTy->isIntegerTy() &&
        DL.isLegalInteger(DestTy->getScalarSizeInBits()) &&
        SrcTy->getScalarSizeInBits() == 8 &&
        ShufElts.getKnownMinValue() % 2 == 0 && Shuf->hasOneUse() &&
        Shuf->isReverse()) {
      assert(ShufOp0->getType() == SrcTy && "Unexpected shuffle mask");
      assert(match(ShufOp1, m_Undef()) && "Unexpected shuffle op");
      Function *Bswap =
          Intrinsic::getDeclaration(CI.getModule(), Intrinsic::bswap, DestTy);
      Value *ScalarX = Builder.CreateBitCast(ShufOp0, DestTy);
      return CallInst::Create(Bswap, { ScalarX });
    }
  }

  // Handle the A->B->A cast where there is an intervening PHI node.
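  // An illustrative sketch (hypothetical IR, with A = double and B = i64):
  //   %phi = phi i64 [ %bc1, %bb1 ], [ %bc2, %bb2 ]
  //   %res = bitcast i64 %phi to double
  // where %bc1 and %bc2 are bitcasts from double; the PHI can then be
  // rewritten to flow the double values through directly.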
  if (PHINode *PN = dyn_cast<PHINode>(Src))
    if (Instruction *I = optimizeBitCastFromPhi(CI, PN))
      return I;

  if (Instruction *I = canonicalizeBitCastExtElt(CI, *this))
    return I;

  if (Instruction *I = foldBitCastBitwiseLogic(CI, Builder))
    return I;

  if (Instruction *I = foldBitCastSelect(CI, Builder))
    return I;

  if (SrcTy->isPointerTy())
    return commonPointerCastTransforms(CI);
  return commonCastTransforms(CI);
}

Instruction *InstCombinerImpl::visitAddrSpaceCast(AddrSpaceCastInst &CI) {
  // If the destination pointer element type is not the same as the source's,
  // first do a bitcast to the destination type, and then the addrspacecast.
  // This allows the cast to be exposed to other transforms.
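  // An illustrative sketch (hypothetical IR):
  //   %a = addrspacecast i32 addrspace(1)* %p to i8*
  // becomes
  //   %b = bitcast i32 addrspace(1)* %p to i8 addrspace(1)*
  //   %a = addrspacecast i8 addrspace(1)* %b to i8*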
  Value *Src = CI.getOperand(0);
  PointerType *SrcTy = cast<PointerType>(Src->getType()->getScalarType());
  PointerType *DestTy = cast<PointerType>(CI.getType()->getScalarType());

  if (!SrcTy->hasSameElementTypeAs(DestTy)) {
    Type *MidTy =
        PointerType::getWithSamePointeeType(DestTy, SrcTy->getAddressSpace());
    // Handle vectors of pointers.
    if (VectorType *VT = dyn_cast<VectorType>(CI.getType()))
      MidTy = VectorType::get(MidTy, VT->getElementCount());

    Value *NewBitCast = Builder.CreateBitCast(Src, MidTy);
    return new AddrSpaceCastInst(NewBitCast, CI.getType());
  }

  return commonPointerCastTransforms(CI);
}
