//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
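  // For example, on a little-endian target <4 x i8> <i8 1, i8 2, i8 3, i8 4>
  // is read back-to-front (element 3 first), so element 0 ends up in the low
  // byte and the result is the i32 0x04030201.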
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
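/// For example, on a little-endian target "bitcast (<2 x i32> <i32 1, i32 2>
/// to i64)" folds to i64 0x200000001.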
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating-point values, convert it to a
      // vector of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first: we only want to think about integers here, so if we
  // have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
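    // On a little-endian target this folds to
    // <2 x i64> <i64 0x100000000, i64 0x300000002>.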
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(
          Src, ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
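/// For example, "getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 3)"
/// reports @a with a byte offset of 12.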
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (SrcSize < DestSize)
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
      return Constant::getNullValue(DestTy);
    if (C->isAllOnesValue() &&
        (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
         DestTy->isVectorTy()) &&
        !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() &&
        !DestTy->isPtrOrPtrVectorTy())
      // Get ones when the input is trivial, but
      // only for supported types inside getAllOnesValue.
      return Constant::getAllOnesValue(DestTy);

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill down
    // and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);
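    // For example, byte 1 of the i32 0x04030201 is 0x02 on a little-endian
    // target and 0x03 on a big-endian one.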

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail padding,
      // read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so we're done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64 load
    // and then bitcast the result. This can be useful for union cases. Note
    // that address spaces don't matter here since we're not going to result in
    // an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(
          C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast
        return Constant::getNullValue(LoadTy);
      Type *CastTy = LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy)
                                                  : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
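  // The 32-byte cap matches the fixed RawBytes buffer below, i.e. we handle
  // integer loads of at most 256 bits.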
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize();

  // If the load ends at or before the start of the constant, the result is
  // undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // If the load starts at or beyond the end of the constant, the result is
  // undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

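  // Reassemble the bytes into an integer, respecting the target's byte order:
  // on a little-endian target the highest-indexed byte is the most
  // significant.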
  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
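  // For example, loading an i32 through a pointer to the nul-terminated
  // string "abc" folds to 0x00636261 on a little-endian target.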
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append the terminating nul at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(CE))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using any available DataLayout information.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
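  // For example, with i8 GEPs into @a at byte offsets 123 and 4, the
  // subtraction below folds directly to i64 119.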
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
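/// For example, stripping "addrspacecast (i8 addrspace(1)* @g to i8*)" yields
/// @g in addrspace(1), so the result is cast back to a pointer in the original
/// address space 0.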
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntIdxTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          return ConstantFoldConstant(Res, DL, TLI);
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a constant
    // integer.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else {
        Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
        if (!NextTy)
          break;
        Ty = NextTy;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant from the one we saw
      // previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptr-sized, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptr-sized, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
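    // For example, with 64-bit pointers, folding "ptrtoint (inttoptr i128 X
    // to i8*) to i32" first masks X down to the low 64 pointer bits and then
    // truncates to i32.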
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::IntToPtr) {
        Constant *Input = CE->getOperand(0);
        unsigned InWidth = Input->getType()->getScalarSizeInBits();
        unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
        if (PtrWidth < InWidth) {
          Constant *Mask =
              ConstantInt::get(CE->getContext(),
                               APInt::getLowBitsSet(InWidth, PtrWidth));
          Input = ConstantExpr::getAnd(Input, Mask);
        }
        // Do a zext or trunc to get to the dest size.
        return ConstantExpr::getIntegerCast(Input, DestTy, false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr; // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
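  // For example, for "getelementptr ([2 x [3 x i32]], [2 x [3 x i32]]* @a,
  // i64 0, i64 1, i64 2)" we walk the initializer of @a down to element
  // [1][2].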
1419 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
1420 C = C->getAggregateElement(CE->getOperand(i));
1421 if (!C)
1422 return nullptr;
1423 }
1424 return C;
1425 }
1426
1427 Constant *
ConstantFoldLoadThroughGEPIndices(Constant * C,ArrayRef<Constant * > Indices)1428 llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
1429 ArrayRef<Constant *> Indices) {
1430 // Loop over all of the operands, tracking down which value we are
1431 // addressing.
1432 for (Constant *Index : Indices) {
1433 C = C->getAggregateElement(Index);
1434 if (!C)
1435 return nullptr;
1436 }
1437 return C;
1438 }
1439
1440 //===----------------------------------------------------------------------===//
1441 // Constant Folding for Calls
1442 //
1443
canConstantFoldCallTo(const CallBase * Call,const Function * F)1444 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1445 if (Call->isNoBuiltin())
1446 return false;
1447 switch (F->getIntrinsicID()) {
1448 // Operations that do not operate floating-point numbers and do not depend on
1449 // FP environment can be folded even in strictfp functions.
1450 case Intrinsic::bswap:
1451 case Intrinsic::ctpop:
1452 case Intrinsic::ctlz:
1453 case Intrinsic::cttz:
1454 case Intrinsic::fshl:
1455 case Intrinsic::fshr:
1456 case Intrinsic::launder_invariant_group:
1457 case Intrinsic::strip_invariant_group:
1458 case Intrinsic::masked_load:
1459 case Intrinsic::get_active_lane_mask:
1460 case Intrinsic::abs:
1461 case Intrinsic::smax:
1462 case Intrinsic::smin:
1463 case Intrinsic::umax:
1464 case Intrinsic::umin:
1465 case Intrinsic::sadd_with_overflow:
1466 case Intrinsic::uadd_with_overflow:
1467 case Intrinsic::ssub_with_overflow:
1468 case Intrinsic::usub_with_overflow:
1469 case Intrinsic::smul_with_overflow:
1470 case Intrinsic::umul_with_overflow:
1471 case Intrinsic::sadd_sat:
1472 case Intrinsic::uadd_sat:
1473 case Intrinsic::ssub_sat:
1474 case Intrinsic::usub_sat:
1475 case Intrinsic::smul_fix:
1476 case Intrinsic::smul_fix_sat:
1477 case Intrinsic::bitreverse:
1478 case Intrinsic::is_constant:
1479 case Intrinsic::vector_reduce_add:
1480 case Intrinsic::vector_reduce_mul:
1481 case Intrinsic::vector_reduce_and:
1482 case Intrinsic::vector_reduce_or:
1483 case Intrinsic::vector_reduce_xor:
1484 case Intrinsic::vector_reduce_smin:
1485 case Intrinsic::vector_reduce_smax:
1486 case Intrinsic::vector_reduce_umin:
1487 case Intrinsic::vector_reduce_umax:
1488 // Target intrinsics
1489 case Intrinsic::arm_mve_vctp8:
1490 case Intrinsic::arm_mve_vctp16:
1491 case Intrinsic::arm_mve_vctp32:
1492 case Intrinsic::arm_mve_vctp64:
1493 // WebAssembly float semantics are always known
1494 case Intrinsic::wasm_trunc_signed:
1495 case Intrinsic::wasm_trunc_unsigned:
1496 case Intrinsic::wasm_trunc_saturate_signed:
1497 case Intrinsic::wasm_trunc_saturate_unsigned:
1498 return true;
1499
1500   // Floating-point operations cannot be folded in strictfp functions in the
1501   // general case; they can be folded if the FP environment is known to the compiler.
1502 case Intrinsic::minnum:
1503 case Intrinsic::maxnum:
1504 case Intrinsic::minimum:
1505 case Intrinsic::maximum:
1506 case Intrinsic::log:
1507 case Intrinsic::log2:
1508 case Intrinsic::log10:
1509 case Intrinsic::exp:
1510 case Intrinsic::exp2:
1511 case Intrinsic::sqrt:
1512 case Intrinsic::sin:
1513 case Intrinsic::cos:
1514 case Intrinsic::pow:
1515 case Intrinsic::powi:
1516 case Intrinsic::fma:
1517 case Intrinsic::fmuladd:
1518 case Intrinsic::fptoui_sat:
1519 case Intrinsic::fptosi_sat:
1520 case Intrinsic::convert_from_fp16:
1521 case Intrinsic::convert_to_fp16:
1522 case Intrinsic::amdgcn_cos:
1523 case Intrinsic::amdgcn_cubeid:
1524 case Intrinsic::amdgcn_cubema:
1525 case Intrinsic::amdgcn_cubesc:
1526 case Intrinsic::amdgcn_cubetc:
1527 case Intrinsic::amdgcn_fmul_legacy:
1528 case Intrinsic::amdgcn_fma_legacy:
1529 case Intrinsic::amdgcn_fract:
1530 case Intrinsic::amdgcn_ldexp:
1531 case Intrinsic::amdgcn_sin:
1532 // The intrinsics below depend on rounding mode in MXCSR.
1533 case Intrinsic::x86_sse_cvtss2si:
1534 case Intrinsic::x86_sse_cvtss2si64:
1535 case Intrinsic::x86_sse_cvttss2si:
1536 case Intrinsic::x86_sse_cvttss2si64:
1537 case Intrinsic::x86_sse2_cvtsd2si:
1538 case Intrinsic::x86_sse2_cvtsd2si64:
1539 case Intrinsic::x86_sse2_cvttsd2si:
1540 case Intrinsic::x86_sse2_cvttsd2si64:
1541 case Intrinsic::x86_avx512_vcvtss2si32:
1542 case Intrinsic::x86_avx512_vcvtss2si64:
1543 case Intrinsic::x86_avx512_cvttss2si:
1544 case Intrinsic::x86_avx512_cvttss2si64:
1545 case Intrinsic::x86_avx512_vcvtsd2si32:
1546 case Intrinsic::x86_avx512_vcvtsd2si64:
1547 case Intrinsic::x86_avx512_cvttsd2si:
1548 case Intrinsic::x86_avx512_cvttsd2si64:
1549 case Intrinsic::x86_avx512_vcvtss2usi32:
1550 case Intrinsic::x86_avx512_vcvtss2usi64:
1551 case Intrinsic::x86_avx512_cvttss2usi:
1552 case Intrinsic::x86_avx512_cvttss2usi64:
1553 case Intrinsic::x86_avx512_vcvtsd2usi32:
1554 case Intrinsic::x86_avx512_vcvtsd2usi64:
1555 case Intrinsic::x86_avx512_cvttsd2usi:
1556 case Intrinsic::x86_avx512_cvttsd2usi64:
1557 return !Call->isStrictFP();
1558
1559   // Sign operations are actually bitwise operations; they do not raise
1560   // exceptions even for SNaNs.
1561 case Intrinsic::fabs:
1562 case Intrinsic::copysign:
1563   // Non-constrained variants of rounding operations imply the default FP
1564   // environment, so they can be folded in any case.
1565 case Intrinsic::ceil:
1566 case Intrinsic::floor:
1567 case Intrinsic::round:
1568 case Intrinsic::roundeven:
1569 case Intrinsic::trunc:
1570 case Intrinsic::nearbyint:
1571 case Intrinsic::rint:
1572   // Constrained intrinsics can be folded if the FP environment is known
1573   // to the compiler.
1574 case Intrinsic::experimental_constrained_ceil:
1575 case Intrinsic::experimental_constrained_floor:
1576 case Intrinsic::experimental_constrained_round:
1577 case Intrinsic::experimental_constrained_roundeven:
1578 case Intrinsic::experimental_constrained_trunc:
1579 case Intrinsic::experimental_constrained_nearbyint:
1580 case Intrinsic::experimental_constrained_rint:
1581 return true;
1582 default:
1583 return false;
1584 case Intrinsic::not_intrinsic: break;
1585 }
1586
1587 if (!F->hasName() || Call->isStrictFP())
1588 return false;
1589
1590   // In these cases, checking the length is required. We don't want to return
1591   // true for a name like "cos\0blah", which strcmp would report as equal to
1592   // "cos" but which has length 8.
1593 StringRef Name = F->getName();
1594 switch (Name[0]) {
1595 default:
1596 return false;
1597 case 'a':
1598 return Name == "acos" || Name == "acosf" ||
1599 Name == "asin" || Name == "asinf" ||
1600 Name == "atan" || Name == "atanf" ||
1601 Name == "atan2" || Name == "atan2f";
1602 case 'c':
1603 return Name == "ceil" || Name == "ceilf" ||
1604 Name == "cos" || Name == "cosf" ||
1605 Name == "cosh" || Name == "coshf";
1606 case 'e':
1607 return Name == "exp" || Name == "expf" ||
1608 Name == "exp2" || Name == "exp2f";
1609 case 'f':
1610 return Name == "fabs" || Name == "fabsf" ||
1611 Name == "floor" || Name == "floorf" ||
1612 Name == "fmod" || Name == "fmodf";
1613 case 'l':
1614 return Name == "log" || Name == "logf" ||
1615 Name == "log2" || Name == "log2f" ||
1616 Name == "log10" || Name == "log10f";
1617 case 'n':
1618 return Name == "nearbyint" || Name == "nearbyintf";
1619 case 'p':
1620 return Name == "pow" || Name == "powf";
1621 case 'r':
1622 return Name == "remainder" || Name == "remainderf" ||
1623 Name == "rint" || Name == "rintf" ||
1624 Name == "round" || Name == "roundf";
1625 case 's':
1626 return Name == "sin" || Name == "sinf" ||
1627 Name == "sinh" || Name == "sinhf" ||
1628 Name == "sqrt" || Name == "sqrtf";
1629 case 't':
1630 return Name == "tan" || Name == "tanf" ||
1631 Name == "tanh" || Name == "tanhf" ||
1632 Name == "trunc" || Name == "truncf";
1633 case '_':
1634 // Check for various function names that get used for the math functions
1635 // when the header files are preprocessed with the macro
1636 // __FINITE_MATH_ONLY__ enabled.
1637 // The '12' here is the length of the shortest name that can match.
1638 // We need to check the size before looking at Name[1] and Name[2]
1639 // so we may as well check a limit that will eliminate mismatches.
1640 if (Name.size() < 12 || Name[1] != '_')
1641 return false;
1642 switch (Name[2]) {
1643 default:
1644 return false;
1645 case 'a':
1646 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1647 Name == "__asin_finite" || Name == "__asinf_finite" ||
1648 Name == "__atan2_finite" || Name == "__atan2f_finite";
1649 case 'c':
1650 return Name == "__cosh_finite" || Name == "__coshf_finite";
1651 case 'e':
1652 return Name == "__exp_finite" || Name == "__expf_finite" ||
1653 Name == "__exp2_finite" || Name == "__exp2f_finite";
1654 case 'l':
1655 return Name == "__log_finite" || Name == "__logf_finite" ||
1656 Name == "__log10_finite" || Name == "__log10f_finite";
1657 case 'p':
1658 return Name == "__pow_finite" || Name == "__powf_finite";
1659 case 's':
1660 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1661 }
1662 }
1663 }
1664
1665 namespace {
1666
1667 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1668 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1669 APFloat APF(V);
1670 bool unused;
1671 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1672 return ConstantFP::get(Ty->getContext(), APF);
1673 }
1674 if (Ty->isDoubleTy())
1675 return ConstantFP::get(Ty->getContext(), APFloat(V));
1676 llvm_unreachable("Can only constant fold half/float/double");
1677 }
1678
1679 /// Clear the floating-point exception state.
1680 inline void llvm_fenv_clearexcept() {
1681 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1682 feclearexcept(FE_ALL_EXCEPT);
1683 #endif
1684 errno = 0;
1685 }
1686
1687 /// Test if a floating-point exception was raised.
1688 inline bool llvm_fenv_testexcept() {
1689 int errno_val = errno;
1690 if (errno_val == ERANGE || errno_val == EDOM)
1691 return true;
1692 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1693 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1694 return true;
1695 #endif
1696 return false;
1697 }
1698
1699 Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
1700 llvm_fenv_clearexcept();
1701 V = NativeFP(V);
1702 if (llvm_fenv_testexcept()) {
1703 llvm_fenv_clearexcept();
1704 return nullptr;
1705 }
1706
1707 return GetConstantFoldFPValue(V, Ty);
1708 }
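// Usage sketch (editorial note): folding @llvm.log.f64(double 1.0) reduces
// to ConstantFoldFP(log, 1.0, Ty) and yields a double 0.0 constant. The
// clear/test protocol above rejects folds where the host libm reports an
// error, e.g. log(0.0), which sets ERANGE and/or raises FE_DIVBYZERO.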
1709
1710 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
1711 double W, Type *Ty) {
1712 llvm_fenv_clearexcept();
1713 V = NativeFP(V, W);
1714 if (llvm_fenv_testexcept()) {
1715 llvm_fenv_clearexcept();
1716 return nullptr;
1717 }
1718
1719 return GetConstantFoldFPValue(V, Ty);
1720 }
1721
1722 Constant *ConstantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1723 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1724 if (!VT)
1725 return nullptr;
1726 ConstantInt *CI = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1727 if (!CI)
1728 return nullptr;
1729 APInt Acc = CI->getValue();
1730
1731 for (unsigned I = 1; I < VT->getNumElements(); I++) {
1732 if (!(CI = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1733 return nullptr;
1734 const APInt &X = CI->getValue();
1735 switch (IID) {
1736 case Intrinsic::vector_reduce_add:
1737 Acc = Acc + X;
1738 break;
1739 case Intrinsic::vector_reduce_mul:
1740 Acc = Acc * X;
1741 break;
1742 case Intrinsic::vector_reduce_and:
1743 Acc = Acc & X;
1744 break;
1745 case Intrinsic::vector_reduce_or:
1746 Acc = Acc | X;
1747 break;
1748 case Intrinsic::vector_reduce_xor:
1749 Acc = Acc ^ X;
1750 break;
1751 case Intrinsic::vector_reduce_smin:
1752 Acc = APIntOps::smin(Acc, X);
1753 break;
1754 case Intrinsic::vector_reduce_smax:
1755 Acc = APIntOps::smax(Acc, X);
1756 break;
1757 case Intrinsic::vector_reduce_umin:
1758 Acc = APIntOps::umin(Acc, X);
1759 break;
1760 case Intrinsic::vector_reduce_umax:
1761 Acc = APIntOps::umax(Acc, X);
1762 break;
1763 }
1764 }
1765
1766 return ConstantInt::get(Op->getContext(), Acc);
1767 }
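// Illustrative worked example (editorial note): for
//   @llvm.vector.reduce.add.v4i32(<4 x i32> <i32 1, i32 2, i32 3, i32 4>)
// the accumulator starts at 1 and folds in 2, 3 and 4, producing i32 10.
// Any lane that is not a ConstantInt (including undef) aborts the fold here.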
1768
1769 /// Attempt to fold an SSE floating point to integer conversion of a constant
1770 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1771 /// used (toward nearest, ties to even). This matches the behavior of the
1772 /// non-truncating SSE instructions in the default rounding mode. The desired
1773 /// integer type Ty is used to select how many bits are available for the
1774 /// result. Returns null if the conversion cannot be performed, otherwise
1775 /// returns the Constant value resulting from the conversion.
1776 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1777 Type *Ty, bool IsSigned) {
1778   // All of these conversion intrinsics form an integer of at most 64 bits.
1779 unsigned ResultWidth = Ty->getIntegerBitWidth();
1780 assert(ResultWidth <= 64 &&
1781 "Can only constant fold conversions to 64 and 32 bit ints");
1782
1783 uint64_t UIntVal;
1784 bool isExact = false;
1785   APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
1786 : APFloat::rmNearestTiesToEven;
1787 APFloat::opStatus status =
1788 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1789 IsSigned, mode, &isExact);
1790 if (status != APFloat::opOK &&
1791 (!roundTowardZero || status != APFloat::opInexact))
1792 return nullptr;
1793 return ConstantInt::get(Ty, UIntVal, IsSigned);
1794 }
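// Illustrative worked example (editorial note): cvtss2si(2.5f) rounds to
// nearest-ties-to-even and folds to 2; the truncating cvttss2si(-2.7f)
// rounds toward zero and folds to -2. An out-of-range input such as
// cvttss2si(1e30f) reports opInvalidOp, so no constant is produced.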
1795
1796 double getValueAsDouble(ConstantFP *Op) {
1797 Type *Ty = Op->getType();
1798
1799 if (Ty->isFloatTy())
1800 return Op->getValueAPF().convertToFloat();
1801
1802 if (Ty->isDoubleTy())
1803 return Op->getValueAPF().convertToDouble();
1804
1805 bool unused;
1806 APFloat APF = Op->getValueAPF();
1807 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1808 return APF.convertToDouble();
1809 }
1810
1811 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1812 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1813 C = &CI->getValue();
1814 return true;
1815 }
1816 if (isa<UndefValue>(Op)) {
1817 C = nullptr;
1818 return true;
1819 }
1820 return false;
1821 }
1822
1823 static Constant *ConstantFoldScalarCall1(StringRef Name,
1824 Intrinsic::ID IntrinsicID,
1825 Type *Ty,
1826 ArrayRef<Constant *> Operands,
1827 const TargetLibraryInfo *TLI,
1828 const CallBase *Call) {
1829 assert(Operands.size() == 1 && "Wrong number of operands.");
1830
1831 if (IntrinsicID == Intrinsic::is_constant) {
1832 // We know we have a "Constant" argument. But we want to only
1833 // return true for manifest constants, not those that depend on
1834 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1835 if (Operands[0]->isManifestConstant())
1836 return ConstantInt::getTrue(Ty->getContext());
1837 return nullptr;
1838 }
1839 if (isa<UndefValue>(Operands[0])) {
1840 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1841 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1842 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
1843 if (IntrinsicID == Intrinsic::cos ||
1844 IntrinsicID == Intrinsic::ctpop ||
1845 IntrinsicID == Intrinsic::fptoui_sat ||
1846 IntrinsicID == Intrinsic::fptosi_sat)
1847 return Constant::getNullValue(Ty);
1848 if (IntrinsicID == Intrinsic::bswap ||
1849 IntrinsicID == Intrinsic::bitreverse ||
1850 IntrinsicID == Intrinsic::launder_invariant_group ||
1851 IntrinsicID == Intrinsic::strip_invariant_group)
1852 return Operands[0];
1853 }
1854
1855 if (isa<ConstantPointerNull>(Operands[0])) {
1856 // launder(null) == null == strip(null) iff in addrspace 0
1857 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1858 IntrinsicID == Intrinsic::strip_invariant_group) {
1859       // If the instruction is not yet placed in a basic block (e.g. when cloning
1860 // a function during inlining), Call's caller may not be available.
1861 // So check Call's BB first before querying Call->getCaller.
1862 const Function *Caller =
1863 Call->getParent() ? Call->getCaller() : nullptr;
1864 if (Caller &&
1865 !NullPointerIsDefined(
1866 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1867 return Operands[0];
1868 }
1869 return nullptr;
1870 }
1871 }
1872
1873 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1874 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1875 APFloat Val(Op->getValueAPF());
1876
1877 bool lost = false;
1878 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1879
1880 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1881 }
1882
1883 APFloat U = Op->getValueAPF();
1884
1885 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
1886 IntrinsicID == Intrinsic::wasm_trunc_unsigned ||
1887 IntrinsicID == Intrinsic::wasm_trunc_saturate_signed ||
1888 IntrinsicID == Intrinsic::wasm_trunc_saturate_unsigned) {
1889
1890 bool Saturating = IntrinsicID == Intrinsic::wasm_trunc_saturate_signed ||
1891 IntrinsicID == Intrinsic::wasm_trunc_saturate_unsigned;
1892 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed ||
1893 IntrinsicID == Intrinsic::wasm_trunc_saturate_signed;
1894
1895 if (U.isNaN())
1896 return Saturating ? ConstantInt::get(Ty, 0) : nullptr;
1897
1898 unsigned Width = Ty->getIntegerBitWidth();
1899 APSInt Int(Width, !Signed);
1900 bool IsExact = false;
1901 APFloat::opStatus Status =
1902 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1903
1904 if (Status == APFloat::opOK || Status == APFloat::opInexact)
1905 return ConstantInt::get(Ty, Int);
1906
1907 if (!Saturating)
1908 return nullptr;
1909
1910 if (U.isNegative())
1911 return Signed ? ConstantInt::get(Ty, APInt::getSignedMinValue(Width))
1912 : ConstantInt::get(Ty, APInt::getMinValue(Width));
1913 else
1914 return Signed ? ConstantInt::get(Ty, APInt::getSignedMaxValue(Width))
1915 : ConstantInt::get(Ty, APInt::getMaxValue(Width));
1916 }
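// Illustrative worked example (editorial note): the saturating signed i32
// truncation of 3.9f folds to 3; for 1e10f the conversion overflows, and
// since the input is positive the saturating form folds to INT32_MAX
// (2147483647). The non-saturating form refuses to fold the overflowing
// case, as the corresponding instruction traps at run time.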
1917
1918 if (IntrinsicID == Intrinsic::fptoui_sat ||
1919 IntrinsicID == Intrinsic::fptosi_sat) {
1920 // convertToInteger() already has the desired saturation semantics.
1921 APSInt Int(Ty->getIntegerBitWidth(),
1922 IntrinsicID == Intrinsic::fptoui_sat);
1923 bool IsExact;
1924 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1925 return ConstantInt::get(Ty, Int);
1926 }
1927
1928 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1929 return nullptr;
1930
1931 // Use internal versions of these intrinsics.
1932
1933 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
1934 U.roundToIntegral(APFloat::rmNearestTiesToEven);
1935 return ConstantFP::get(Ty->getContext(), U);
1936 }
1937
1938 if (IntrinsicID == Intrinsic::round) {
1939 U.roundToIntegral(APFloat::rmNearestTiesToAway);
1940 return ConstantFP::get(Ty->getContext(), U);
1941 }
1942
1943 if (IntrinsicID == Intrinsic::roundeven) {
1944 U.roundToIntegral(APFloat::rmNearestTiesToEven);
1945 return ConstantFP::get(Ty->getContext(), U);
1946 }
1947
1948 if (IntrinsicID == Intrinsic::ceil) {
1949 U.roundToIntegral(APFloat::rmTowardPositive);
1950 return ConstantFP::get(Ty->getContext(), U);
1951 }
1952
1953 if (IntrinsicID == Intrinsic::floor) {
1954 U.roundToIntegral(APFloat::rmTowardNegative);
1955 return ConstantFP::get(Ty->getContext(), U);
1956 }
1957
1958 if (IntrinsicID == Intrinsic::trunc) {
1959 U.roundToIntegral(APFloat::rmTowardZero);
1960 return ConstantFP::get(Ty->getContext(), U);
1961 }
1962
1963 if (IntrinsicID == Intrinsic::fabs) {
1964 U.clearSign();
1965 return ConstantFP::get(Ty->getContext(), U);
1966 }
1967
1968 if (IntrinsicID == Intrinsic::amdgcn_fract) {
1969 // The v_fract instruction behaves like the OpenCL spec, which defines
1970 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
1971 // there to prevent fract(-small) from returning 1.0. It returns the
1972 // largest positive floating-point number less than 1.0."
1973 APFloat FloorU(U);
1974 FloorU.roundToIntegral(APFloat::rmTowardNegative);
1975 APFloat FractU(U - FloorU);
1976 APFloat AlmostOne(U.getSemantics(), 1);
1977 AlmostOne.next(/*nextDown*/ true);
1978 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
1979 }
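// Illustrative worked example (editorial note): fract(-0.25f) computes
// floor(-0.25) = -1.0 and -0.25 - (-1.0) = 0.75. For a tiny negative input
// such as -0x1p-30f, x - floor(x) rounds up to 1.0, and the minimum() with
// 0x1.fffffep-1 clamps the result back below 1.0 per the OpenCL definition
// quoted above.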
1980
1981 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
1982 // raise FP exceptions, unless the argument is signaling NaN.
1983
1984 Optional<APFloat::roundingMode> RM;
1985 switch (IntrinsicID) {
1986 default:
1987 break;
1988 case Intrinsic::experimental_constrained_nearbyint:
1989 case Intrinsic::experimental_constrained_rint: {
1990 auto CI = cast<ConstrainedFPIntrinsic>(Call);
1991 RM = CI->getRoundingMode();
1992 if (!RM || RM.getValue() == RoundingMode::Dynamic)
1993 return nullptr;
1994 break;
1995 }
1996 case Intrinsic::experimental_constrained_round:
1997 RM = APFloat::rmNearestTiesToAway;
1998 break;
1999 case Intrinsic::experimental_constrained_ceil:
2000 RM = APFloat::rmTowardPositive;
2001 break;
2002 case Intrinsic::experimental_constrained_floor:
2003 RM = APFloat::rmTowardNegative;
2004 break;
2005 case Intrinsic::experimental_constrained_trunc:
2006 RM = APFloat::rmTowardZero;
2007 break;
2008 }
2009 if (RM) {
2010 auto CI = cast<ConstrainedFPIntrinsic>(Call);
2011 if (U.isFinite()) {
2012 APFloat::opStatus St = U.roundToIntegral(*RM);
2013 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
2014 St == APFloat::opInexact) {
2015 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2016 if (EB && *EB == fp::ebStrict)
2017 return nullptr;
2018 }
2019 } else if (U.isSignaling()) {
2020 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2021 if (EB && *EB != fp::ebIgnore)
2022 return nullptr;
2023 U = APFloat::getQNaN(U.getSemantics());
2024 }
2025 return ConstantFP::get(Ty->getContext(), U);
2026 }
2027
2028     // We only fold functions with finite arguments. Folding NaN and inf is
2029     // likely to be aborted with an exception anyway, and some host libms
2030     // have known errors raising exceptions.
2031 if (!U.isFinite())
2032 return nullptr;
2033
2034     // APFloat versions of these functions do not exist yet, so we use
2035     // the host native double versions. Float versions are not called
2036     // directly, but for all of these functions (float)(f((double)arg)) ==
2037     // f(arg) holds. Long double is not supported yet.
2038 double V = getValueAsDouble(Op);
2039
2040 switch (IntrinsicID) {
2041 default: break;
2042 case Intrinsic::log:
2043 return ConstantFoldFP(log, V, Ty);
2044 case Intrinsic::log2:
2045 // TODO: What about hosts that lack a C99 library?
2046 return ConstantFoldFP(Log2, V, Ty);
2047 case Intrinsic::log10:
2048 // TODO: What about hosts that lack a C99 library?
2049 return ConstantFoldFP(log10, V, Ty);
2050 case Intrinsic::exp:
2051 return ConstantFoldFP(exp, V, Ty);
2052 case Intrinsic::exp2:
2053 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2054 return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
2055 case Intrinsic::sin:
2056 return ConstantFoldFP(sin, V, Ty);
2057 case Intrinsic::cos:
2058 return ConstantFoldFP(cos, V, Ty);
2059 case Intrinsic::sqrt:
2060 return ConstantFoldFP(sqrt, V, Ty);
2061 case Intrinsic::amdgcn_cos:
2062 case Intrinsic::amdgcn_sin:
2063 if (V < -256.0 || V > 256.0)
2064 // The gfx8 and gfx9 architectures handle arguments outside the range
2065 // [-256, 256] differently. This should be a rare case so bail out
2066 // rather than trying to handle the difference.
2067 return nullptr;
2068 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2069 double V4 = V * 4.0;
2070 if (V4 == floor(V4)) {
2071 // Force exact results for quarter-integer inputs.
2072 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2073 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2074 } else {
2075 if (IsCos)
2076 V = cos(V * 2.0 * numbers::pi);
2077 else
2078 V = sin(V * 2.0 * numbers::pi);
2079 }
2080 return GetConstantFoldFPValue(V, Ty);
2081 }
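// Illustrative worked example (editorial note): amdgcn.cos takes its
// argument in units of full turns, so V = 0.25 denotes cos(pi/2). Since
// 4*V = 1.0 is a quarter integer, the table lookup ((1 + 1) & 3 selects
// SinVals[2]) folds the call to exactly 0.0 instead of the slightly
// inexact double result of cos(0.25 * 2 * pi).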
2082
2083 if (!TLI)
2084 return nullptr;
2085
2086 LibFunc Func = NotLibFunc;
2087 TLI->getLibFunc(Name, Func);
2088 switch (Func) {
2089 default:
2090 break;
2091 case LibFunc_acos:
2092 case LibFunc_acosf:
2093 case LibFunc_acos_finite:
2094 case LibFunc_acosf_finite:
2095 if (TLI->has(Func))
2096 return ConstantFoldFP(acos, V, Ty);
2097 break;
2098 case LibFunc_asin:
2099 case LibFunc_asinf:
2100 case LibFunc_asin_finite:
2101 case LibFunc_asinf_finite:
2102 if (TLI->has(Func))
2103 return ConstantFoldFP(asin, V, Ty);
2104 break;
2105 case LibFunc_atan:
2106 case LibFunc_atanf:
2107 if (TLI->has(Func))
2108 return ConstantFoldFP(atan, V, Ty);
2109 break;
2110 case LibFunc_ceil:
2111 case LibFunc_ceilf:
2112 if (TLI->has(Func)) {
2113 U.roundToIntegral(APFloat::rmTowardPositive);
2114 return ConstantFP::get(Ty->getContext(), U);
2115 }
2116 break;
2117 case LibFunc_cos:
2118 case LibFunc_cosf:
2119 if (TLI->has(Func))
2120 return ConstantFoldFP(cos, V, Ty);
2121 break;
2122 case LibFunc_cosh:
2123 case LibFunc_coshf:
2124 case LibFunc_cosh_finite:
2125 case LibFunc_coshf_finite:
2126 if (TLI->has(Func))
2127 return ConstantFoldFP(cosh, V, Ty);
2128 break;
2129 case LibFunc_exp:
2130 case LibFunc_expf:
2131 case LibFunc_exp_finite:
2132 case LibFunc_expf_finite:
2133 if (TLI->has(Func))
2134 return ConstantFoldFP(exp, V, Ty);
2135 break;
2136 case LibFunc_exp2:
2137 case LibFunc_exp2f:
2138 case LibFunc_exp2_finite:
2139 case LibFunc_exp2f_finite:
2140 if (TLI->has(Func))
2141 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2142 return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
2143 break;
2144 case LibFunc_fabs:
2145 case LibFunc_fabsf:
2146 if (TLI->has(Func)) {
2147 U.clearSign();
2148 return ConstantFP::get(Ty->getContext(), U);
2149 }
2150 break;
2151 case LibFunc_floor:
2152 case LibFunc_floorf:
2153 if (TLI->has(Func)) {
2154 U.roundToIntegral(APFloat::rmTowardNegative);
2155 return ConstantFP::get(Ty->getContext(), U);
2156 }
2157 break;
2158 case LibFunc_log:
2159 case LibFunc_logf:
2160 case LibFunc_log_finite:
2161 case LibFunc_logf_finite:
2162 if (V > 0.0 && TLI->has(Func))
2163 return ConstantFoldFP(log, V, Ty);
2164 break;
2165 case LibFunc_log2:
2166 case LibFunc_log2f:
2167 case LibFunc_log2_finite:
2168 case LibFunc_log2f_finite:
2169 if (V > 0.0 && TLI->has(Func))
2170 // TODO: What about hosts that lack a C99 library?
2171 return ConstantFoldFP(Log2, V, Ty);
2172 break;
2173 case LibFunc_log10:
2174 case LibFunc_log10f:
2175 case LibFunc_log10_finite:
2176 case LibFunc_log10f_finite:
2177 if (V > 0.0 && TLI->has(Func))
2178 // TODO: What about hosts that lack a C99 library?
2179 return ConstantFoldFP(log10, V, Ty);
2180 break;
2181 case LibFunc_nearbyint:
2182 case LibFunc_nearbyintf:
2183 case LibFunc_rint:
2184 case LibFunc_rintf:
2185 if (TLI->has(Func)) {
2186 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2187 return ConstantFP::get(Ty->getContext(), U);
2188 }
2189 break;
2190 case LibFunc_round:
2191 case LibFunc_roundf:
2192 if (TLI->has(Func)) {
2193 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2194 return ConstantFP::get(Ty->getContext(), U);
2195 }
2196 break;
2197 case LibFunc_sin:
2198 case LibFunc_sinf:
2199 if (TLI->has(Func))
2200 return ConstantFoldFP(sin, V, Ty);
2201 break;
2202 case LibFunc_sinh:
2203 case LibFunc_sinhf:
2204 case LibFunc_sinh_finite:
2205 case LibFunc_sinhf_finite:
2206 if (TLI->has(Func))
2207 return ConstantFoldFP(sinh, V, Ty);
2208 break;
2209 case LibFunc_sqrt:
2210 case LibFunc_sqrtf:
2211 if (V >= 0.0 && TLI->has(Func))
2212 return ConstantFoldFP(sqrt, V, Ty);
2213 break;
2214 case LibFunc_tan:
2215 case LibFunc_tanf:
2216 if (TLI->has(Func))
2217 return ConstantFoldFP(tan, V, Ty);
2218 break;
2219 case LibFunc_tanh:
2220 case LibFunc_tanhf:
2221 if (TLI->has(Func))
2222 return ConstantFoldFP(tanh, V, Ty);
2223 break;
2224 case LibFunc_trunc:
2225 case LibFunc_truncf:
2226 if (TLI->has(Func)) {
2227 U.roundToIntegral(APFloat::rmTowardZero);
2228 return ConstantFP::get(Ty->getContext(), U);
2229 }
2230 break;
2231 }
2232 return nullptr;
2233 }
2234
2235 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2236 switch (IntrinsicID) {
2237 case Intrinsic::bswap:
2238 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2239 case Intrinsic::ctpop:
2240 return ConstantInt::get(Ty, Op->getValue().countPopulation());
2241 case Intrinsic::bitreverse:
2242 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2243 case Intrinsic::convert_from_fp16: {
2244 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2245
2246 bool lost = false;
2247 APFloat::opStatus status = Val.convert(
2248 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2249
2250 // Conversion is always precise.
2251 (void)status;
2252 assert(status == APFloat::opOK && !lost &&
2253 "Precision lost during fp16 constfolding");
2254
2255 return ConstantFP::get(Ty->getContext(), Val);
2256 }
2257 default:
2258 return nullptr;
2259 }
2260 }
2261
2262 if (isa<ConstantAggregateZero>(Operands[0])) {
2263 switch (IntrinsicID) {
2264 default: break;
2265 case Intrinsic::vector_reduce_add:
2266 case Intrinsic::vector_reduce_mul:
2267 case Intrinsic::vector_reduce_and:
2268 case Intrinsic::vector_reduce_or:
2269 case Intrinsic::vector_reduce_xor:
2270 case Intrinsic::vector_reduce_smin:
2271 case Intrinsic::vector_reduce_smax:
2272 case Intrinsic::vector_reduce_umin:
2273 case Intrinsic::vector_reduce_umax:
2274 return ConstantInt::get(Ty, 0);
2275 }
2276 }
2277
2278   // Also handle ConstantVector, which is what we get when one of the lanes is undef.
2279 if (isa<ConstantVector>(Operands[0]) ||
2280 isa<ConstantDataVector>(Operands[0])) {
2281 auto *Op = cast<Constant>(Operands[0]);
2282 switch (IntrinsicID) {
2283 default: break;
2284 case Intrinsic::vector_reduce_add:
2285 case Intrinsic::vector_reduce_mul:
2286 case Intrinsic::vector_reduce_and:
2287 case Intrinsic::vector_reduce_or:
2288 case Intrinsic::vector_reduce_xor:
2289 case Intrinsic::vector_reduce_smin:
2290 case Intrinsic::vector_reduce_smax:
2291 case Intrinsic::vector_reduce_umin:
2292 case Intrinsic::vector_reduce_umax:
2293 if (Constant *C = ConstantFoldVectorReduce(IntrinsicID, Op))
2294 return C;
2295 break;
2296 case Intrinsic::x86_sse_cvtss2si:
2297 case Intrinsic::x86_sse_cvtss2si64:
2298 case Intrinsic::x86_sse2_cvtsd2si:
2299 case Intrinsic::x86_sse2_cvtsd2si64:
2300 if (ConstantFP *FPOp =
2301 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2302 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2303 /*roundTowardZero=*/false, Ty,
2304 /*IsSigned*/true);
2305 break;
2306 case Intrinsic::x86_sse_cvttss2si:
2307 case Intrinsic::x86_sse_cvttss2si64:
2308 case Intrinsic::x86_sse2_cvttsd2si:
2309 case Intrinsic::x86_sse2_cvttsd2si64:
2310 if (ConstantFP *FPOp =
2311 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2312 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2313 /*roundTowardZero=*/true, Ty,
2314 /*IsSigned*/true);
2315 break;
2316 }
2317 }
2318
2319 return nullptr;
2320 }
2321
2322 static Constant *ConstantFoldScalarCall2(StringRef Name,
2323 Intrinsic::ID IntrinsicID,
2324 Type *Ty,
2325 ArrayRef<Constant *> Operands,
2326 const TargetLibraryInfo *TLI,
2327 const CallBase *Call) {
2328 assert(Operands.size() == 2 && "Wrong number of operands.");
2329
2330 if (Ty->isFloatingPointTy()) {
2331 // TODO: We should have undef handling for all of the FP intrinsics that
2332 // are attempted to be folded in this function.
2333 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2334 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2335 switch (IntrinsicID) {
2336 case Intrinsic::maxnum:
2337 case Intrinsic::minnum:
2338 case Intrinsic::maximum:
2339 case Intrinsic::minimum:
2340 // If one argument is undef, return the other argument.
2341 if (IsOp0Undef)
2342 return Operands[1];
2343 if (IsOp1Undef)
2344 return Operands[0];
2345 break;
2346 }
2347 }
2348
2349 if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2350 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2351 return nullptr;
2352 double Op1V = getValueAsDouble(Op1);
2353
2354 if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2355 if (Op2->getType() != Op1->getType())
2356 return nullptr;
2357
2358 double Op2V = getValueAsDouble(Op2);
2359 if (IntrinsicID == Intrinsic::pow) {
2360 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2361 }
2362 if (IntrinsicID == Intrinsic::copysign) {
2363 APFloat V1 = Op1->getValueAPF();
2364 const APFloat &V2 = Op2->getValueAPF();
2365 V1.copySign(V2);
2366 return ConstantFP::get(Ty->getContext(), V1);
2367 }
2368
2369 if (IntrinsicID == Intrinsic::minnum) {
2370 const APFloat &C1 = Op1->getValueAPF();
2371 const APFloat &C2 = Op2->getValueAPF();
2372 return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
2373 }
2374
2375 if (IntrinsicID == Intrinsic::maxnum) {
2376 const APFloat &C1 = Op1->getValueAPF();
2377 const APFloat &C2 = Op2->getValueAPF();
2378 return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
2379 }
2380
2381 if (IntrinsicID == Intrinsic::minimum) {
2382 const APFloat &C1 = Op1->getValueAPF();
2383 const APFloat &C2 = Op2->getValueAPF();
2384 return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
2385 }
2386
2387 if (IntrinsicID == Intrinsic::maximum) {
2388 const APFloat &C1 = Op1->getValueAPF();
2389 const APFloat &C2 = Op2->getValueAPF();
2390 return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
2391 }
2392
2393 if (IntrinsicID == Intrinsic::amdgcn_fmul_legacy) {
2394 const APFloat &C1 = Op1->getValueAPF();
2395 const APFloat &C2 = Op2->getValueAPF();
2396 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2397 // NaN or infinity, gives +0.0.
2398 if (C1.isZero() || C2.isZero())
2399 return ConstantFP::getNullValue(Ty);
2400 return ConstantFP::get(Ty->getContext(), C1 * C2);
2401 }
2402
2403 if (!TLI)
2404 return nullptr;
2405
2406 LibFunc Func = NotLibFunc;
2407 TLI->getLibFunc(Name, Func);
2408 switch (Func) {
2409 default:
2410 break;
2411 case LibFunc_pow:
2412 case LibFunc_powf:
2413 case LibFunc_pow_finite:
2414 case LibFunc_powf_finite:
2415 if (TLI->has(Func))
2416 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2417 break;
2418 case LibFunc_fmod:
2419 case LibFunc_fmodf:
2420 if (TLI->has(Func)) {
2421 APFloat V = Op1->getValueAPF();
2422 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2423 return ConstantFP::get(Ty->getContext(), V);
2424 }
2425 break;
2426 case LibFunc_remainder:
2427 case LibFunc_remainderf:
2428 if (TLI->has(Func)) {
2429 APFloat V = Op1->getValueAPF();
2430 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2431 return ConstantFP::get(Ty->getContext(), V);
2432 }
2433 break;
2434 case LibFunc_atan2:
2435 case LibFunc_atan2f:
2436 case LibFunc_atan2_finite:
2437 case LibFunc_atan2f_finite:
2438 if (TLI->has(Func))
2439 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2440 break;
2441 }
2442 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2443       if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy()) {
2444         // Compute in float and round the result back to half so the folded
2445         // constant matches the call's half type (a bare float APFloat here
2446         // would produce a mistyped ConstantFP).
        APFloat Res((float)std::pow((float)Op1V, (int)Op2C->getZExtValue()));
        bool Unused;
        Res.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &Unused);
        return ConstantFP::get(Ty->getContext(), Res);
      }
2447 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2448 return ConstantFP::get(Ty->getContext(),
2449 APFloat((float)std::pow((float)Op1V,
2450 (int)Op2C->getZExtValue())));
2451 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2452 return ConstantFP::get(Ty->getContext(),
2453 APFloat((double)std::pow((double)Op1V,
2454 (int)Op2C->getZExtValue())));
2455
2456 if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
2457 // FIXME: Should flush denorms depending on FP mode, but that's ignored
2458 // everywhere else.
2459
2460 // scalbn is equivalent to ldexp with float radix 2
2461 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
2462 APFloat::rmNearestTiesToEven);
2463 return ConstantFP::get(Ty->getContext(), Result);
2464 }
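// Illustrative worked examples (editorial note): powi(2.0, 10) folds via
// std::pow to 1024.0, and amdgcn.ldexp(1.5, 4) folds via scalbn to
// 1.5 * 2^4 = 24.0.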
2465 }
2466 return nullptr;
2467 }
2468
2469 if (Operands[0]->getType()->isIntegerTy() &&
2470 Operands[1]->getType()->isIntegerTy()) {
2471 const APInt *C0, *C1;
2472 if (!getConstIntOrUndef(Operands[0], C0) ||
2473 !getConstIntOrUndef(Operands[1], C1))
2474 return nullptr;
2475
2476 unsigned BitWidth = Ty->getScalarSizeInBits();
2477 switch (IntrinsicID) {
2478 default: break;
2479 case Intrinsic::smax:
2480 if (!C0 && !C1)
2481 return UndefValue::get(Ty);
2482 if (!C0 || !C1)
2483 return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
2484 return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1);
2485
2486 case Intrinsic::smin:
2487 if (!C0 && !C1)
2488 return UndefValue::get(Ty);
2489 if (!C0 || !C1)
2490 return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth));
2491 return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1);
2492
2493 case Intrinsic::umax:
2494 if (!C0 && !C1)
2495 return UndefValue::get(Ty);
2496 if (!C0 || !C1)
2497 return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth));
2498 return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1);
2499
2500 case Intrinsic::umin:
2501 if (!C0 && !C1)
2502 return UndefValue::get(Ty);
2503 if (!C0 || !C1)
2504 return ConstantInt::get(Ty, APInt::getMinValue(BitWidth));
2505 return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1);
2506
2507 case Intrinsic::usub_with_overflow:
2508 case Intrinsic::ssub_with_overflow:
2509 case Intrinsic::uadd_with_overflow:
2510 case Intrinsic::sadd_with_overflow:
2511 // X - undef -> { undef, false }
2512 // undef - X -> { undef, false }
2513 // X + undef -> { undef, false }
2514       // undef + X -> { undef, false }
2515 if (!C0 || !C1) {
2516 return ConstantStruct::get(
2517 cast<StructType>(Ty),
2518 {UndefValue::get(Ty->getStructElementType(0)),
2519 Constant::getNullValue(Ty->getStructElementType(1))});
2520 }
2521 LLVM_FALLTHROUGH;
2522 case Intrinsic::smul_with_overflow:
2523 case Intrinsic::umul_with_overflow: {
2524 // undef * X -> { 0, false }
2525 // X * undef -> { 0, false }
2526 if (!C0 || !C1)
2527 return Constant::getNullValue(Ty);
2528
2529 APInt Res;
2530 bool Overflow;
2531 switch (IntrinsicID) {
2532 default: llvm_unreachable("Invalid case");
2533 case Intrinsic::sadd_with_overflow:
2534 Res = C0->sadd_ov(*C1, Overflow);
2535 break;
2536 case Intrinsic::uadd_with_overflow:
2537 Res = C0->uadd_ov(*C1, Overflow);
2538 break;
2539 case Intrinsic::ssub_with_overflow:
2540 Res = C0->ssub_ov(*C1, Overflow);
2541 break;
2542 case Intrinsic::usub_with_overflow:
2543 Res = C0->usub_ov(*C1, Overflow);
2544 break;
2545 case Intrinsic::smul_with_overflow:
2546 Res = C0->smul_ov(*C1, Overflow);
2547 break;
2548 case Intrinsic::umul_with_overflow:
2549 Res = C0->umul_ov(*C1, Overflow);
2550 break;
2551 }
2552 Constant *Ops[] = {
2553 ConstantInt::get(Ty->getContext(), Res),
2554 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2555 };
2556 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2557 }
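// Illustrative worked example (editorial note):
// @llvm.uadd.with.overflow.i8(i8 200, i8 100) wraps to 200 + 100 - 256 = 44
// with the overflow bit set, so it folds to { i8 44, i1 true }.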
2558 case Intrinsic::uadd_sat:
2559 case Intrinsic::sadd_sat:
2560 if (!C0 && !C1)
2561 return UndefValue::get(Ty);
2562 if (!C0 || !C1)
2563 return Constant::getAllOnesValue(Ty);
2564 if (IntrinsicID == Intrinsic::uadd_sat)
2565 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2566 else
2567 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2568 case Intrinsic::usub_sat:
2569 case Intrinsic::ssub_sat:
2570 if (!C0 && !C1)
2571 return UndefValue::get(Ty);
2572 if (!C0 || !C1)
2573 return Constant::getNullValue(Ty);
2574 if (IntrinsicID == Intrinsic::usub_sat)
2575 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2576 else
2577 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2578 case Intrinsic::cttz:
2579 case Intrinsic::ctlz:
2580 assert(C1 && "Must be constant int");
2581
2582 // cttz(0, 1) and ctlz(0, 1) are undef.
2583 if (C1->isOneValue() && (!C0 || C0->isNullValue()))
2584 return UndefValue::get(Ty);
2585 if (!C0)
2586 return Constant::getNullValue(Ty);
2587 if (IntrinsicID == Intrinsic::cttz)
2588 return ConstantInt::get(Ty, C0->countTrailingZeros());
2589 else
2590 return ConstantInt::get(Ty, C0->countLeadingZeros());
2591
2592 case Intrinsic::abs:
2593 // Undef or minimum val operand with poison min --> undef
2594 assert(C1 && "Must be constant int");
2595 if (C1->isOneValue() && (!C0 || C0->isMinSignedValue()))
2596 return UndefValue::get(Ty);
2597
2598 // Undef operand with no poison min --> 0 (sign bit must be clear)
2599 if (C1->isNullValue() && !C0)
2600 return Constant::getNullValue(Ty);
2601
2602 return ConstantInt::get(Ty, C0->abs());
2603 }
2604
2605 return nullptr;
2606 }
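// Illustrative worked examples for the saturating cases above (editorial
// note): @llvm.uadd.sat.i8(i8 200, i8 100) clamps to 255, and
// @llvm.ssub.sat.i8(i8 -100, i8 100) clamps to -128, mirroring
// APInt::uadd_sat and APInt::ssub_sat.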
2607
2608   // Also handle ConstantVector, which is what we get when one of the lanes is undef.
2609 if ((isa<ConstantVector>(Operands[0]) ||
2610 isa<ConstantDataVector>(Operands[0])) &&
2611 // Check for default rounding mode.
2612 // FIXME: Support other rounding modes?
2613 isa<ConstantInt>(Operands[1]) &&
2614 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2615 auto *Op = cast<Constant>(Operands[0]);
2616 switch (IntrinsicID) {
2617 default: break;
2618 case Intrinsic::x86_avx512_vcvtss2si32:
2619 case Intrinsic::x86_avx512_vcvtss2si64:
2620 case Intrinsic::x86_avx512_vcvtsd2si32:
2621 case Intrinsic::x86_avx512_vcvtsd2si64:
2622 if (ConstantFP *FPOp =
2623 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2624 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2625 /*roundTowardZero=*/false, Ty,
2626 /*IsSigned*/true);
2627 break;
2628 case Intrinsic::x86_avx512_vcvtss2usi32:
2629 case Intrinsic::x86_avx512_vcvtss2usi64:
2630 case Intrinsic::x86_avx512_vcvtsd2usi32:
2631 case Intrinsic::x86_avx512_vcvtsd2usi64:
2632 if (ConstantFP *FPOp =
2633 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2634 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2635 /*roundTowardZero=*/false, Ty,
2636 /*IsSigned*/false);
2637 break;
2638 case Intrinsic::x86_avx512_cvttss2si:
2639 case Intrinsic::x86_avx512_cvttss2si64:
2640 case Intrinsic::x86_avx512_cvttsd2si:
2641 case Intrinsic::x86_avx512_cvttsd2si64:
2642 if (ConstantFP *FPOp =
2643 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2644 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2645 /*roundTowardZero=*/true, Ty,
2646 /*IsSigned*/true);
2647 break;
2648 case Intrinsic::x86_avx512_cvttss2usi:
2649 case Intrinsic::x86_avx512_cvttss2usi64:
2650 case Intrinsic::x86_avx512_cvttsd2usi:
2651 case Intrinsic::x86_avx512_cvttsd2usi64:
2652 if (ConstantFP *FPOp =
2653 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2654 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2655 /*roundTowardZero=*/true, Ty,
2656 /*IsSigned*/false);
2657 break;
2658 }
2659 }
2660 return nullptr;
2661 }
2662
2663 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2664 const APFloat &S0,
2665 const APFloat &S1,
2666 const APFloat &S2) {
2667 unsigned ID;
2668 const fltSemantics &Sem = S0.getSemantics();
2669 APFloat MA(Sem), SC(Sem), TC(Sem);
2670 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2671 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2672 // S2 < 0
2673 ID = 5;
2674 SC = -S0;
2675 } else {
2676 ID = 4;
2677 SC = S0;
2678 }
2679 MA = S2;
2680 TC = -S1;
2681 } else if (abs(S1) >= abs(S0)) {
2682 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2683 // S1 < 0
2684 ID = 3;
2685 TC = -S2;
2686 } else {
2687 ID = 2;
2688 TC = S2;
2689 }
2690 MA = S1;
2691 SC = S0;
2692 } else {
2693 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2694 // S0 < 0
2695 ID = 1;
2696 SC = S2;
2697 } else {
2698 ID = 0;
2699 SC = -S2;
2700 }
2701 MA = S0;
2702 TC = -S1;
2703 }
2704 switch (IntrinsicID) {
2705 default:
2706 llvm_unreachable("unhandled amdgcn cube intrinsic");
2707 case Intrinsic::amdgcn_cubeid:
2708 return APFloat(Sem, ID);
2709 case Intrinsic::amdgcn_cubema:
2710 return MA + MA;
2711 case Intrinsic::amdgcn_cubesc:
2712 return SC;
2713 case Intrinsic::amdgcn_cubetc:
2714 return TC;
2715 }
2716 }
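// Illustrative worked example (editorial note): for (S0, S1, S2) =
// (1.0, 2.0, 3.0) the major axis is +Z (|S2| is largest and S2 >= 0), so
// ID = 4, MA = 3.0, SC = 1.0 and TC = -2.0. Hence cubeid folds to 4.0 and
// cubema to 2 * MA = 6.0.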
2717
2718 static Constant *ConstantFoldScalarCall3(StringRef Name,
2719 Intrinsic::ID IntrinsicID,
2720 Type *Ty,
2721 ArrayRef<Constant *> Operands,
2722 const TargetLibraryInfo *TLI,
2723 const CallBase *Call) {
2724 assert(Operands.size() == 3 && "Wrong number of operands.");
2725
2726 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2727 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2728 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2729 switch (IntrinsicID) {
2730 default: break;
2731 case Intrinsic::amdgcn_fma_legacy: {
2732 const APFloat &C1 = Op1->getValueAPF();
2733 const APFloat &C2 = Op2->getValueAPF();
2734 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2735 // NaN or infinity, gives +0.0.
2736 if (C1.isZero() || C2.isZero()) {
2737 const APFloat &C3 = Op3->getValueAPF();
2738 // It's tempting to just return C3 here, but that would give the
2739 // wrong result if C3 was -0.0.
2740 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
2741 }
2742 LLVM_FALLTHROUGH;
2743 }
2744 case Intrinsic::fma:
2745 case Intrinsic::fmuladd: {
2746 APFloat V = Op1->getValueAPF();
2747 V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
2748 APFloat::rmNearestTiesToEven);
2749 return ConstantFP::get(Ty->getContext(), V);
2750 }
2751 case Intrinsic::amdgcn_cubeid:
2752 case Intrinsic::amdgcn_cubema:
2753 case Intrinsic::amdgcn_cubesc:
2754 case Intrinsic::amdgcn_cubetc: {
2755 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(
2756 IntrinsicID, Op1->getValueAPF(), Op2->getValueAPF(),
2757 Op3->getValueAPF());
2758 return ConstantFP::get(Ty->getContext(), V);
2759 }
2760 }
2761 }
2762 }
2763 }
2764
2765 if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
2766 if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
2767 if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
2768 switch (IntrinsicID) {
2769 default: break;
2770 case Intrinsic::smul_fix:
2771 case Intrinsic::smul_fix_sat: {
2772 // This code performs rounding towards negative infinity in case the
2773 // result cannot be represented exactly for the given scale. Targets
2774 // that do care about rounding should use a target hook for specifying
2775 // how rounding should be done, and provide their own folding to be
2776 // consistent with rounding. This is the same approach as used by
2777 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
2778 const APInt &Lhs = Op1->getValue();
2779 const APInt &Rhs = Op2->getValue();
2780 unsigned Scale = Op3->getValue().getZExtValue();
2781 unsigned Width = Lhs.getBitWidth();
2782 assert(Scale < Width && "Illegal scale.");
2783 unsigned ExtendedWidth = Width * 2;
2784 APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
2785 Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
2786 if (IntrinsicID == Intrinsic::smul_fix_sat) {
2787 APInt MaxValue =
2788 APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
2789 APInt MinValue =
2790 APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
2791 Product = APIntOps::smin(Product, MaxValue);
2792 Product = APIntOps::smax(Product, MinValue);
2793 }
2794 return ConstantInt::get(Ty->getContext(),
2795 Product.sextOrTrunc(Width));
2796 }
2797 }
2798 }
2799 }
2800 }
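// Illustrative worked example (editorial note):
// @llvm.smul.fix.i32(i32 3, i32 2, i32 1) treats both operands as having one
// fractional bit (1.5 and 1.0); the double-width product 6 shifted right by
// the scale yields the raw result 3, i.e. the fixed-point value 1.5.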
2801
2802 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2803 const APInt *C0, *C1, *C2;
2804 if (!getConstIntOrUndef(Operands[0], C0) ||
2805 !getConstIntOrUndef(Operands[1], C1) ||
2806 !getConstIntOrUndef(Operands[2], C2))
2807 return nullptr;
2808
2809 bool IsRight = IntrinsicID == Intrinsic::fshr;
2810 if (!C2)
2811 return Operands[IsRight ? 1 : 0];
2812 if (!C0 && !C1)
2813 return UndefValue::get(Ty);
2814
2815 // The shift amount is interpreted as modulo the bitwidth. If the shift
2816 // amount is effectively 0, avoid UB due to oversized inverse shift below.
2817 unsigned BitWidth = C2->getBitWidth();
2818 unsigned ShAmt = C2->urem(BitWidth);
2819 if (!ShAmt)
2820 return Operands[IsRight ? 1 : 0];
2821
2822 // (C0 << ShlAmt) | (C1 >> LshrAmt)
2823 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2824 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2825 if (!C0)
2826 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2827 if (!C1)
2828 return ConstantInt::get(Ty, C0->shl(ShlAmt));
2829 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2830 }
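// Illustrative worked example (editorial note):
// @llvm.fshl.i8(i8 0xAB, i8 0xCD, i8 4) left-shifts the concatenation 0xABCD
// by 4 and keeps the high byte: (0xAB << 4) | (0xCD >> 4) = 0xB0 | 0x0C =
// 0xBC.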
2831
2832 return nullptr;
2833 }
2834
2835 static Constant *ConstantFoldScalarCall(StringRef Name,
2836 Intrinsic::ID IntrinsicID,
2837 Type *Ty,
2838 ArrayRef<Constant *> Operands,
2839 const TargetLibraryInfo *TLI,
2840 const CallBase *Call) {
2841 if (Operands.size() == 1)
2842 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
2843
2844 if (Operands.size() == 2)
2845 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
2846
2847 if (Operands.size() == 3)
2848 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
2849
2850 return nullptr;
2851 }
2852
2853 static Constant *ConstantFoldVectorCall(StringRef Name,
2854 Intrinsic::ID IntrinsicID,
2855 VectorType *VTy,
2856 ArrayRef<Constant *> Operands,
2857 const DataLayout &DL,
2858 const TargetLibraryInfo *TLI,
2859 const CallBase *Call) {
2860   // Do not iterate over a scalable vector; the number of elements is unknown
2861   // at compile time.
2862 if (isa<ScalableVectorType>(VTy))
2863 return nullptr;
2864
2865 auto *FVTy = cast<FixedVectorType>(VTy);
2866
2867 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
2868 SmallVector<Constant *, 4> Lane(Operands.size());
2869 Type *Ty = FVTy->getElementType();
2870
2871 switch (IntrinsicID) {
2872 case Intrinsic::masked_load: {
2873 auto *SrcPtr = Operands[0];
2874 auto *Mask = Operands[2];
2875 auto *Passthru = Operands[3];
2876
2877 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
2878
2879 SmallVector<Constant *, 32> NewElements;
2880 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
2881 auto *MaskElt = Mask->getAggregateElement(I);
2882 if (!MaskElt)
2883 break;
2884 auto *PassthruElt = Passthru->getAggregateElement(I);
2885 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2886 if (isa<UndefValue>(MaskElt)) {
2887 if (PassthruElt)
2888 NewElements.push_back(PassthruElt);
2889 else if (VecElt)
2890 NewElements.push_back(VecElt);
2891 else
2892           return nullptr;
        // The undef-mask lane has been resolved; continue to the next lane
        // rather than falling through to the null/one checks below, which
        // would otherwise reject the fold.
        continue;
2893       }
2894 if (MaskElt->isNullValue()) {
2895 if (!PassthruElt)
2896 return nullptr;
2897 NewElements.push_back(PassthruElt);
2898 } else if (MaskElt->isOneValue()) {
2899 if (!VecElt)
2900 return nullptr;
2901 NewElements.push_back(VecElt);
2902 } else {
2903 return nullptr;
2904 }
2905 }
2906 if (NewElements.size() != FVTy->getNumElements())
2907 return nullptr;
2908 return ConstantVector::get(NewElements);
2909 }
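// Illustrative worked example (editorial note): a masked load of
// <2 x i32> <i32 7, i32 9> from a constant global with mask <i1 1, i1 0> and
// passthru <i32 undef, i32 5> takes lane 0 from the loaded data and lane 1
// from the passthru, folding to <i32 7, i32 5>.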
2910 case Intrinsic::arm_mve_vctp8:
2911 case Intrinsic::arm_mve_vctp16:
2912 case Intrinsic::arm_mve_vctp32:
2913 case Intrinsic::arm_mve_vctp64: {
2914 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2915 unsigned Lanes = FVTy->getNumElements();
2916 uint64_t Limit = Op->getZExtValue();
2917       // vctp64 is currently modelled as returning a v4i1, not a v2i1. Make
2918 // sure we get the limit right in that case and set all relevant lanes.
2919 if (IntrinsicID == Intrinsic::arm_mve_vctp64)
2920 Limit *= 2;
2921
2922 SmallVector<Constant *, 16> NCs;
2923 for (unsigned i = 0; i < Lanes; i++) {
2924 if (i < Limit)
2925 NCs.push_back(ConstantInt::getTrue(Ty));
2926 else
2927 NCs.push_back(ConstantInt::getFalse(Ty));
2928 }
2929 return ConstantVector::get(NCs);
2930 }
2931 break;
2932 }
2933 case Intrinsic::get_active_lane_mask: {
2934 auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
2935 auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
2936 if (Op0 && Op1) {
2937 unsigned Lanes = FVTy->getNumElements();
2938 uint64_t Base = Op0->getZExtValue();
2939 uint64_t Limit = Op1->getZExtValue();
2940
2941 SmallVector<Constant *, 16> NCs;
2942 for (unsigned i = 0; i < Lanes; i++) {
2943 if (Base + i < Limit)
2944 NCs.push_back(ConstantInt::getTrue(Ty));
2945 else
2946 NCs.push_back(ConstantInt::getFalse(Ty));
2947 }
2948 return ConstantVector::get(NCs);
2949 }
2950 break;
2951 }
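// Illustrative worked example (editorial note):
// @llvm.get.active.lane.mask.v4i1.i32 with base 6 and trip count 8 sets
// exactly the lanes where 6 + i < 8, folding to <i1 1, i1 1, i1 0, i1 0>.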
2952 default:
2953 break;
2954 }
2955
2956 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
2957 // Gather a column of constants.
2958 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
2959 // Some intrinsics use a scalar type for certain arguments.
2960 if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
2961 Lane[J] = Operands[J];
2962 continue;
2963 }
2964
2965 Constant *Agg = Operands[J]->getAggregateElement(I);
2966 if (!Agg)
2967 return nullptr;
2968
2969 Lane[J] = Agg;
2970 }
2971
2972 // Use the regular scalar folding to simplify this column.
2973 Constant *Folded =
2974 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
2975 if (!Folded)
2976 return nullptr;
2977 Result[I] = Folded;
2978 }
2979
2980 return ConstantVector::get(Result);
2981 }
2982
2983 } // end anonymous namespace
2984
2985 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
2986 ArrayRef<Constant *> Operands,
2987 const TargetLibraryInfo *TLI) {
2988 if (Call->isNoBuiltin())
2989 return nullptr;
2990 if (!F->hasName())
2991 return nullptr;
2992 StringRef Name = F->getName();
2993
2994 Type *Ty = F->getReturnType();
2995
2996 if (auto *VTy = dyn_cast<VectorType>(Ty))
2997 return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
2998 F->getParent()->getDataLayout(), TLI, Call);
2999
3000 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
3001 Call);
3002 }
3003
3004 bool llvm::isMathLibCallNoop(const CallBase *Call,
3005 const TargetLibraryInfo *TLI) {
3006 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3007 // (and to some extent ConstantFoldScalarCall).
3008 if (Call->isNoBuiltin() || Call->isStrictFP())
3009 return false;
3010 Function *F = Call->getCalledFunction();
3011 if (!F)
3012 return false;
3013
3014 LibFunc Func;
3015 if (!TLI || !TLI->getLibFunc(*F, Func))
3016 return false;
3017
3018 if (Call->getNumArgOperands() == 1) {
3019 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3020 const APFloat &Op = OpC->getValueAPF();
3021 switch (Func) {
3022 case LibFunc_logl:
3023 case LibFunc_log:
3024 case LibFunc_logf:
3025 case LibFunc_log2l:
3026 case LibFunc_log2:
3027 case LibFunc_log2f:
3028 case LibFunc_log10l:
3029 case LibFunc_log10:
3030 case LibFunc_log10f:
3031 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
3032
3033 case LibFunc_expl:
3034 case LibFunc_exp:
3035 case LibFunc_expf:
3036 // FIXME: These boundaries are slightly conservative.
3037 if (OpC->getType()->isDoubleTy())
3038 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
3039 if (OpC->getType()->isFloatTy())
3040 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
3041 break;
3042
3043 case LibFunc_exp2l:
3044 case LibFunc_exp2:
3045 case LibFunc_exp2f:
3046 // FIXME: These boundaries are slightly conservative.
3047 if (OpC->getType()->isDoubleTy())
3048 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
3049 if (OpC->getType()->isFloatTy())
3050 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
3051 break;
3052
3053 case LibFunc_sinl:
3054 case LibFunc_sin:
3055 case LibFunc_sinf:
3056 case LibFunc_cosl:
3057 case LibFunc_cos:
3058 case LibFunc_cosf:
3059 return !Op.isInfinity();
3060
3061 case LibFunc_tanl:
3062 case LibFunc_tan:
3063 case LibFunc_tanf: {
3064 // FIXME: Stop using the host math library.
3065 // FIXME: The computation isn't done in the right precision.
3066 Type *Ty = OpC->getType();
3067 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
3068 double OpV = getValueAsDouble(OpC);
3069 return ConstantFoldFP(tan, OpV, Ty) != nullptr;
3070 }
3071 break;
3072 }
3073
3074 case LibFunc_asinl:
3075 case LibFunc_asin:
3076 case LibFunc_asinf:
3077 case LibFunc_acosl:
3078 case LibFunc_acos:
3079 case LibFunc_acosf:
3080 return !(Op < APFloat(Op.getSemantics(), "-1") ||
3081 Op > APFloat(Op.getSemantics(), "1"));
3082
3083 case LibFunc_sinh:
3084 case LibFunc_cosh:
3085 case LibFunc_sinhf:
3086 case LibFunc_coshf:
3087 case LibFunc_sinhl:
3088 case LibFunc_coshl:
3089 // FIXME: These boundaries are slightly conservative.
3090 if (OpC->getType()->isDoubleTy())
3091 return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
3092 if (OpC->getType()->isFloatTy())
3093 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
3094 break;
3095
3096 case LibFunc_sqrtl:
3097 case LibFunc_sqrt:
3098 case LibFunc_sqrtf:
3099 return Op.isNaN() || Op.isZero() || !Op.isNegative();
3100
3101 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
3102 // maybe others?
3103 default:
3104 break;
3105 }
3106 }
3107 }
3108
3109 if (Call->getNumArgOperands() == 2) {
3110 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
3111 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
3112 if (Op0C && Op1C) {
3113 const APFloat &Op0 = Op0C->getValueAPF();
3114 const APFloat &Op1 = Op1C->getValueAPF();
3115
3116 switch (Func) {
3117 case LibFunc_powl:
3118 case LibFunc_pow:
3119 case LibFunc_powf: {
3120 // FIXME: Stop using the host math library.
3121 // FIXME: The computation isn't done in the right precision.
3122 Type *Ty = Op0C->getType();
3123 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
3124 if (Ty == Op1C->getType()) {
3125 double Op0V = getValueAsDouble(Op0C);
3126 double Op1V = getValueAsDouble(Op1C);
3127 return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
3128 }
3129 }
3130 break;
3131 }
3132
3133 case LibFunc_fmodl:
3134 case LibFunc_fmod:
3135 case LibFunc_fmodf:
3136 case LibFunc_remainderl:
3137 case LibFunc_remainder:
3138 case LibFunc_remainderf:
3139 return Op0.isNaN() || Op1.isNaN() ||
3140 (!Op0.isInfinity() && !Op1.isZero());
3141
3142 default:
3143 break;
3144 }
3145 }
3146 }
3147
3148 return false;
3149 }
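// Usage sketch (editorial note): under this predicate a dead call to
// sqrt(4.0) is a no-op (it can neither set errno nor raise an FP exception)
// and may be deleted, whereas sqrt(-1.0) may set errno to EDOM and is
// therefore not reported as a no-op.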
3150
3151 void TargetFolder::anchor() {}
3152