//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAArch64.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/IR/IntrinsicsWebAssembly.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
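  // For example, on a little-endian target, <2 x i16> <i16 0x1234, i16 0x5678>
  // packs to the i32 value 0x56781234 (the last element lands in the high
  // bits), while a big-endian target yields 0x12345678.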
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
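/// For example, "bitcast (i64 1 to <2 x i32>)" folds to
/// <2 x i32> <i32 1, i32 0> on a little-endian target.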
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = cast<FixedVectorType>(VTy)->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating-point values, convert it to a
      // vector of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = cast<FixedVectorType>(DestVTy)->getNumElements();
  unsigned NumSrcElt = cast<FixedVectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first. We only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with the same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(Src,
                                  ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
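/// For example, for "getelementptr ([5 x i32], [5 x i32]* @a, i32 0, i32 2)"
/// this sets GV to @a and Offset to 8 (two 4-byte elements).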
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL,
                                      DSOLocalEquivalent **DSOEquiv) {
  if (DSOEquiv)
    *DSOEquiv = nullptr;

  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  if (auto *FoundDSOEquiv = dyn_cast<DSOLocalEquivalent>(C)) {
    if (DSOEquiv)
      *DSOEquiv = FoundDSOEquiv;
    GV = FoundDSOEquiv->getGlobalValue();
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL,
                                      DSOEquiv);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL,
                                  DSOEquiv))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (SrcSize < DestSize)
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (C->isNullValue() && !DestTy->isX86_MMXTy() && !DestTy->isX86_AMXTy())
      return Constant::getNullValue(DestTy);
    if (C->isAllOnesValue() &&
        (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
         DestTy->isVectorTy()) &&
        !DestTy->isX86_AMXTy() && !DestTy->isX86_MMXTy() &&
        !DestTy->isPtrOrPtrVectorTy())
      // Get ones when the input is trivial, but
      // only for supported types inside getAllOnesValue.
      return Constant::getAllOnesValue(DestTy);

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType() && !SrcTy->isVectorTy())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of a global. C is the constant being
/// copied out of. ByteOffset is an offset into C. CurPtr is the pointer to
/// copy results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
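/// For example, copying two bytes at ByteOffset 1 out of the i32 constant
/// 0x11223344 on a little-endian target writes 0x33 and 0x22 into CurPtr.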
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()){
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()){
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<FixedVectorType>(C->getType())->getNumElements();
      EltTy = cast<FixedVectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

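/// Fold a load of LoadTy at byte offset Offset within constant C by
/// reinterpreting C's underlying bytes. For example, loading an i16 at
/// offset 2 from the i64 constant 0x1122334455667788 yields 0x5566 on a
/// little-endian target.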
Constant *FoldReinterpretLoadFromConst(Constant *C, Type *LoadTy,
                                       int64_t Offset, const DataLayout &DL) {
  // Bail out early. We do not expect to load from a scalable global variable.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(
          C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    } else
      return nullptr;

    if (Constant *Res = FoldReinterpretLoadFromConst(C, MapTy, Offset, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
          !LoadTy->isX86_AMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we first converted to a vector of
        // integers; now do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy() &&
            !LoadTy->isX86_AMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  int64_t InitializerSize = DL.getTypeAllocSize(C->getType()).getFixedSize();

  // If the load ends at or before the start of this constant, the result is
  // undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // If the load starts at or past the end of this constant, the result is
  // likewise undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(C, Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

/// If this Offset points exactly to the start of an aggregate element, return
/// that element, otherwise return nullptr.
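/// For example, with Base = [4 x i32] [i32 10, i32 11, i32 12, i32 13], an
/// Offset of 8 returns the element "i32 12", while an Offset of 6 returns
/// nullptr because it points into the middle of an element.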
Constant *getConstantAtOffset(Constant *Base, APInt Offset,
                              const DataLayout &DL) {
  if (Offset.isZero())
    return Base;

  if (!isa<ConstantAggregate>(Base) && !isa<ConstantDataSequential>(Base))
    return nullptr;

  Type *ElemTy = Base->getType();
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (!Offset.isZero() || !Indices[0].isZero())
    return nullptr;

  Constant *C = Base;
  for (const APInt &Index : drop_begin(Indices)) {
    if (Index.isNegative() || Index.getActiveBits() >= 32)
      return nullptr;

    C = C->getAggregateElement(Index.getZExtValue());
    if (!C)
      return nullptr;
  }

  return C;
}

Constant *ConstantFoldLoadFromConst(Constant *C, Type *Ty, const APInt &Offset,
                                    const DataLayout &DL) {
  if (Constant *AtOffset = getConstantAtOffset(C, Offset, DL))
    if (Constant *Result = ConstantFoldLoadThroughBitcast(AtOffset, Ty, DL))
      return Result;

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  if (Offset.getMinSignedBits() <= 64)
    return FoldReinterpretLoadFromConst(C, Ty, Offset.getSExtValue(), DL);

  return nullptr;
}

} // end anonymous namespace

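/// For example, an i32 load from "getelementptr ([4 x i32], [4 x i32]* @g,
/// i64 0, i64 1)", where @g is a constant global, folds to the second
/// element of @g's initializer.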
Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  APInt Offset(DL.getIndexTypeSizeInBits(C->getType()), 0);
  C = cast<Constant>(C->stripAndAccumulateConstantOffsets(
      DL, Offset, /* AllowNonInbounds */ true));

  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      if (Constant *Result = ConstantFoldLoadFromConst(GV->getInitializer(), Ty,
                                                       Offset, DL))
        return Result;

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(getUnderlyingObject(C))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  return nullptr;
}

namespace {

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using DataLayout information from DL.
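/// For example, "and (shl (ptrtoint i8* @g to i64), i64 32), i64 4294967295"
/// folds to 0: the shl's low 32 bits are known zero and the mask keeps only
/// those bits.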
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnes()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnes()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth, so we have to convert to the
        // right size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly casted by the getelementptr.
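/// For example, with a 64-bit index type, an i32 index of -1 is sign-extended
/// to the i64 constant -1 before the getelementptr is reformed.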
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
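/// For example, stripping "addrspacecast (i8 addrspace(1)* @g to i8*)" yields
/// @g in address space 1, which is then cast back to address space 0 so the
/// original pointer's address space is preserved.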
Constant *StripPtrCastKeepAS(Constant *Ptr) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    Ptr = ConstantExpr::getPointerCast(
        Ptr, PointerType::getWithSamePointeeType(NewPtrTy,
                                                 OldPtrTy->getAddressSpace()));
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
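/// For example, "getelementptr ([5 x i32], [5 x i32]* @a, i64 0, i64 5)"
/// is rewritten as "getelementptr ([5 x i32], [5 x i32]* @a, i64 1, i64 0)",
/// eliminating the over-indexed array bound.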
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
  // "inttoptr (sub (ptrtoint Ptr), V)"
  if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
    auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
    assert((!CE || CE->getType() == IntIdxTy) &&
           "CastGEPIndices didn't canonicalize index types!");
    if (CE && CE->getOpcode() == Instruction::Sub &&
        CE->getOperand(0)->isNullValue()) {
      Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
      Res = ConstantExpr::getSub(Res, CE->getOperand(1));
      Res = ConstantExpr::getIntToPtr(Res, ResTy);
      return ConstantFoldConstant(Res, DL, TLI);
    }
  }

  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i]))
      return nullptr;

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value casted to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.

  // For GEPs of GlobalValues, use the value type even for opaque pointers.
  // Otherwise use an i8 GEP.
  if (auto *GV = dyn_cast<GlobalValue>(Ptr))
    SrcElemTy = GV->getValueType();
  else if (!PTy->isOpaque())
    SrcElemTy = PTy->getElementType();
  else
    SrcElemTy = Type::getInt8Ty(Ptr->getContext());

  if (!SrcElemTy->isSized())
    return nullptr;

  Type *ElemTy = SrcElemTy;
  SmallVector<APInt> Indices = DL.getGEPIndicesForOffset(ElemTy, Offset);
  if (Offset != 0)
    return nullptr;

  // Try to add additional zero indices to reach the desired result element
  // type.
  // TODO: Should we avoid extra zero indices if ResElemTy can't be reached and
  // we'll have to insert a bitcast anyway?
  while (ElemTy != ResElemTy) {
    Type *NextTy = GetElementPtrInst::getTypeAtIndex(ElemTy, (uint64_t)0);
    if (!NextTy)
      break;

    Indices.push_back(APInt::getZero(isa<StructType>(ElemTy) ? 32 : BitWidth));
    ElemTy = NextTy;
  }

  SmallVector<Constant *, 32> NewIdxs;
  for (const APInt &Index : Indices)
    NewIdxs.push_back(ConstantInt::get(
        Type::getIntNTy(Ptr->getContext(), Index.getBitWidth()), Index));

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(
      cast<PointerType>(C->getType())->isOpaqueOrPointeeTypeMatches(ElemTy) &&
      "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (C->getType() != ResTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Freeze:
    return isGuaranteedNotToBeUndefOrPoison(Ops[0]) ? Ops[0] : nullptr;
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not
      // to, because that would break the rule that constant folding only
      // applies if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant to
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    if (LI->isVolatile())
      return nullptr;
    return ConstantFoldLoadFromConstPtr(Ops[0], LI->getType(), DL);
  }

  if (auto *IVI = dyn_cast<InsertValueInst>(I))
    return ConstantExpr::getInsertValue(Ops[0], Ops[1], IVI->getIndices());

  if (auto *EVI = dyn_cast<ExtractValueInst>(I))
    return ConstantExpr::getExtractValue(Ops[0], EVI->getIndices());

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is the same width as the
      // pointer, otherwise there is a truncation or extension that we aren't
      // modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is the same width as the
        // pointer, otherwise there is a truncation or extension that we
        // aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      Constant *FoldedValue = nullptr;
      // If the input is an inttoptr, eliminate the pair. This requires knowing
      // the width of a pointer, so it can't be done in ConstantExpr::getCast.
      if (CE->getOpcode() == Instruction::IntToPtr) {
        // zext/trunc the inttoptr to pointer size.
        FoldedValue = ConstantExpr::getIntegerCast(
            CE->getOperand(0), DL.getIntPtrType(CE->getType()),
            /*IsSigned=*/false);
      } else if (auto *GEP = dyn_cast<GEPOperator>(CE)) {
        // If we have GEP, we can perform the following folds:
        // (ptrtoint (gep null, x)) -> x
        // (ptrtoint (gep (gep null, x), y) -> x + y, etc.
        unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
        APInt BaseOffset(BitWidth, 0);
        auto *Base = cast<Constant>(GEP->stripAndAccumulateConstantOffsets(
            DL, BaseOffset, /*AllowNonInbounds=*/true));
        if (Base->isNullValue()) {
          FoldedValue = ConstantInt::get(CE->getContext(), BaseOffset);
        }
      }
      if (FoldedValue) {
        // Do a zext or trunc to get to the ptrtoint dest size.
        return ConstantExpr::getIntegerCast(FoldedValue, DestTy,
                                            /*IsSigned=*/false);
      }
    }
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::IntToPtr:
    // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
    // the int size is >= the ptr size and the address spaces are the same.
    // This requires knowing the width of a pointer, so it can't be done in
    // ConstantExpr::getCast.
    if (auto *CE = dyn_cast<ConstantExpr>(C)) {
      if (CE->getOpcode() == Instruction::PtrToInt) {
        Constant *SrcPtr = CE->getOperand(0);
        unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
        unsigned MidIntSize = CE->getType()->getScalarSizeInBits();

        if (MidIntSize >= SrcPtrSize) {
          unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
          if (SrcAS == DestTy->getPointerAddressSpace())
            return FoldBitCast(CE->getOperand(0), DestTy, DL);
        }
      }
    }

    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::Trunc:
  case Instruction::ZExt:
  case Instruction::SExt:
  case Instruction::FPTrunc:
  case Instruction::FPExt:
  case Instruction::UIToFP:
  case Instruction::SIToFP:
  case Instruction::FPToUI:
  case Instruction::FPToSI:
  case Instruction::AddrSpaceCast:
    return ConstantExpr::getCast(Opcode, C, DestTy);
  case Instruction::BitCast:
    return FoldBitCast(C, DestTy, DL);
  }
}

Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
                                                       ConstantExpr *CE,
                                                       Type *Ty,
                                                       const DataLayout &DL) {
  if (!CE->getOperand(1)->isNullValue())
    return nullptr; // Do not allow stepping over the value!

  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
    C = C->getAggregateElement(CE->getOperand(i));
    if (!C)
      return nullptr;
  }
  return ConstantFoldLoadThroughBitcast(C, Ty, DL);
}

Constant *
llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
                                        ArrayRef<Constant *> Indices) {
  // Loop over all of the operands, tracking down which value we are
  // addressing.
  for (Constant *Index : Indices) {
    C = C->getAggregateElement(Index);
    if (!C)
      return nullptr;
  }
  return C;
}

//===----------------------------------------------------------------------===//
//  Constant Folding for Calls
//

canConstantFoldCallTo(const CallBase * Call,const Function * F)1368 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1369 if (Call->isNoBuiltin())
1370 return false;
1371 switch (F->getIntrinsicID()) {
1372 // Operations that do not operate floating-point numbers and do not depend on
1373 // FP environment can be folded even in strictfp functions.
1374 case Intrinsic::bswap:
1375 case Intrinsic::ctpop:
1376 case Intrinsic::ctlz:
1377 case Intrinsic::cttz:
1378 case Intrinsic::fshl:
1379 case Intrinsic::fshr:
1380 case Intrinsic::launder_invariant_group:
1381 case Intrinsic::strip_invariant_group:
1382 case Intrinsic::masked_load:
1383 case Intrinsic::get_active_lane_mask:
1384 case Intrinsic::abs:
1385 case Intrinsic::smax:
1386 case Intrinsic::smin:
1387 case Intrinsic::umax:
1388 case Intrinsic::umin:
1389 case Intrinsic::sadd_with_overflow:
1390 case Intrinsic::uadd_with_overflow:
1391 case Intrinsic::ssub_with_overflow:
1392 case Intrinsic::usub_with_overflow:
1393 case Intrinsic::smul_with_overflow:
1394 case Intrinsic::umul_with_overflow:
1395 case Intrinsic::sadd_sat:
1396 case Intrinsic::uadd_sat:
1397 case Intrinsic::ssub_sat:
1398 case Intrinsic::usub_sat:
1399 case Intrinsic::smul_fix:
1400 case Intrinsic::smul_fix_sat:
1401 case Intrinsic::bitreverse:
1402 case Intrinsic::is_constant:
1403 case Intrinsic::vector_reduce_add:
1404 case Intrinsic::vector_reduce_mul:
1405 case Intrinsic::vector_reduce_and:
1406 case Intrinsic::vector_reduce_or:
1407 case Intrinsic::vector_reduce_xor:
1408 case Intrinsic::vector_reduce_smin:
1409 case Intrinsic::vector_reduce_smax:
1410 case Intrinsic::vector_reduce_umin:
1411 case Intrinsic::vector_reduce_umax:
1412 // Target intrinsics
1413 case Intrinsic::amdgcn_perm:
1414 case Intrinsic::arm_mve_vctp8:
1415 case Intrinsic::arm_mve_vctp16:
1416 case Intrinsic::arm_mve_vctp32:
1417 case Intrinsic::arm_mve_vctp64:
1418 case Intrinsic::aarch64_sve_convert_from_svbool:
1419 // WebAssembly float semantics are always known
1420 case Intrinsic::wasm_trunc_signed:
1421 case Intrinsic::wasm_trunc_unsigned:
1422 return true;
1423
1424 // Floating point operations cannot be folded in strictfp functions in
1425 // general case. They can be folded if FP environment is known to compiler.
1426 case Intrinsic::minnum:
1427 case Intrinsic::maxnum:
1428 case Intrinsic::minimum:
1429 case Intrinsic::maximum:
1430 case Intrinsic::log:
1431 case Intrinsic::log2:
1432 case Intrinsic::log10:
1433 case Intrinsic::exp:
1434 case Intrinsic::exp2:
1435 case Intrinsic::sqrt:
1436 case Intrinsic::sin:
1437 case Intrinsic::cos:
1438 case Intrinsic::pow:
1439 case Intrinsic::powi:
1440 case Intrinsic::fma:
1441 case Intrinsic::fmuladd:
1442 case Intrinsic::fptoui_sat:
1443 case Intrinsic::fptosi_sat:
1444 case Intrinsic::convert_from_fp16:
1445 case Intrinsic::convert_to_fp16:
1446 case Intrinsic::amdgcn_cos:
1447 case Intrinsic::amdgcn_cubeid:
1448 case Intrinsic::amdgcn_cubema:
1449 case Intrinsic::amdgcn_cubesc:
1450 case Intrinsic::amdgcn_cubetc:
1451 case Intrinsic::amdgcn_fmul_legacy:
1452 case Intrinsic::amdgcn_fma_legacy:
1453 case Intrinsic::amdgcn_fract:
1454 case Intrinsic::amdgcn_ldexp:
1455 case Intrinsic::amdgcn_sin:
1456 // The intrinsics below depend on rounding mode in MXCSR.
1457 case Intrinsic::x86_sse_cvtss2si:
1458 case Intrinsic::x86_sse_cvtss2si64:
1459 case Intrinsic::x86_sse_cvttss2si:
1460 case Intrinsic::x86_sse_cvttss2si64:
1461 case Intrinsic::x86_sse2_cvtsd2si:
1462 case Intrinsic::x86_sse2_cvtsd2si64:
1463 case Intrinsic::x86_sse2_cvttsd2si:
1464 case Intrinsic::x86_sse2_cvttsd2si64:
1465 case Intrinsic::x86_avx512_vcvtss2si32:
1466 case Intrinsic::x86_avx512_vcvtss2si64:
1467 case Intrinsic::x86_avx512_cvttss2si:
1468 case Intrinsic::x86_avx512_cvttss2si64:
1469 case Intrinsic::x86_avx512_vcvtsd2si32:
1470 case Intrinsic::x86_avx512_vcvtsd2si64:
1471 case Intrinsic::x86_avx512_cvttsd2si:
1472 case Intrinsic::x86_avx512_cvttsd2si64:
1473 case Intrinsic::x86_avx512_vcvtss2usi32:
1474 case Intrinsic::x86_avx512_vcvtss2usi64:
1475 case Intrinsic::x86_avx512_cvttss2usi:
1476 case Intrinsic::x86_avx512_cvttss2usi64:
1477 case Intrinsic::x86_avx512_vcvtsd2usi32:
1478 case Intrinsic::x86_avx512_vcvtsd2usi64:
1479 case Intrinsic::x86_avx512_cvttsd2usi:
1480 case Intrinsic::x86_avx512_cvttsd2usi64:
1481 return !Call->isStrictFP();
1482
1483 // Sign operations are actually bitwise operations; they do not raise
1484 // exceptions even for SNaNs.
1485 case Intrinsic::fabs:
1486 case Intrinsic::copysign:
1487 // Non-constrained variants of rounding operations imply the default FP
1488 // environment, so they can be folded in any case.
1489 case Intrinsic::ceil:
1490 case Intrinsic::floor:
1491 case Intrinsic::round:
1492 case Intrinsic::roundeven:
1493 case Intrinsic::trunc:
1494 case Intrinsic::nearbyint:
1495 case Intrinsic::rint:
1496 // Constrained intrinsics can be folded if the FP environment is known to
1497 // the compiler.
1498 case Intrinsic::experimental_constrained_fma:
1499 case Intrinsic::experimental_constrained_fmuladd:
1500 case Intrinsic::experimental_constrained_fadd:
1501 case Intrinsic::experimental_constrained_fsub:
1502 case Intrinsic::experimental_constrained_fmul:
1503 case Intrinsic::experimental_constrained_fdiv:
1504 case Intrinsic::experimental_constrained_frem:
1505 case Intrinsic::experimental_constrained_ceil:
1506 case Intrinsic::experimental_constrained_floor:
1507 case Intrinsic::experimental_constrained_round:
1508 case Intrinsic::experimental_constrained_roundeven:
1509 case Intrinsic::experimental_constrained_trunc:
1510 case Intrinsic::experimental_constrained_nearbyint:
1511 case Intrinsic::experimental_constrained_rint:
1512 return true;
1513 default:
1514 return false;
1515 case Intrinsic::not_intrinsic: break;
1516 }
1517
1518 if (!F->hasName() || Call->isStrictFP())
1519 return false;
1520
1521 // In these cases, a check of the name's length is required. We don't want
1522 // to return true for a name like "cos\0blah", which strcmp would consider
1523 // equal to "cos" but which has length 8.
1524 StringRef Name = F->getName();
1525 switch (Name[0]) {
1526 default:
1527 return false;
1528 case 'a':
1529 return Name == "acos" || Name == "acosf" ||
1530 Name == "asin" || Name == "asinf" ||
1531 Name == "atan" || Name == "atanf" ||
1532 Name == "atan2" || Name == "atan2f";
1533 case 'c':
1534 return Name == "ceil" || Name == "ceilf" ||
1535 Name == "cos" || Name == "cosf" ||
1536 Name == "cosh" || Name == "coshf";
1537 case 'e':
1538 return Name == "exp" || Name == "expf" ||
1539 Name == "exp2" || Name == "exp2f";
1540 case 'f':
1541 return Name == "fabs" || Name == "fabsf" ||
1542 Name == "floor" || Name == "floorf" ||
1543 Name == "fmod" || Name == "fmodf";
1544 case 'l':
1545 return Name == "log" || Name == "logf" ||
1546 Name == "log2" || Name == "log2f" ||
1547 Name == "log10" || Name == "log10f";
1548 case 'n':
1549 return Name == "nearbyint" || Name == "nearbyintf";
1550 case 'p':
1551 return Name == "pow" || Name == "powf";
1552 case 'r':
1553 return Name == "remainder" || Name == "remainderf" ||
1554 Name == "rint" || Name == "rintf" ||
1555 Name == "round" || Name == "roundf";
1556 case 's':
1557 return Name == "sin" || Name == "sinf" ||
1558 Name == "sinh" || Name == "sinhf" ||
1559 Name == "sqrt" || Name == "sqrtf";
1560 case 't':
1561 return Name == "tan" || Name == "tanf" ||
1562 Name == "tanh" || Name == "tanhf" ||
1563 Name == "trunc" || Name == "truncf";
1564 case '_':
1565 // Check for various function names that get used for the math functions
1566 // when the header files are preprocessed with the macro
1567 // __FINITE_MATH_ONLY__ enabled.
1568 // The '12' here is the length of the shortest name that can match.
1569 // We need to check the size before looking at Name[1] and Name[2]
1570 // so we may as well check a limit that will eliminate mismatches.
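// For example, "__exp_finite" and "__pow_finite" are exactly 12 characters.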
1571 if (Name.size() < 12 || Name[1] != '_')
1572 return false;
1573 switch (Name[2]) {
1574 default:
1575 return false;
1576 case 'a':
1577 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1578 Name == "__asin_finite" || Name == "__asinf_finite" ||
1579 Name == "__atan2_finite" || Name == "__atan2f_finite";
1580 case 'c':
1581 return Name == "__cosh_finite" || Name == "__coshf_finite";
1582 case 'e':
1583 return Name == "__exp_finite" || Name == "__expf_finite" ||
1584 Name == "__exp2_finite" || Name == "__exp2f_finite";
1585 case 'l':
1586 return Name == "__log_finite" || Name == "__logf_finite" ||
1587 Name == "__log10_finite" || Name == "__log10f_finite";
1588 case 'p':
1589 return Name == "__pow_finite" || Name == "__powf_finite";
1590 case 's':
1591 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1592 }
1593 }
1594 }
1595
1596 namespace {
1597
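/// Convert a host-computed double back to a constant of the requested
/// half/float/double type, rounding to nearest-even when narrowing.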
1598 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1599 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1600 APFloat APF(V);
1601 bool unused;
1602 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1603 return ConstantFP::get(Ty->getContext(), APF);
1604 }
1605 if (Ty->isDoubleTy())
1606 return ConstantFP::get(Ty->getContext(), APFloat(V));
1607 llvm_unreachable("Can only constant fold half/float/double");
1608 }
1609
1610 /// Clear the floating-point exception state.
1611 inline void llvm_fenv_clearexcept() {
1612 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1613 feclearexcept(FE_ALL_EXCEPT);
1614 #endif
1615 errno = 0;
1616 }
1617
1618 /// Test if a floating-point exception was raised.
1619 inline bool llvm_fenv_testexcept() {
1620 int errno_val = errno;
1621 if (errno_val == ERANGE || errno_val == EDOM)
1622 return true;
1623 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1624 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1625 return true;
1626 #endif
1627 return false;
1628 }
1629
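/// Evaluate a unary libm function on the host and refuse the fold if the
/// evaluation raised an FP exception or set errno. For example, on typical
/// hosts ConstantFoldFP(log, APFloat(-1.0), Ty) returns nullptr because
/// log(-1) reports a domain error.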
1630 Constant *ConstantFoldFP(double (*NativeFP)(double), const APFloat &V,
1631 Type *Ty) {
1632 llvm_fenv_clearexcept();
1633 double Result = NativeFP(V.convertToDouble());
1634 if (llvm_fenv_testexcept()) {
1635 llvm_fenv_clearexcept();
1636 return nullptr;
1637 }
1638
1639 return GetConstantFoldFPValue(Result, Ty);
1640 }
1641
1642 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double),
1643 const APFloat &V, const APFloat &W, Type *Ty) {
1644 llvm_fenv_clearexcept();
1645 double Result = NativeFP(V.convertToDouble(), W.convertToDouble());
1646 if (llvm_fenv_testexcept()) {
1647 llvm_fenv_clearexcept();
1648 return nullptr;
1649 }
1650
1651 return GetConstantFoldFPValue(Result, Ty);
1652 }
1653
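/// Fold an integer vector reduction to a scalar, e.g.
/// vector_reduce_add(<4 x i32> <i32 1, i32 2, i32 3, i32 4>) folds to i32 10.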
1654 Constant *constantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1655 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1656 if (!VT)
1657 return nullptr;
1658
1659 // This isn't strictly necessary, but handle the special/common case of zero:
1660 // all integer reductions of a zero input produce zero.
1661 if (isa<ConstantAggregateZero>(Op))
1662 return ConstantInt::get(VT->getElementType(), 0);
1663
1664 // This is the same as the underlying binops - poison propagates.
1665 if (isa<PoisonValue>(Op) || Op->containsPoisonElement())
1666 return PoisonValue::get(VT->getElementType());
1667
1668 // TODO: Handle undef.
1669 if (!isa<ConstantVector>(Op) && !isa<ConstantDataVector>(Op))
1670 return nullptr;
1671
1672 auto *EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1673 if (!EltC)
1674 return nullptr;
1675
1676 APInt Acc = EltC->getValue();
1677 for (unsigned I = 1, E = VT->getNumElements(); I != E; I++) {
1678 if (!(EltC = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1679 return nullptr;
1680 const APInt &X = EltC->getValue();
1681 switch (IID) {
1682 case Intrinsic::vector_reduce_add:
1683 Acc = Acc + X;
1684 break;
1685 case Intrinsic::vector_reduce_mul:
1686 Acc = Acc * X;
1687 break;
1688 case Intrinsic::vector_reduce_and:
1689 Acc = Acc & X;
1690 break;
1691 case Intrinsic::vector_reduce_or:
1692 Acc = Acc | X;
1693 break;
1694 case Intrinsic::vector_reduce_xor:
1695 Acc = Acc ^ X;
1696 break;
1697 case Intrinsic::vector_reduce_smin:
1698 Acc = APIntOps::smin(Acc, X);
1699 break;
1700 case Intrinsic::vector_reduce_smax:
1701 Acc = APIntOps::smax(Acc, X);
1702 break;
1703 case Intrinsic::vector_reduce_umin:
1704 Acc = APIntOps::umin(Acc, X);
1705 break;
1706 case Intrinsic::vector_reduce_umax:
1707 Acc = APIntOps::umax(Acc, X);
1708 break;
1709 }
1710 }
1711
1712 return ConstantInt::get(Op->getContext(), Acc);
1713 }
1714
1715 /// Attempt to fold an SSE floating point to integer conversion of a constant
1716 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1717 /// used (toward nearest, ties to even). This matches the behavior of the
1718 /// non-truncating SSE instructions in the default rounding mode. The desired
1719 /// integer type Ty is used to select how many bits are available for the
1720 /// result. Returns null if the conversion cannot be performed, otherwise
1721 /// returns the Constant value resulting from the conversion.
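/// For example, folding cvtss2si on 1.5f yields 2 under the default rounding
/// (ties to even), while the truncating cvttss2si yields 1.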
1722 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1723 Type *Ty, bool IsSigned) {
1724 // All of these conversion intrinsics form an integer of at most 64 bits.
1725 unsigned ResultWidth = Ty->getIntegerBitWidth();
1726 assert(ResultWidth <= 64 &&
1727 "Can only constant fold conversions to 64 and 32 bit ints");
1728
1729 uint64_t UIntVal;
1730 bool isExact = false;
1731 APFloat::roundingMode mode = roundTowardZero? APFloat::rmTowardZero
1732 : APFloat::rmNearestTiesToEven;
1733 APFloat::opStatus status =
1734 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1735 IsSigned, mode, &isExact);
1736 if (status != APFloat::opOK &&
1737 (!roundTowardZero || status != APFloat::opInexact))
1738 return nullptr;
1739 return ConstantInt::get(Ty, UIntVal, IsSigned);
1740 }
1741
1742 double getValueAsDouble(ConstantFP *Op) {
1743 Type *Ty = Op->getType();
1744
1745 if (Ty->isBFloatTy() || Ty->isHalfTy() || Ty->isFloatTy() || Ty->isDoubleTy())
1746 return Op->getValueAPF().convertToDouble();
1747
1748 bool unused;
1749 APFloat APF = Op->getValueAPF();
1750 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1751 return APF.convertToDouble();
1752 }
1753
1754 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1755 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1756 C = &CI->getValue();
1757 return true;
1758 }
1759 if (isa<UndefValue>(Op)) {
1760 C = nullptr;
1761 return true;
1762 }
1763 return false;
1764 }
1765
1766 /// Checks if the given intrinsic call, which evaluates to a constant, is
1767 /// allowed to be folded.
1768 ///
1769 /// \param CI Constrained intrinsic call.
1770 /// \param St Exception flags raised during constant evaluation.
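/// In short, folding is allowed if no exception was raised, or if the
/// rounding mode is known and exceptions need not be strictly preserved.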
1771 static bool mayFoldConstrained(ConstrainedFPIntrinsic *CI,
1772 APFloat::opStatus St) {
1773 Optional<RoundingMode> ORM = CI->getRoundingMode();
1774 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1775
1776 // If the operation does not change exception status flags, it is safe
1777 // to fold.
1778 if (St == APFloat::opStatus::opOK) {
1779 // When FP exceptions are not ignored, the intrinsic call will not be
1780 // eliminated, because it is considered to have side effects. But we know
1781 // that its evaluation does not raise exceptions, so the side effect is
1782 // absent. To allow removing the call, mark it as not accessing memory.
1783 if (EB && *EB != fp::ExceptionBehavior::ebIgnore)
1784 CI->addFnAttr(Attribute::ReadNone);
1785 return true;
1786 }
1787
1788 // If the evaluation raised an FP exception, the result can depend on the
1789 // rounding mode. If the latter is unknown, folding is not possible.
1790 if (!ORM || *ORM == RoundingMode::Dynamic)
1791 return false;
1792
1793 // If FP exceptions are ignored, fold the call, even if such an exception
1794 // is raised.
1795 if (!EB || *EB != fp::ExceptionBehavior::ebStrict)
1796 return true;
1797
1798 // Leave the calculation for runtime so that the exception flags are
1799 // correctly set in hardware.
1800 return false;
1801 }
1802
1803 /// Returns the rounding mode that should be used for constant evaluation.
1804 static RoundingMode
1805 getEvaluationRoundingMode(const ConstrainedFPIntrinsic *CI) {
1806 Optional<RoundingMode> ORM = CI->getRoundingMode();
1807 if (!ORM || *ORM == RoundingMode::Dynamic)
1808 // Even if the rounding mode is unknown, try evaluating the operation.
1809 // If it does not raise the inexact exception, rounding was not applied, so
1810 // the result is exact and does not depend on the rounding mode. Whether
1811 // other FP exceptions are raised does not depend on the rounding mode either.
1812 return RoundingMode::NearestTiesToEven;
1813 return *ORM;
1814 }
1815
1816 static Constant *ConstantFoldScalarCall1(StringRef Name,
1817 Intrinsic::ID IntrinsicID,
1818 Type *Ty,
1819 ArrayRef<Constant *> Operands,
1820 const TargetLibraryInfo *TLI,
1821 const CallBase *Call) {
1822 assert(Operands.size() == 1 && "Wrong number of operands.");
1823
1824 if (IntrinsicID == Intrinsic::is_constant) {
1825 // We know we have a "Constant" argument. But we want to only
1826 // return true for manifest constants, not those that depend on
1827 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1828 if (Operands[0]->isManifestConstant())
1829 return ConstantInt::getTrue(Ty->getContext());
1830 return nullptr;
1831 }
1832 if (isa<UndefValue>(Operands[0])) {
1833 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1834 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1835 // fptoui.sat and fptosi.sat can always fold to zero (for a zero input).
1836 if (IntrinsicID == Intrinsic::cos ||
1837 IntrinsicID == Intrinsic::ctpop ||
1838 IntrinsicID == Intrinsic::fptoui_sat ||
1839 IntrinsicID == Intrinsic::fptosi_sat)
1840 return Constant::getNullValue(Ty);
1841 if (IntrinsicID == Intrinsic::bswap ||
1842 IntrinsicID == Intrinsic::bitreverse ||
1843 IntrinsicID == Intrinsic::launder_invariant_group ||
1844 IntrinsicID == Intrinsic::strip_invariant_group)
1845 return Operands[0];
1846 }
1847
1848 if (isa<ConstantPointerNull>(Operands[0])) {
1849 // launder(null) == null == strip(null) iff in addrspace 0
1850 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1851 IntrinsicID == Intrinsic::strip_invariant_group) {
1852 // If the instruction has not yet been put in a basic block (e.g. when
1853 // cloning a function during inlining), Call's caller may not be available.
1854 // So check Call's BB first before querying Call->getCaller.
1855 const Function *Caller =
1856 Call->getParent() ? Call->getCaller() : nullptr;
1857 if (Caller &&
1858 !NullPointerIsDefined(
1859 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1860 return Operands[0];
1861 }
1862 return nullptr;
1863 }
1864 }
1865
1866 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1867 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1868 APFloat Val(Op->getValueAPF());
1869
1870 bool lost = false;
1871 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1872
1873 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1874 }
1875
1876 APFloat U = Op->getValueAPF();
1877
1878 if (IntrinsicID == Intrinsic::wasm_trunc_signed ||
1879 IntrinsicID == Intrinsic::wasm_trunc_unsigned) {
1880 bool Signed = IntrinsicID == Intrinsic::wasm_trunc_signed;
1881
1882 if (U.isNaN())
1883 return nullptr;
1884
1885 unsigned Width = Ty->getIntegerBitWidth();
1886 APSInt Int(Width, !Signed);
1887 bool IsExact = false;
1888 APFloat::opStatus Status =
1889 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1890
1891 if (Status == APFloat::opOK || Status == APFloat::opInexact)
1892 return ConstantInt::get(Ty, Int);
1893
1894 return nullptr;
1895 }
1896
1897 if (IntrinsicID == Intrinsic::fptoui_sat ||
1898 IntrinsicID == Intrinsic::fptosi_sat) {
1899 // convertToInteger() already has the desired saturation semantics.
1900 APSInt Int(Ty->getIntegerBitWidth(),
1901 IntrinsicID == Intrinsic::fptoui_sat);
1902 bool IsExact;
1903 U.convertToInteger(Int, APFloat::rmTowardZero, &IsExact);
1904 return ConstantInt::get(Ty, Int);
1905 }
1906
1907 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1908 return nullptr;
1909
1910 // Use internal versions of these intrinsics.
1911
1912 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
1913 U.roundToIntegral(APFloat::rmNearestTiesToEven);
1914 return ConstantFP::get(Ty->getContext(), U);
1915 }
1916
1917 if (IntrinsicID == Intrinsic::round) {
1918 U.roundToIntegral(APFloat::rmNearestTiesToAway);
1919 return ConstantFP::get(Ty->getContext(), U);
1920 }
1921
1922 if (IntrinsicID == Intrinsic::roundeven) {
1923 U.roundToIntegral(APFloat::rmNearestTiesToEven);
1924 return ConstantFP::get(Ty->getContext(), U);
1925 }
1926
1927 if (IntrinsicID == Intrinsic::ceil) {
1928 U.roundToIntegral(APFloat::rmTowardPositive);
1929 return ConstantFP::get(Ty->getContext(), U);
1930 }
1931
1932 if (IntrinsicID == Intrinsic::floor) {
1933 U.roundToIntegral(APFloat::rmTowardNegative);
1934 return ConstantFP::get(Ty->getContext(), U);
1935 }
1936
1937 if (IntrinsicID == Intrinsic::trunc) {
1938 U.roundToIntegral(APFloat::rmTowardZero);
1939 return ConstantFP::get(Ty->getContext(), U);
1940 }
1941
1942 if (IntrinsicID == Intrinsic::fabs) {
1943 U.clearSign();
1944 return ConstantFP::get(Ty->getContext(), U);
1945 }
1946
1947 if (IntrinsicID == Intrinsic::amdgcn_fract) {
1948 // The v_fract instruction behaves like the OpenCL spec, which defines
1949 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
1950 // there to prevent fract(-small) from returning 1.0. It returns the
1951 // largest positive floating-point number less than 1.0."
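// For a float input just below zero such as -0x1p-25f, x - floor(x)
// rounds up to 1.0, and the min() clamps the result to 0x1.fffffep-1.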
1952 APFloat FloorU(U);
1953 FloorU.roundToIntegral(APFloat::rmTowardNegative);
1954 APFloat FractU(U - FloorU);
1955 APFloat AlmostOne(U.getSemantics(), 1);
1956 AlmostOne.next(/*nextDown*/ true);
1957 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
1958 }
1959
1960 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
1961 // raise FP exceptions, unless the argument is signaling NaN.
1962
1963 Optional<APFloat::roundingMode> RM;
1964 switch (IntrinsicID) {
1965 default:
1966 break;
1967 case Intrinsic::experimental_constrained_nearbyint:
1968 case Intrinsic::experimental_constrained_rint: {
1969 auto CI = cast<ConstrainedFPIntrinsic>(Call);
1970 RM = CI->getRoundingMode();
1971 if (!RM || RM.getValue() == RoundingMode::Dynamic)
1972 return nullptr;
1973 break;
1974 }
1975 case Intrinsic::experimental_constrained_round:
1976 RM = APFloat::rmNearestTiesToAway;
1977 break;
1978 case Intrinsic::experimental_constrained_ceil:
1979 RM = APFloat::rmTowardPositive;
1980 break;
1981 case Intrinsic::experimental_constrained_floor:
1982 RM = APFloat::rmTowardNegative;
1983 break;
1984 case Intrinsic::experimental_constrained_trunc:
1985 RM = APFloat::rmTowardZero;
1986 break;
1987 }
1988 if (RM) {
1989 auto CI = cast<ConstrainedFPIntrinsic>(Call);
1990 if (U.isFinite()) {
1991 APFloat::opStatus St = U.roundToIntegral(*RM);
1992 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
1993 St == APFloat::opInexact) {
1994 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1995 if (EB && *EB == fp::ebStrict)
1996 return nullptr;
1997 }
1998 } else if (U.isSignaling()) {
1999 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
2000 if (EB && *EB != fp::ebIgnore)
2001 return nullptr;
2002 U = APFloat::getQNaN(U.getSemantics());
2003 }
2004 return ConstantFP::get(Ty->getContext(), U);
2005 }
2006
2007 // We only fold functions with finite arguments. Folding NaN and inf is
2008 // likely to be aborted with an exception anyway, and some host libms
2009 // have known errors raising exceptions.
2010 if (!U.isFinite())
2011 return nullptr;
2012
2013 // APFloat versions of these functions do not currently exist, so we use
2014 // the host's native double versions. Float versions are not called
2015 // directly, but for all of these functions (float)(f((double)arg)) ==
2016 // f(arg) holds. Long double is not supported yet.
2017 APFloat APF = Op->getValueAPF();
2018
2019 switch (IntrinsicID) {
2020 default: break;
2021 case Intrinsic::log:
2022 return ConstantFoldFP(log, APF, Ty);
2023 case Intrinsic::log2:
2024 // TODO: What about hosts that lack a C99 library?
2025 return ConstantFoldFP(Log2, APF, Ty);
2026 case Intrinsic::log10:
2027 // TODO: What about hosts that lack a C99 library?
2028 return ConstantFoldFP(log10, APF, Ty);
2029 case Intrinsic::exp:
2030 return ConstantFoldFP(exp, APF, Ty);
2031 case Intrinsic::exp2:
2032 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2033 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2034 case Intrinsic::sin:
2035 return ConstantFoldFP(sin, APF, Ty);
2036 case Intrinsic::cos:
2037 return ConstantFoldFP(cos, APF, Ty);
2038 case Intrinsic::sqrt:
2039 return ConstantFoldFP(sqrt, APF, Ty);
2040 case Intrinsic::amdgcn_cos:
2041 case Intrinsic::amdgcn_sin: {
2042 double V = getValueAsDouble(Op);
2043 if (V < -256.0 || V > 256.0)
2044 // The gfx8 and gfx9 architectures handle arguments outside the range
2045 // [-256, 256] differently. This should be a rare case so bail out
2046 // rather than trying to handle the difference.
2047 return nullptr;
2048 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
2049 double V4 = V * 4.0;
2050 if (V4 == floor(V4)) {
2051 // Force exact results for quarter-integer inputs.
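// The input is in units of full turns; e.g. V == 0.25 gives V4 == 1, and
// SinVals[1] == 1.0 is exactly sin(pi/2).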
2052 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
2053 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
2054 } else {
2055 if (IsCos)
2056 V = cos(V * 2.0 * numbers::pi);
2057 else
2058 V = sin(V * 2.0 * numbers::pi);
2059 }
2060 return GetConstantFoldFPValue(V, Ty);
2061 }
2062 }
2063
2064 if (!TLI)
2065 return nullptr;
2066
2067 LibFunc Func = NotLibFunc;
2068 TLI->getLibFunc(Name, Func);
2069 switch (Func) {
2070 default:
2071 break;
2072 case LibFunc_acos:
2073 case LibFunc_acosf:
2074 case LibFunc_acos_finite:
2075 case LibFunc_acosf_finite:
2076 if (TLI->has(Func))
2077 return ConstantFoldFP(acos, APF, Ty);
2078 break;
2079 case LibFunc_asin:
2080 case LibFunc_asinf:
2081 case LibFunc_asin_finite:
2082 case LibFunc_asinf_finite:
2083 if (TLI->has(Func))
2084 return ConstantFoldFP(asin, APF, Ty);
2085 break;
2086 case LibFunc_atan:
2087 case LibFunc_atanf:
2088 if (TLI->has(Func))
2089 return ConstantFoldFP(atan, APF, Ty);
2090 break;
2091 case LibFunc_ceil:
2092 case LibFunc_ceilf:
2093 if (TLI->has(Func)) {
2094 U.roundToIntegral(APFloat::rmTowardPositive);
2095 return ConstantFP::get(Ty->getContext(), U);
2096 }
2097 break;
2098 case LibFunc_cos:
2099 case LibFunc_cosf:
2100 if (TLI->has(Func))
2101 return ConstantFoldFP(cos, APF, Ty);
2102 break;
2103 case LibFunc_cosh:
2104 case LibFunc_coshf:
2105 case LibFunc_cosh_finite:
2106 case LibFunc_coshf_finite:
2107 if (TLI->has(Func))
2108 return ConstantFoldFP(cosh, APF, Ty);
2109 break;
2110 case LibFunc_exp:
2111 case LibFunc_expf:
2112 case LibFunc_exp_finite:
2113 case LibFunc_expf_finite:
2114 if (TLI->has(Func))
2115 return ConstantFoldFP(exp, APF, Ty);
2116 break;
2117 case LibFunc_exp2:
2118 case LibFunc_exp2f:
2119 case LibFunc_exp2_finite:
2120 case LibFunc_exp2f_finite:
2121 if (TLI->has(Func))
2122 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2123 return ConstantFoldBinaryFP(pow, APFloat(2.0), APF, Ty);
2124 break;
2125 case LibFunc_fabs:
2126 case LibFunc_fabsf:
2127 if (TLI->has(Func)) {
2128 U.clearSign();
2129 return ConstantFP::get(Ty->getContext(), U);
2130 }
2131 break;
2132 case LibFunc_floor:
2133 case LibFunc_floorf:
2134 if (TLI->has(Func)) {
2135 U.roundToIntegral(APFloat::rmTowardNegative);
2136 return ConstantFP::get(Ty->getContext(), U);
2137 }
2138 break;
2139 case LibFunc_log:
2140 case LibFunc_logf:
2141 case LibFunc_log_finite:
2142 case LibFunc_logf_finite:
2143 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2144 return ConstantFoldFP(log, APF, Ty);
2145 break;
2146 case LibFunc_log2:
2147 case LibFunc_log2f:
2148 case LibFunc_log2_finite:
2149 case LibFunc_log2f_finite:
2150 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2151 // TODO: What about hosts that lack a C99 library?
2152 return ConstantFoldFP(Log2, APF, Ty);
2153 break;
2154 case LibFunc_log10:
2155 case LibFunc_log10f:
2156 case LibFunc_log10_finite:
2157 case LibFunc_log10f_finite:
2158 if (!APF.isNegative() && !APF.isZero() && TLI->has(Func))
2159 // TODO: What about hosts that lack a C99 library?
2160 return ConstantFoldFP(log10, APF, Ty);
2161 break;
2162 case LibFunc_nearbyint:
2163 case LibFunc_nearbyintf:
2164 case LibFunc_rint:
2165 case LibFunc_rintf:
2166 if (TLI->has(Func)) {
2167 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2168 return ConstantFP::get(Ty->getContext(), U);
2169 }
2170 break;
2171 case LibFunc_round:
2172 case LibFunc_roundf:
2173 if (TLI->has(Func)) {
2174 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2175 return ConstantFP::get(Ty->getContext(), U);
2176 }
2177 break;
2178 case LibFunc_sin:
2179 case LibFunc_sinf:
2180 if (TLI->has(Func))
2181 return ConstantFoldFP(sin, APF, Ty);
2182 break;
2183 case LibFunc_sinh:
2184 case LibFunc_sinhf:
2185 case LibFunc_sinh_finite:
2186 case LibFunc_sinhf_finite:
2187 if (TLI->has(Func))
2188 return ConstantFoldFP(sinh, APF, Ty);
2189 break;
2190 case LibFunc_sqrt:
2191 case LibFunc_sqrtf:
2192 if (!APF.isNegative() && TLI->has(Func))
2193 return ConstantFoldFP(sqrt, APF, Ty);
2194 break;
2195 case LibFunc_tan:
2196 case LibFunc_tanf:
2197 if (TLI->has(Func))
2198 return ConstantFoldFP(tan, APF, Ty);
2199 break;
2200 case LibFunc_tanh:
2201 case LibFunc_tanhf:
2202 if (TLI->has(Func))
2203 return ConstantFoldFP(tanh, APF, Ty);
2204 break;
2205 case LibFunc_trunc:
2206 case LibFunc_truncf:
2207 if (TLI->has(Func)) {
2208 U.roundToIntegral(APFloat::rmTowardZero);
2209 return ConstantFP::get(Ty->getContext(), U);
2210 }
2211 break;
2212 }
2213 return nullptr;
2214 }
2215
2216 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2217 switch (IntrinsicID) {
2218 case Intrinsic::bswap:
2219 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2220 case Intrinsic::ctpop:
2221 return ConstantInt::get(Ty, Op->getValue().countPopulation());
2222 case Intrinsic::bitreverse:
2223 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2224 case Intrinsic::convert_from_fp16: {
2225 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2226
2227 bool lost = false;
2228 APFloat::opStatus status = Val.convert(
2229 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2230
2231 // Conversion is always precise.
2232 (void)status;
2233 assert(status == APFloat::opOK && !lost &&
2234 "Precision lost during fp16 constfolding");
2235
2236 return ConstantFP::get(Ty->getContext(), Val);
2237 }
2238 default:
2239 return nullptr;
2240 }
2241 }
2242
2243 switch (IntrinsicID) {
2244 default: break;
2245 case Intrinsic::vector_reduce_add:
2246 case Intrinsic::vector_reduce_mul:
2247 case Intrinsic::vector_reduce_and:
2248 case Intrinsic::vector_reduce_or:
2249 case Intrinsic::vector_reduce_xor:
2250 case Intrinsic::vector_reduce_smin:
2251 case Intrinsic::vector_reduce_smax:
2252 case Intrinsic::vector_reduce_umin:
2253 case Intrinsic::vector_reduce_umax:
2254 if (Constant *C = constantFoldVectorReduce(IntrinsicID, Operands[0]))
2255 return C;
2256 break;
2257 }
2258
2259 // Support ConstantVector in case we have an Undef in the top.
2260 if (isa<ConstantVector>(Operands[0]) ||
2261 isa<ConstantDataVector>(Operands[0])) {
2262 auto *Op = cast<Constant>(Operands[0]);
2263 switch (IntrinsicID) {
2264 default: break;
2265 case Intrinsic::x86_sse_cvtss2si:
2266 case Intrinsic::x86_sse_cvtss2si64:
2267 case Intrinsic::x86_sse2_cvtsd2si:
2268 case Intrinsic::x86_sse2_cvtsd2si64:
2269 if (ConstantFP *FPOp =
2270 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2271 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2272 /*roundTowardZero=*/false, Ty,
2273 /*IsSigned*/true);
2274 break;
2275 case Intrinsic::x86_sse_cvttss2si:
2276 case Intrinsic::x86_sse_cvttss2si64:
2277 case Intrinsic::x86_sse2_cvttsd2si:
2278 case Intrinsic::x86_sse2_cvttsd2si64:
2279 if (ConstantFP *FPOp =
2280 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2281 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2282 /*roundTowardZero=*/true, Ty,
2283 /*IsSigned*/true);
2284 break;
2285 }
2286 }
2287
2288 return nullptr;
2289 }
2290
2291 static Constant *ConstantFoldScalarCall2(StringRef Name,
2292 Intrinsic::ID IntrinsicID,
2293 Type *Ty,
2294 ArrayRef<Constant *> Operands,
2295 const TargetLibraryInfo *TLI,
2296 const CallBase *Call) {
2297 assert(Operands.size() == 2 && "Wrong number of operands.");
2298
2299 if (Ty->isFloatingPointTy()) {
2300 // TODO: We should have undef handling for all of the FP intrinsics that
2301 // are attempted to be folded in this function.
2302 bool IsOp0Undef = isa<UndefValue>(Operands[0]);
2303 bool IsOp1Undef = isa<UndefValue>(Operands[1]);
2304 switch (IntrinsicID) {
2305 case Intrinsic::maxnum:
2306 case Intrinsic::minnum:
2307 case Intrinsic::maximum:
2308 case Intrinsic::minimum:
2309 // If one argument is undef, return the other argument.
2310 if (IsOp0Undef)
2311 return Operands[1];
2312 if (IsOp1Undef)
2313 return Operands[0];
2314 break;
2315 }
2316 }
2317
2318 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2319 if (!Ty->isFloatingPointTy())
2320 return nullptr;
2321 APFloat Op1V = Op1->getValueAPF();
2322
2323 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2324 if (Op2->getType() != Op1->getType())
2325 return nullptr;
2326 APFloat Op2V = Op2->getValueAPF();
2327
2328 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2329 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2330 APFloat Res = Op1V;
2331 APFloat::opStatus St;
2332 switch (IntrinsicID) {
2333 default:
2334 return nullptr;
2335 case Intrinsic::experimental_constrained_fadd:
2336 St = Res.add(Op2V, RM);
2337 break;
2338 case Intrinsic::experimental_constrained_fsub:
2339 St = Res.subtract(Op2V, RM);
2340 break;
2341 case Intrinsic::experimental_constrained_fmul:
2342 St = Res.multiply(Op2V, RM);
2343 break;
2344 case Intrinsic::experimental_constrained_fdiv:
2345 St = Res.divide(Op2V, RM);
2346 break;
2347 case Intrinsic::experimental_constrained_frem:
2348 St = Res.mod(Op2V);
2349 break;
2350 }
2351 if (mayFoldConstrained(const_cast<ConstrainedFPIntrinsic *>(ConstrIntr),
2352 St))
2353 return ConstantFP::get(Ty->getContext(), Res);
2354 return nullptr;
2355 }
2356
2357 switch (IntrinsicID) {
2358 default:
2359 break;
2360 case Intrinsic::copysign:
2361 return ConstantFP::get(Ty->getContext(), APFloat::copySign(Op1V, Op2V));
2362 case Intrinsic::minnum:
2363 return ConstantFP::get(Ty->getContext(), minnum(Op1V, Op2V));
2364 case Intrinsic::maxnum:
2365 return ConstantFP::get(Ty->getContext(), maxnum(Op1V, Op2V));
2366 case Intrinsic::minimum:
2367 return ConstantFP::get(Ty->getContext(), minimum(Op1V, Op2V));
2368 case Intrinsic::maximum:
2369 return ConstantFP::get(Ty->getContext(), maximum(Op1V, Op2V));
2370 }
2371
2372 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2373 return nullptr;
2374
2375 switch (IntrinsicID) {
2376 default:
2377 break;
2378 case Intrinsic::pow:
2379 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2380 case Intrinsic::amdgcn_fmul_legacy:
2381 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2382 // NaN or infinity, gives +0.0.
2383 if (Op1V.isZero() || Op2V.isZero())
2384 return ConstantFP::getNullValue(Ty);
2385 return ConstantFP::get(Ty->getContext(), Op1V * Op2V);
2386 }
2387
2388 if (!TLI)
2389 return nullptr;
2390
2391 LibFunc Func = NotLibFunc;
2392 TLI->getLibFunc(Name, Func);
2393 switch (Func) {
2394 default:
2395 break;
2396 case LibFunc_pow:
2397 case LibFunc_powf:
2398 case LibFunc_pow_finite:
2399 case LibFunc_powf_finite:
2400 if (TLI->has(Func))
2401 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2402 break;
2403 case LibFunc_fmod:
2404 case LibFunc_fmodf:
2405 if (TLI->has(Func)) {
2406 APFloat V = Op1->getValueAPF();
2407 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2408 return ConstantFP::get(Ty->getContext(), V);
2409 }
2410 break;
2411 case LibFunc_remainder:
2412 case LibFunc_remainderf:
2413 if (TLI->has(Func)) {
2414 APFloat V = Op1->getValueAPF();
2415 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2416 return ConstantFP::get(Ty->getContext(), V);
2417 }
2418 break;
2419 case LibFunc_atan2:
2420 case LibFunc_atan2f:
2421 case LibFunc_atan2_finite:
2422 case LibFunc_atan2f_finite:
2423 if (TLI->has(Func))
2424 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2425 break;
2426 }
2427 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2428 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2429 return nullptr;
2430 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2431 return ConstantFP::get(
2432 Ty->getContext(),
2433 APFloat((float)std::pow((float)Op1V.convertToDouble(),
2434 (int)Op2C->getZExtValue())));
2435 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2436 return ConstantFP::get(
2437 Ty->getContext(),
2438 APFloat((float)std::pow((float)Op1V.convertToDouble(),
2439 (int)Op2C->getZExtValue())));
2440 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2441 return ConstantFP::get(
2442 Ty->getContext(),
2443 APFloat((double)std::pow(Op1V.convertToDouble(),
2444 (int)Op2C->getZExtValue())));
2445
2446 if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
2447 // FIXME: Should flush denorms depending on FP mode, but that's ignored
2448 // everywhere else.
2449
2450 // scalbn is equivalent to ldexp with a floating-point radix of 2.
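// For example, ldexp(1.5, 3) == 1.5 * 2^3 == 12.0.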
2451 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
2452 APFloat::rmNearestTiesToEven);
2453 return ConstantFP::get(Ty->getContext(), Result);
2454 }
2455 }
2456 return nullptr;
2457 }
2458
2459 if (Operands[0]->getType()->isIntegerTy() &&
2460 Operands[1]->getType()->isIntegerTy()) {
2461 const APInt *C0, *C1;
2462 if (!getConstIntOrUndef(Operands[0], C0) ||
2463 !getConstIntOrUndef(Operands[1], C1))
2464 return nullptr;
2465
2466 unsigned BitWidth = Ty->getScalarSizeInBits();
2467 switch (IntrinsicID) {
2468 default: break;
2469 case Intrinsic::smax:
2470 if (!C0 && !C1)
2471 return UndefValue::get(Ty);
2472 if (!C0 || !C1)
2473 return ConstantInt::get(Ty, APInt::getSignedMaxValue(BitWidth));
2474 return ConstantInt::get(Ty, C0->sgt(*C1) ? *C0 : *C1);
2475
2476 case Intrinsic::smin:
2477 if (!C0 && !C1)
2478 return UndefValue::get(Ty);
2479 if (!C0 || !C1)
2480 return ConstantInt::get(Ty, APInt::getSignedMinValue(BitWidth));
2481 return ConstantInt::get(Ty, C0->slt(*C1) ? *C0 : *C1);
2482
2483 case Intrinsic::umax:
2484 if (!C0 && !C1)
2485 return UndefValue::get(Ty);
2486 if (!C0 || !C1)
2487 return ConstantInt::get(Ty, APInt::getMaxValue(BitWidth));
2488 return ConstantInt::get(Ty, C0->ugt(*C1) ? *C0 : *C1);
2489
2490 case Intrinsic::umin:
2491 if (!C0 && !C1)
2492 return UndefValue::get(Ty);
2493 if (!C0 || !C1)
2494 return ConstantInt::get(Ty, APInt::getMinValue(BitWidth));
2495 return ConstantInt::get(Ty, C0->ult(*C1) ? *C0 : *C1);
2496
2497 case Intrinsic::usub_with_overflow:
2498 case Intrinsic::ssub_with_overflow:
2499 // X - undef -> { 0, false }
2500 // undef - X -> { 0, false }
2501 if (!C0 || !C1)
2502 return Constant::getNullValue(Ty);
2503 LLVM_FALLTHROUGH;
2504 case Intrinsic::uadd_with_overflow:
2505 case Intrinsic::sadd_with_overflow:
2506 // X + undef -> { -1, false }
2507 // undef + x -> { -1, false }
2508 if (!C0 || !C1) {
2509 return ConstantStruct::get(
2510 cast<StructType>(Ty),
2511 {Constant::getAllOnesValue(Ty->getStructElementType(0)),
2512 Constant::getNullValue(Ty->getStructElementType(1))});
2513 }
2514 LLVM_FALLTHROUGH;
2515 case Intrinsic::smul_with_overflow:
2516 case Intrinsic::umul_with_overflow: {
2517 // undef * X -> { 0, false }
2518 // X * undef -> { 0, false }
2519 if (!C0 || !C1)
2520 return Constant::getNullValue(Ty);
2521
2522 APInt Res;
2523 bool Overflow;
2524 switch (IntrinsicID) {
2525 default: llvm_unreachable("Invalid case");
2526 case Intrinsic::sadd_with_overflow:
2527 Res = C0->sadd_ov(*C1, Overflow);
2528 break;
2529 case Intrinsic::uadd_with_overflow:
2530 Res = C0->uadd_ov(*C1, Overflow);
2531 break;
2532 case Intrinsic::ssub_with_overflow:
2533 Res = C0->ssub_ov(*C1, Overflow);
2534 break;
2535 case Intrinsic::usub_with_overflow:
2536 Res = C0->usub_ov(*C1, Overflow);
2537 break;
2538 case Intrinsic::smul_with_overflow:
2539 Res = C0->smul_ov(*C1, Overflow);
2540 break;
2541 case Intrinsic::umul_with_overflow:
2542 Res = C0->umul_ov(*C1, Overflow);
2543 break;
2544 }
2545 Constant *Ops[] = {
2546 ConstantInt::get(Ty->getContext(), Res),
2547 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2548 };
2549 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2550 }
2551 case Intrinsic::uadd_sat:
2552 case Intrinsic::sadd_sat:
2553 if (!C0 && !C1)
2554 return UndefValue::get(Ty);
2555 if (!C0 || !C1)
2556 return Constant::getAllOnesValue(Ty);
2557 if (IntrinsicID == Intrinsic::uadd_sat)
2558 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2559 else
2560 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2561 case Intrinsic::usub_sat:
2562 case Intrinsic::ssub_sat:
2563 if (!C0 && !C1)
2564 return UndefValue::get(Ty);
2565 if (!C0 || !C1)
2566 return Constant::getNullValue(Ty);
2567 if (IntrinsicID == Intrinsic::usub_sat)
2568 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2569 else
2570 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2571 case Intrinsic::cttz:
2572 case Intrinsic::ctlz:
2573 assert(C1 && "Must be constant int");
2574
2575 // cttz(0, 1) and ctlz(0, 1) are undef.
2576 if (C1->isOne() && (!C0 || C0->isZero()))
2577 return UndefValue::get(Ty);
2578 if (!C0)
2579 return Constant::getNullValue(Ty);
2580 if (IntrinsicID == Intrinsic::cttz)
2581 return ConstantInt::get(Ty, C0->countTrailingZeros());
2582 else
2583 return ConstantInt::get(Ty, C0->countLeadingZeros());
2584
2585 case Intrinsic::abs:
2586 // Undef or minimum val operand with poison min --> undef
2587 assert(C1 && "Must be constant int");
2588 if (C1->isOne() && (!C0 || C0->isMinSignedValue()))
2589 return UndefValue::get(Ty);
2590
2591 // Undef operand with no poison min --> 0 (sign bit must be clear)
2592 if (C1->isZero() && !C0)
2593 return Constant::getNullValue(Ty);
2594
2595 return ConstantInt::get(Ty, C0->abs());
2596 }
2597
2598 return nullptr;
2599 }
2600
2601 // Support ConstantVector in case we have an Undef in the top.
2602 if ((isa<ConstantVector>(Operands[0]) ||
2603 isa<ConstantDataVector>(Operands[0])) &&
2604 // Check for default rounding mode.
2605 // FIXME: Support other rounding modes?
2606 isa<ConstantInt>(Operands[1]) &&
2607 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2608 auto *Op = cast<Constant>(Operands[0]);
2609 switch (IntrinsicID) {
2610 default: break;
2611 case Intrinsic::x86_avx512_vcvtss2si32:
2612 case Intrinsic::x86_avx512_vcvtss2si64:
2613 case Intrinsic::x86_avx512_vcvtsd2si32:
2614 case Intrinsic::x86_avx512_vcvtsd2si64:
2615 if (ConstantFP *FPOp =
2616 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2617 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2618 /*roundTowardZero=*/false, Ty,
2619 /*IsSigned*/true);
2620 break;
2621 case Intrinsic::x86_avx512_vcvtss2usi32:
2622 case Intrinsic::x86_avx512_vcvtss2usi64:
2623 case Intrinsic::x86_avx512_vcvtsd2usi32:
2624 case Intrinsic::x86_avx512_vcvtsd2usi64:
2625 if (ConstantFP *FPOp =
2626 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2627 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2628 /*roundTowardZero=*/false, Ty,
2629 /*IsSigned*/false);
2630 break;
2631 case Intrinsic::x86_avx512_cvttss2si:
2632 case Intrinsic::x86_avx512_cvttss2si64:
2633 case Intrinsic::x86_avx512_cvttsd2si:
2634 case Intrinsic::x86_avx512_cvttsd2si64:
2635 if (ConstantFP *FPOp =
2636 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2637 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2638 /*roundTowardZero=*/true, Ty,
2639 /*IsSigned*/true);
2640 break;
2641 case Intrinsic::x86_avx512_cvttss2usi:
2642 case Intrinsic::x86_avx512_cvttss2usi64:
2643 case Intrinsic::x86_avx512_cvttsd2usi:
2644 case Intrinsic::x86_avx512_cvttsd2usi64:
2645 if (ConstantFP *FPOp =
2646 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2647 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2648 /*roundTowardZero=*/true, Ty,
2649 /*IsSigned*/false);
2650 break;
2651 }
2652 }
2653 return nullptr;
2654 }
2655
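/// Fold the amdgcn cube intrinsics: select the major axis of the
/// (S0, S1, S2) direction vector, then derive the cube face id (0..5, one
/// per signed axis) and the corresponding S/T face coordinates.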
2656 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2657 const APFloat &S0,
2658 const APFloat &S1,
2659 const APFloat &S2) {
2660 unsigned ID;
2661 const fltSemantics &Sem = S0.getSemantics();
2662 APFloat MA(Sem), SC(Sem), TC(Sem);
2663 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2664 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2665 // S2 < 0
2666 ID = 5;
2667 SC = -S0;
2668 } else {
2669 ID = 4;
2670 SC = S0;
2671 }
2672 MA = S2;
2673 TC = -S1;
2674 } else if (abs(S1) >= abs(S0)) {
2675 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2676 // S1 < 0
2677 ID = 3;
2678 TC = -S2;
2679 } else {
2680 ID = 2;
2681 TC = S2;
2682 }
2683 MA = S1;
2684 SC = S0;
2685 } else {
2686 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2687 // S0 < 0
2688 ID = 1;
2689 SC = S2;
2690 } else {
2691 ID = 0;
2692 SC = -S2;
2693 }
2694 MA = S0;
2695 TC = -S1;
2696 }
2697 switch (IntrinsicID) {
2698 default:
2699 llvm_unreachable("unhandled amdgcn cube intrinsic");
2700 case Intrinsic::amdgcn_cubeid:
2701 return APFloat(Sem, ID);
2702 case Intrinsic::amdgcn_cubema:
2703 return MA + MA;
2704 case Intrinsic::amdgcn_cubesc:
2705 return SC;
2706 case Intrinsic::amdgcn_cubetc:
2707 return TC;
2708 }
2709 }
2710
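/// Fold amdgcn.perm, which assembles a 32-bit value byte by byte: each 8-bit
/// selector in C2 picks a source byte (0..7), replicates bit 15 or bit 31 of
/// a source (8..11), or produces a constant 0x00 (12) or 0xff (above 12).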
2711 static Constant *ConstantFoldAMDGCNPermIntrinsic(ArrayRef<Constant *> Operands,
2712 Type *Ty) {
2713 const APInt *C0, *C1, *C2;
2714 if (!getConstIntOrUndef(Operands[0], C0) ||
2715 !getConstIntOrUndef(Operands[1], C1) ||
2716 !getConstIntOrUndef(Operands[2], C2))
2717 return nullptr;
2718
2719 if (!C2)
2720 return UndefValue::get(Ty);
2721
2722 APInt Val(32, 0);
2723 unsigned NumUndefBytes = 0;
2724 for (unsigned I = 0; I < 32; I += 8) {
2725 unsigned Sel = C2->extractBitsAsZExtValue(8, I);
2726 unsigned B = 0;
2727
2728 if (Sel >= 13)
2729 B = 0xff;
2730 else if (Sel == 12)
2731 B = 0x00;
2732 else {
2733 const APInt *Src = ((Sel & 10) == 10 || (Sel & 12) == 4) ? C0 : C1;
2734 if (!Src)
2735 ++NumUndefBytes;
2736 else if (Sel < 8)
2737 B = Src->extractBitsAsZExtValue(8, (Sel & 3) * 8);
2738 else
2739 B = Src->extractBitsAsZExtValue(1, (Sel & 1) ? 31 : 15) * 0xff;
2740 }
2741
2742 Val.insertBits(B, I, 8);
2743 }
2744
2745 if (NumUndefBytes == 4)
2746 return UndefValue::get(Ty);
2747
2748 return ConstantInt::get(Ty, Val);
2749 }
2750
2751 static Constant *ConstantFoldScalarCall3(StringRef Name,
2752 Intrinsic::ID IntrinsicID,
2753 Type *Ty,
2754 ArrayRef<Constant *> Operands,
2755 const TargetLibraryInfo *TLI,
2756 const CallBase *Call) {
2757 assert(Operands.size() == 3 && "Wrong number of operands.");
2758
2759 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2760 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2761 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2762 const APFloat &C1 = Op1->getValueAPF();
2763 const APFloat &C2 = Op2->getValueAPF();
2764 const APFloat &C3 = Op3->getValueAPF();
2765
2766 if (const auto *ConstrIntr = dyn_cast<ConstrainedFPIntrinsic>(Call)) {
2767 RoundingMode RM = getEvaluationRoundingMode(ConstrIntr);
2768 APFloat Res = C1;
2769 APFloat::opStatus St;
2770 switch (IntrinsicID) {
2771 default:
2772 return nullptr;
2773 case Intrinsic::experimental_constrained_fma:
2774 case Intrinsic::experimental_constrained_fmuladd:
2775 St = Res.fusedMultiplyAdd(C2, C3, RM);
2776 break;
2777 }
2778 if (mayFoldConstrained(
2779 const_cast<ConstrainedFPIntrinsic *>(ConstrIntr), St))
2780 return ConstantFP::get(Ty->getContext(), Res);
2781 return nullptr;
2782 }
2783
2784 switch (IntrinsicID) {
2785 default: break;
2786 case Intrinsic::amdgcn_fma_legacy: {
2787 // The legacy behaviour is that multiplying +/- 0.0 by anything, even
2788 // NaN or infinity, gives +0.0.
2789 if (C1.isZero() || C2.isZero()) {
2790 // It's tempting to just return C3 here, but that would give the
2791 // wrong result if C3 was -0.0.
2792 return ConstantFP::get(Ty->getContext(), APFloat(0.0f) + C3);
2793 }
2794 LLVM_FALLTHROUGH;
2795 }
2796 case Intrinsic::fma:
2797 case Intrinsic::fmuladd: {
2798 APFloat V = C1;
2799 V.fusedMultiplyAdd(C2, C3, APFloat::rmNearestTiesToEven);
2800 return ConstantFP::get(Ty->getContext(), V);
2801 }
2802 case Intrinsic::amdgcn_cubeid:
2803 case Intrinsic::amdgcn_cubema:
2804 case Intrinsic::amdgcn_cubesc:
2805 case Intrinsic::amdgcn_cubetc: {
2806 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(IntrinsicID, C1, C2, C3);
2807 return ConstantFP::get(Ty->getContext(), V);
2808 }
2809 }
2810 }
2811 }
2812 }
2813
2814 if (IntrinsicID == Intrinsic::smul_fix ||
2815 IntrinsicID == Intrinsic::smul_fix_sat) {
2816 // poison * C -> poison
2817 // C * poison -> poison
2818 if (isa<PoisonValue>(Operands[0]) || isa<PoisonValue>(Operands[1]))
2819 return PoisonValue::get(Ty);
2820
2821 const APInt *C0, *C1;
2822 if (!getConstIntOrUndef(Operands[0], C0) ||
2823 !getConstIntOrUndef(Operands[1], C1))
2824 return nullptr;
2825
2826 // undef * C -> 0
2827 // C * undef -> 0
2828 if (!C0 || !C1)
2829 return Constant::getNullValue(Ty);
2830
2831 // This code performs rounding towards negative infinity in case the result
2832 // cannot be represented exactly for the given scale. Targets that do care
2833 // about rounding should use a target hook for specifying how rounding
2834 // should be done, and provide their own folding to be consistent with
2835 // rounding. This is the same approach as used by
2836 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
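// For example, with a scale of 1 (one fractional bit), smul_fix(i32 9,
// i32 3, i32 1) computes 4.5 * 1.5 = 6.75, which rounds down to 6.5:
// (9 * 3) >> 1 = 13.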
2837 unsigned Scale = cast<ConstantInt>(Operands[2])->getZExtValue();
2838 unsigned Width = C0->getBitWidth();
2839 assert(Scale < Width && "Illegal scale.");
2840 unsigned ExtendedWidth = Width * 2;
2841 APInt Product = (C0->sextOrSelf(ExtendedWidth) *
2842 C1->sextOrSelf(ExtendedWidth)).ashr(Scale);
2843 if (IntrinsicID == Intrinsic::smul_fix_sat) {
2844 APInt Max = APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
2845 APInt Min = APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
2846 Product = APIntOps::smin(Product, Max);
2847 Product = APIntOps::smax(Product, Min);
2848 }
2849 return ConstantInt::get(Ty->getContext(), Product.sextOrTrunc(Width));
2850 }
2851
2852 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2853 const APInt *C0, *C1, *C2;
2854 if (!getConstIntOrUndef(Operands[0], C0) ||
2855 !getConstIntOrUndef(Operands[1], C1) ||
2856 !getConstIntOrUndef(Operands[2], C2))
2857 return nullptr;
2858
2859 bool IsRight = IntrinsicID == Intrinsic::fshr;
2860 if (!C2)
2861 return Operands[IsRight ? 1 : 0];
2862 if (!C0 && !C1)
2863 return UndefValue::get(Ty);
2864
2865 // The shift amount is interpreted as modulo the bitwidth. If the shift
2866 // amount is effectively 0, avoid UB due to oversized inverse shift below.
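// For example, fshl(i8 0xAB, i8 0xCD, i8 12) uses ShAmt = 12 % 8 = 4 and
// folds to (0xAB << 4) | (0xCD >> 4) = 0xBC.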
2867 unsigned BitWidth = C2->getBitWidth();
2868 unsigned ShAmt = C2->urem(BitWidth);
2869 if (!ShAmt)
2870 return Operands[IsRight ? 1 : 0];
2871
2872 // (C0 << ShlAmt) | (C1 >> LshrAmt)
2873 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2874 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2875 if (!C0)
2876 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2877 if (!C1)
2878 return ConstantInt::get(Ty, C0->shl(ShlAmt));
2879 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2880 }
2881
2882 if (IntrinsicID == Intrinsic::amdgcn_perm)
2883 return ConstantFoldAMDGCNPermIntrinsic(Operands, Ty);
2884
2885 return nullptr;
2886 }
2887
2888 static Constant *ConstantFoldScalarCall(StringRef Name,
2889 Intrinsic::ID IntrinsicID,
2890 Type *Ty,
2891 ArrayRef<Constant *> Operands,
2892 const TargetLibraryInfo *TLI,
2893 const CallBase *Call) {
2894 if (Operands.size() == 1)
2895 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
2896
2897 if (Operands.size() == 2)
2898 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
2899
2900 if (Operands.size() == 3)
2901 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
2902
2903 return nullptr;
2904 }
2905
2906 static Constant *ConstantFoldFixedVectorCall(
2907 StringRef Name, Intrinsic::ID IntrinsicID, FixedVectorType *FVTy,
2908 ArrayRef<Constant *> Operands, const DataLayout &DL,
2909 const TargetLibraryInfo *TLI, const CallBase *Call) {
2910 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
2911 SmallVector<Constant *, 4> Lane(Operands.size());
2912 Type *Ty = FVTy->getElementType();
2913
2914 switch (IntrinsicID) {
2915 case Intrinsic::masked_load: {
2916 auto *SrcPtr = Operands[0];
2917 auto *Mask = Operands[2];
2918 auto *Passthru = Operands[3];
2919
2920 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
2921
2922 SmallVector<Constant *, 32> NewElements;
2923 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
2924 auto *MaskElt = Mask->getAggregateElement(I);
2925 if (!MaskElt)
2926 break;
2927 auto *PassthruElt = Passthru->getAggregateElement(I);
2928 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2929 if (isa<UndefValue>(MaskElt)) {
2930 if (PassthruElt)
2931 NewElements.push_back(PassthruElt);
2932 else if (VecElt)
2933 NewElements.push_back(VecElt);
2934 else
2935 return nullptr;
// An undef mask element may legitimately resolve either way, so keep the
// element chosen above and move on to the next lane; otherwise the
// null/one checks below would reject the whole fold.
continue;
2936 }
2937 if (MaskElt->isNullValue()) {
2938 if (!PassthruElt)
2939 return nullptr;
2940 NewElements.push_back(PassthruElt);
2941 } else if (MaskElt->isOneValue()) {
2942 if (!VecElt)
2943 return nullptr;
2944 NewElements.push_back(VecElt);
2945 } else {
2946 return nullptr;
2947 }
2948 }
2949 if (NewElements.size() != FVTy->getNumElements())
2950 return nullptr;
2951 return ConstantVector::get(NewElements);
2952 }
2953 case Intrinsic::arm_mve_vctp8:
2954 case Intrinsic::arm_mve_vctp16:
2955 case Intrinsic::arm_mve_vctp32:
2956 case Intrinsic::arm_mve_vctp64: {
2957 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2958 unsigned Lanes = FVTy->getNumElements();
2959 uint64_t Limit = Op->getZExtValue();
2960 // vctp64 is currently modelled as returning a v4i1, not a v2i1. Make
2961 // sure we get the limit right in that case and set all relevant lanes.
2962 if (IntrinsicID == Intrinsic::arm_mve_vctp64)
2963 Limit *= 2;
2964
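// E.g. vctp8(3) folds to a 16-lane mask with only the low three lanes set.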
2965 SmallVector<Constant *, 16> NCs;
2966 for (unsigned i = 0; i < Lanes; i++) {
2967 if (i < Limit)
2968 NCs.push_back(ConstantInt::getTrue(Ty));
2969 else
2970 NCs.push_back(ConstantInt::getFalse(Ty));
2971 }
2972 return ConstantVector::get(NCs);
2973 }
2974 break;
2975 }
2976 case Intrinsic::get_active_lane_mask: {
2977 auto *Op0 = dyn_cast<ConstantInt>(Operands[0]);
2978 auto *Op1 = dyn_cast<ConstantInt>(Operands[1]);
2979 if (Op0 && Op1) {
2980 unsigned Lanes = FVTy->getNumElements();
2981 uint64_t Base = Op0->getZExtValue();
2982 uint64_t Limit = Op1->getZExtValue();
2983
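// Lane I of the result is true iff Base + I < Limit; e.g. a base of 2 and
// a limit of 5 in a 4-lane mask folds to <true, true, true, false>.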
2984 SmallVector<Constant *, 16> NCs;
2985 for (unsigned i = 0; i < Lanes; i++) {
2986 if (Base + i < Limit)
2987 NCs.push_back(ConstantInt::getTrue(Ty));
2988 else
2989 NCs.push_back(ConstantInt::getFalse(Ty));
2990 }
2991 return ConstantVector::get(NCs);
2992 }
2993 break;
2994 }
2995 default:
2996 break;
2997 }
2998
2999 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
3000 // Gather a column of constants.
3001 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
3002 // Some intrinsics use a scalar type for certain arguments.
3003 if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
3004 Lane[J] = Operands[J];
3005 continue;
3006 }
3007
3008 Constant *Agg = Operands[J]->getAggregateElement(I);
3009 if (!Agg)
3010 return nullptr;
3011
3012 Lane[J] = Agg;
3013 }
3014
3015 // Use the regular scalar folding to simplify this column.
3016 Constant *Folded =
3017 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
3018 if (!Folded)
3019 return nullptr;
3020 Result[I] = Folded;
3021 }
3022
3023 return ConstantVector::get(Result);
3024 }
3025
3026 static Constant *ConstantFoldScalableVectorCall(
3027 StringRef Name, Intrinsic::ID IntrinsicID, ScalableVectorType *SVTy,
3028 ArrayRef<Constant *> Operands, const DataLayout &DL,
3029 const TargetLibraryInfo *TLI, const CallBase *Call) {
3030 switch (IntrinsicID) {
3031 case Intrinsic::aarch64_sve_convert_from_svbool: {
3032 auto *Src = dyn_cast<Constant>(Operands[0]);
3033 if (!Src || !Src->isNullValue())
3034 break;
3035
3036 return ConstantInt::getFalse(SVTy);
3037 }
3038 default:
3039 break;
3040 }
3041 return nullptr;
3042 }
3043
3044 } // end anonymous namespace
3045
3046 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
3047 ArrayRef<Constant *> Operands,
3048 const TargetLibraryInfo *TLI) {
3049 if (Call->isNoBuiltin())
3050 return nullptr;
3051 if (!F->hasName())
3052 return nullptr;
3053
3054 // If this is not an intrinsic and not recognized as a library call, bail out.
3055 if (F->getIntrinsicID() == Intrinsic::not_intrinsic) {
3056 if (!TLI)
3057 return nullptr;
3058 LibFunc LibF;
3059 if (!TLI->getLibFunc(*F, LibF))
3060 return nullptr;
3061 }
3062
3063 StringRef Name = F->getName();
3064 Type *Ty = F->getReturnType();
3065 if (auto *FVTy = dyn_cast<FixedVectorType>(Ty))
3066 return ConstantFoldFixedVectorCall(
3067 Name, F->getIntrinsicID(), FVTy, Operands,
3068 F->getParent()->getDataLayout(), TLI, Call);
3069
3070 if (auto *SVTy = dyn_cast<ScalableVectorType>(Ty))
3071 return ConstantFoldScalableVectorCall(
3072 Name, F->getIntrinsicID(), SVTy, Operands,
3073 F->getParent()->getDataLayout(), TLI, Call);
3074
3075 // TODO: If this is a library function, we already discovered that above,
3076 // so we should pass the LibFunc, not the name (and it might be better
3077 // still to separate intrinsic handling from libcalls).
3078 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
3079 Call);
3080 }
3081
3082 bool llvm::isMathLibCallNoop(const CallBase *Call,
3083 const TargetLibraryInfo *TLI) {
3084 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
3085 // (and to some extent ConstantFoldScalarCall).
3086 if (Call->isNoBuiltin() || Call->isStrictFP())
3087 return false;
3088 Function *F = Call->getCalledFunction();
3089 if (!F)
3090 return false;
3091
3092 LibFunc Func;
3093 if (!TLI || !TLI->getLibFunc(*F, Func))
3094 return false;
3095
3096 if (Call->arg_size() == 1) {
3097 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
3098 const APFloat &Op = OpC->getValueAPF();
3099 switch (Func) {
      case LibFunc_logl:
      case LibFunc_log:
      case LibFunc_logf:
      case LibFunc_log2l:
      case LibFunc_log2:
      case LibFunc_log2f:
      case LibFunc_log10l:
      case LibFunc_log10:
      case LibFunc_log10f:
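        // The log family only fails on zero (pole error, ERANGE) or a
        // negative argument (domain error, EDOM); a NaN input propagates
        // quietly without touching errno.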
        return Op.isNaN() || (!Op.isZero() && !Op.isNegative());

      case LibFunc_expl:
      case LibFunc_exp:
      case LibFunc_expf:
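        // For double, exp overflows above ln(DBL_MAX) ~= 709.8 and underflows
        // to zero below roughly -745.1; between those bounds the call cannot
        // set errno.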
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
        break;

      case LibFunc_exp2l:
      case LibFunc_exp2:
      case LibFunc_exp2f:
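        // For double, exp2 overflows above 1023 (2^1024 > DBL_MAX) and
        // underflows to zero below -1074 (the smallest denormal is 2^-1074).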
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
        break;

      case LibFunc_sinl:
      case LibFunc_sin:
      case LibFunc_sinf:
      case LibFunc_cosl:
      case LibFunc_cos:
      case LibFunc_cosf:
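        // sin and cos are defined for every finite argument; only an infinite
        // input raises a domain error.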
        return !Op.isInfinity();

      case LibFunc_tanl:
      case LibFunc_tan:
      case LibFunc_tanf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = OpC->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy())
          return ConstantFoldFP(tan, OpC->getValueAPF(), Ty) != nullptr;
        break;
      }

      case LibFunc_asinl:
      case LibFunc_asin:
      case LibFunc_asinf:
      case LibFunc_acosl:
      case LibFunc_acos:
      case LibFunc_acosf:
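        // asin and acos raise a domain error (EDOM) outside the closed
        // interval [-1, 1]; NaN compares unordered against both bounds, so it
        // falls through as a no-op as well.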
        return !(Op < APFloat(Op.getSemantics(), "-1") ||
                 Op > APFloat(Op.getSemantics(), "1"));

      case LibFunc_sinh:
      case LibFunc_cosh:
      case LibFunc_sinhf:
      case LibFunc_coshf:
      case LibFunc_sinhl:
      case LibFunc_coshl:
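        // sinh and cosh grow like exp(|x|)/2, so for double they overflow
        // just past ln(DBL_MAX) + ln(2) ~= 710.5; +/-710 is safely inside.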
        // FIXME: These boundaries are slightly conservative.
        if (OpC->getType()->isDoubleTy())
          return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
        if (OpC->getType()->isFloatTy())
          return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
        break;

      case LibFunc_sqrtl:
      case LibFunc_sqrt:
      case LibFunc_sqrtf:
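        // sqrt only raises EDOM for negative non-zero arguments; a NaN
        // propagates quietly and sqrt(-0.0) is defined to be -0.0.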
        return Op.isNaN() || Op.isZero() || !Op.isNegative();

      // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
      // maybe others?
      default:
        break;
      }
    }
  }

  if (Call->arg_size() == 2) {
    ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
    ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
    if (Op0C && Op1C) {
      const APFloat &Op0 = Op0C->getValueAPF();
      const APFloat &Op1 = Op1C->getValueAPF();

      switch (Func) {
      case LibFunc_powl:
      case LibFunc_pow:
      case LibFunc_powf: {
        // FIXME: Stop using the host math library.
        // FIXME: The computation isn't done in the right precision.
        Type *Ty = Op0C->getType();
        if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
          if (Ty == Op1C->getType())
            return ConstantFoldBinaryFP(pow, Op0, Op1, Ty) != nullptr;
        }
        break;
      }

      case LibFunc_fmodl:
      case LibFunc_fmod:
      case LibFunc_fmodf:
      case LibFunc_remainderl:
      case LibFunc_remainder:
      case LibFunc_remainderf:
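        // fmod and remainder raise a domain error only when x is infinite or
        // y is zero; NaN operands just produce a quiet NaN result.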
        return Op0.isNaN() || Op1.isNaN() ||
               (!Op0.isInfinity() && !Op1.isZero());

      default:
        break;
      }
    }
  }

  return false;
}

void TargetFolder::anchor() {}