//===-- ConstantFolding.cpp - Fold instructions into constants ------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines routines for folding instructions into constants.
//
// Also, to supplement the basic IR ConstantExpr simplifications,
// this file defines some additional folding routines that can make use of
// DataLayout information. These functions cannot go in IR due to library
// dependency issues.
//
//===----------------------------------------------------------------------===//

#include "llvm/Analysis/ConstantFolding.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/Analysis/TargetFolder.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/Analysis/VectorUtils.h"
#include "llvm/Config/config.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsAMDGPU.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cerrno>
#include <cfenv>
#include <cmath>
#include <cstddef>
#include <cstdint>

using namespace llvm;

namespace {

//===----------------------------------------------------------------------===//
// Constant Folding internal helper functions
//===----------------------------------------------------------------------===//

static Constant *foldConstVectorToAPInt(APInt &Result, Type *DestTy,
                                        Constant *C, Type *SrcEltTy,
                                        unsigned NumSrcElts,
                                        const DataLayout &DL) {
  // Now that we know that the input value is a vector of integers, just shift
  // and insert them into our result.
  unsigned BitShift = DL.getTypeSizeInBits(SrcEltTy);
  for (unsigned i = 0; i != NumSrcElts; ++i) {
    Constant *Element;
    if (DL.isLittleEndian())
      Element = C->getAggregateElement(NumSrcElts - i - 1);
    else
      Element = C->getAggregateElement(i);

    if (Element && isa<UndefValue>(Element)) {
      Result <<= BitShift;
      continue;
    }

    auto *ElementCI = dyn_cast_or_null<ConstantInt>(Element);
    if (!ElementCI)
      return ConstantExpr::getBitCast(C, DestTy);

    Result <<= BitShift;
    Result |= ElementCI->getValue().zextOrSelf(Result.getBitWidth());
  }

  return nullptr;
}

/// Constant fold bitcast, symbolically evaluating it with DataLayout.
/// This always returns a non-null constant, but it may be a
/// ConstantExpr if unfoldable.
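///
/// For example (an illustrative case, assuming a little-endian target):
///   bitcast <2 x i16> <i16 1, i16 2> to i32
/// folds to i32 0x00020001, since element 0 occupies the low bits.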
Constant *FoldBitCast(Constant *C, Type *DestTy, const DataLayout &DL) {
  assert(CastInst::castIsValid(Instruction::BitCast, C, DestTy) &&
         "Invalid constantexpr bitcast!");

  // Catch the obvious splat cases.
  if (C->isNullValue() && !DestTy->isX86_MMXTy())
    return Constant::getNullValue(DestTy);
  if (C->isAllOnesValue() && !DestTy->isX86_MMXTy() &&
      !DestTy->isPtrOrPtrVectorTy()) // Don't get ones for ptr types!
    return Constant::getAllOnesValue(DestTy);

  if (auto *VTy = dyn_cast<VectorType>(C->getType())) {
    // Handle a vector->scalar integer/fp cast.
    if (isa<IntegerType>(DestTy) || DestTy->isFloatingPointTy()) {
      unsigned NumSrcElts = VTy->getNumElements();
      Type *SrcEltTy = VTy->getElementType();

      // If the vector is a vector of floating-point values, convert it to a
      // vector of integers to simplify things.
      if (SrcEltTy->isFloatingPointTy()) {
        unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
        auto *SrcIVTy = FixedVectorType::get(
            IntegerType::get(C->getContext(), FPWidth), NumSrcElts);
        // Ask IR to do the conversion now that #elts line up.
        C = ConstantExpr::getBitCast(C, SrcIVTy);
      }

      APInt Result(DL.getTypeSizeInBits(DestTy), 0);
      if (Constant *CE = foldConstVectorToAPInt(Result, DestTy, C,
                                                SrcEltTy, NumSrcElts, DL))
        return CE;

      if (isa<IntegerType>(DestTy))
        return ConstantInt::get(DestTy, Result);

      APFloat FP(DestTy->getFltSemantics(), Result);
      return ConstantFP::get(DestTy->getContext(), FP);
    }
  }

  // The code below only handles casts to vectors currently.
  auto *DestVTy = dyn_cast<VectorType>(DestTy);
  if (!DestVTy)
    return ConstantExpr::getBitCast(C, DestTy);

  // If this is a scalar -> vector cast, convert the input into a <1 x scalar>
  // vector so the code below can handle it uniformly.
  if (isa<ConstantFP>(C) || isa<ConstantInt>(C)) {
    Constant *Ops = C; // don't take the address of C!
    return FoldBitCast(ConstantVector::get(Ops), DestTy, DL);
  }

  // If this is a bitcast from constant vector -> vector, fold it.
  if (!isa<ConstantDataVector>(C) && !isa<ConstantVector>(C))
    return ConstantExpr::getBitCast(C, DestTy);

  // If the element types match, IR can fold it.
  unsigned NumDstElt = DestVTy->getNumElements();
  unsigned NumSrcElt = cast<VectorType>(C->getType())->getNumElements();
  if (NumDstElt == NumSrcElt)
    return ConstantExpr::getBitCast(C, DestTy);

  Type *SrcEltTy = cast<VectorType>(C->getType())->getElementType();
  Type *DstEltTy = DestVTy->getElementType();

  // Otherwise, we're changing the number of elements in a vector, which
  // requires endianness information to do the right thing. For example,
  //    bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  // folds to (little endian):
  //    <4 x i32> <i32 0, i32 0, i32 1, i32 0>
  // and to (big endian):
  //    <4 x i32> <i32 0, i32 0, i32 0, i32 1>

  // First things first. We only want to think about integers here, so if
  // we have something in FP form, recast it as integer.
  if (DstEltTy->isFloatingPointTy()) {
    // Fold to a vector of integers with same size as our FP type.
    unsigned FPWidth = DstEltTy->getPrimitiveSizeInBits();
    auto *DestIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumDstElt);
    // Recursively handle this integer conversion, if possible.
    C = FoldBitCast(C, DestIVTy, DL);

    // Finally, IR can handle this now that #elts line up.
    return ConstantExpr::getBitCast(C, DestTy);
  }

  // Okay, we know the destination is integer; if the input is FP, convert
  // it to integer first.
  if (SrcEltTy->isFloatingPointTy()) {
    unsigned FPWidth = SrcEltTy->getPrimitiveSizeInBits();
    auto *SrcIVTy = FixedVectorType::get(
        IntegerType::get(C->getContext(), FPWidth), NumSrcElt);
    // Ask IR to do the conversion now that #elts line up.
    C = ConstantExpr::getBitCast(C, SrcIVTy);
    // If IR wasn't able to fold it, bail out.
    if (!isa<ConstantVector>(C) && // FIXME: Remove ConstantVector.
        !isa<ConstantDataVector>(C))
      return C;
  }

  // Now we know that the input and output vectors are both integer vectors
  // of the same size, and that their #elements is not the same. Do the
  // conversion here, which depends on whether the input or output has
  // more elements.
  bool isLittleEndian = DL.isLittleEndian();

  SmallVector<Constant*, 32> Result;
  if (NumDstElt < NumSrcElt) {
    // Handle: bitcast (<4 x i32> <i32 0, i32 1, i32 2, i32 3> to <2 x i64>)
    Constant *Zero = Constant::getNullValue(DstEltTy);
    unsigned Ratio = NumSrcElt/NumDstElt;
    unsigned SrcBitSize = SrcEltTy->getPrimitiveSizeInBits();
    unsigned SrcElt = 0;
    for (unsigned i = 0; i != NumDstElt; ++i) {
      // Build each element of the result.
      Constant *Elt = Zero;
      unsigned ShiftAmt = isLittleEndian ? 0 : SrcBitSize*(Ratio-1);
      for (unsigned j = 0; j != Ratio; ++j) {
        Constant *Src = C->getAggregateElement(SrcElt++);
        if (Src && isa<UndefValue>(Src))
          Src = Constant::getNullValue(
              cast<VectorType>(C->getType())->getElementType());
        else
          Src = dyn_cast_or_null<ConstantInt>(Src);
        if (!Src) // Reject constantexpr elements.
          return ConstantExpr::getBitCast(C, DestTy);

        // Zero extend the element to the right size.
        Src = ConstantExpr::getZExt(Src, Elt->getType());

        // Shift it to the right place, depending on endianness.
        Src = ConstantExpr::getShl(Src,
                                   ConstantInt::get(Src->getType(), ShiftAmt));
        ShiftAmt += isLittleEndian ? SrcBitSize : -SrcBitSize;

        // Mix it in.
        Elt = ConstantExpr::getOr(Elt, Src);
      }
      Result.push_back(Elt);
    }
    return ConstantVector::get(Result);
  }

  // Handle: bitcast (<2 x i64> <i64 0, i64 1> to <4 x i32>)
  unsigned Ratio = NumDstElt/NumSrcElt;
  unsigned DstBitSize = DL.getTypeSizeInBits(DstEltTy);

  // Loop over each source value, expanding into multiple results.
  for (unsigned i = 0; i != NumSrcElt; ++i) {
    auto *Element = C->getAggregateElement(i);

    if (!Element) // Reject constantexpr elements.
      return ConstantExpr::getBitCast(C, DestTy);

    if (isa<UndefValue>(Element)) {
      // Correctly propagate undef values.
      Result.append(Ratio, UndefValue::get(DstEltTy));
      continue;
    }

    auto *Src = dyn_cast<ConstantInt>(Element);
    if (!Src)
      return ConstantExpr::getBitCast(C, DestTy);

    unsigned ShiftAmt = isLittleEndian ? 0 : DstBitSize*(Ratio-1);
    for (unsigned j = 0; j != Ratio; ++j) {
      // Shift the piece of the value into the right place, depending on
      // endianness.
      Constant *Elt = ConstantExpr::getLShr(
          Src, ConstantInt::get(Src->getType(), ShiftAmt));
      ShiftAmt += isLittleEndian ? DstBitSize : -DstBitSize;

      // Truncate the element to an integer with the same pointer size and
      // convert the element back to a pointer using an inttoptr.
      if (DstEltTy->isPointerTy()) {
        IntegerType *DstIntTy = Type::getIntNTy(C->getContext(), DstBitSize);
        Constant *CE = ConstantExpr::getTrunc(Elt, DstIntTy);
        Result.push_back(ConstantExpr::getIntToPtr(CE, DstEltTy));
        continue;
      }

      // Truncate and remember this piece.
      Result.push_back(ConstantExpr::getTrunc(Elt, DstEltTy));
    }
  }

  return ConstantVector::get(Result);
}

} // end anonymous namespace

/// If this constant is a constant offset from a global, return the global and
/// the constant. Because of constantexprs, this function is recursive.
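///
/// For example (an illustrative case, assuming i32 array elements):
///   getelementptr ([4 x i32], [4 x i32]* @G, i32 0, i32 2)
/// yields GV = @G with Offset = 8.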
bool llvm::IsConstantOffsetFromGlobal(Constant *C, GlobalValue *&GV,
                                      APInt &Offset, const DataLayout &DL) {
  // Trivial case, constant is the global.
  if ((GV = dyn_cast<GlobalValue>(C))) {
    unsigned BitWidth = DL.getIndexTypeSizeInBits(GV->getType());
    Offset = APInt(BitWidth, 0);
    return true;
  }

  // Otherwise, if this isn't a constant expr, bail out.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE) return false;

  // Look through ptr->int and ptr->ptr casts.
  if (CE->getOpcode() == Instruction::PtrToInt ||
      CE->getOpcode() == Instruction::BitCast)
    return IsConstantOffsetFromGlobal(CE->getOperand(0), GV, Offset, DL);

  // i32* getelementptr ([5 x i32]* @a, i32 0, i32 5)
  auto *GEP = dyn_cast<GEPOperator>(CE);
  if (!GEP)
    return false;

  unsigned BitWidth = DL.getIndexTypeSizeInBits(GEP->getType());
  APInt TmpOffset(BitWidth, 0);

  // If the base isn't a global+constant, we aren't either.
  if (!IsConstantOffsetFromGlobal(CE->getOperand(0), GV, TmpOffset, DL))
    return false;

  // Otherwise, add any offset that our operands provide.
  if (!GEP->accumulateConstantOffset(DL, TmpOffset))
    return false;

  Offset = TmpOffset;
  return true;
}

Constant *llvm::ConstantFoldLoadThroughBitcast(Constant *C, Type *DestTy,
                                               const DataLayout &DL) {
  do {
    Type *SrcTy = C->getType();
    uint64_t DestSize = DL.getTypeSizeInBits(DestTy);
    uint64_t SrcSize = DL.getTypeSizeInBits(SrcTy);
    if (SrcSize < DestSize)
      return nullptr;

    // Catch the obvious splat cases (since all-zeros can coerce non-integral
    // pointers legally).
    if (C->isNullValue() && !DestTy->isX86_MMXTy())
      return Constant::getNullValue(DestTy);
    if (C->isAllOnesValue() &&
        (DestTy->isIntegerTy() || DestTy->isFloatingPointTy() ||
         DestTy->isVectorTy()) &&
        !DestTy->isX86_MMXTy() && !DestTy->isPtrOrPtrVectorTy())
      // Get ones when the input is trivial, but
      // only for supported types inside getAllOnesValue.
      return Constant::getAllOnesValue(DestTy);

    // If the type sizes are the same and a cast is legal, just directly
    // cast the constant.
    // But be careful not to coerce non-integral pointers illegally.
    if (SrcSize == DestSize &&
        DL.isNonIntegralPointerType(SrcTy->getScalarType()) ==
            DL.isNonIntegralPointerType(DestTy->getScalarType())) {
      Instruction::CastOps Cast = Instruction::BitCast;
      // If we are going from a pointer to int or vice versa, we spell the cast
      // differently.
      if (SrcTy->isIntegerTy() && DestTy->isPointerTy())
        Cast = Instruction::IntToPtr;
      else if (SrcTy->isPointerTy() && DestTy->isIntegerTy())
        Cast = Instruction::PtrToInt;

      if (CastInst::castIsValid(Cast, C, DestTy))
        return ConstantExpr::getCast(Cast, C, DestTy);
    }

    // If this isn't an aggregate type, there is nothing we can do to drill
    // down and find a bitcastable constant.
    if (!SrcTy->isAggregateType())
      return nullptr;

    // We're simulating a load through a pointer that was bitcast to point to
    // a different type, so we can try to walk down through the initial
    // elements of an aggregate to see if some part of the aggregate is
    // castable to implement the "load" semantic model.
    if (SrcTy->isStructTy()) {
      // Struct types might have leading zero-length elements like [0 x i32],
      // which are certainly not what we are looking for, so skip them.
      unsigned Elem = 0;
      Constant *ElemC;
      do {
        ElemC = C->getAggregateElement(Elem++);
      } while (ElemC && DL.getTypeSizeInBits(ElemC->getType()).isZero());
      C = ElemC;
    } else {
      C = C->getAggregateElement(0u);
    }
  } while (C);

  return nullptr;
}

namespace {

/// Recursive helper to read bits out of global. C is the constant being copied
/// out of. ByteOffset is an offset into C. CurPtr is the pointer to copy
/// results into and BytesLeft is the number of bytes left in
/// the CurPtr buffer. DL is the DataLayout.
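///
/// For example (an illustrative case, assuming a little-endian target):
/// reading 2 bytes at ByteOffset 1 out of the i32 constant 0x11223344 stores
/// the bytes {0x33, 0x22} into CurPtr.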
bool ReadDataFromGlobal(Constant *C, uint64_t ByteOffset, unsigned char *CurPtr,
                        unsigned BytesLeft, const DataLayout &DL) {
  assert(ByteOffset <= DL.getTypeAllocSize(C->getType()) &&
         "Out of range access");

  // If this element is zero or undefined, we can just return since *CurPtr is
  // zero initialized.
  if (isa<ConstantAggregateZero>(C) || isa<UndefValue>(C))
    return true;

  if (auto *CI = dyn_cast<ConstantInt>(C)) {
    if (CI->getBitWidth() > 64 ||
        (CI->getBitWidth() & 7) != 0)
      return false;

    uint64_t Val = CI->getZExtValue();
    unsigned IntBytes = unsigned(CI->getBitWidth()/8);

    for (unsigned i = 0; i != BytesLeft && ByteOffset != IntBytes; ++i) {
      int n = ByteOffset;
      if (!DL.isLittleEndian())
        n = IntBytes - n - 1;
      CurPtr[i] = (unsigned char)(Val >> (n * 8));
      ++ByteOffset;
    }
    return true;
  }

  if (auto *CFP = dyn_cast<ConstantFP>(C)) {
    if (CFP->getType()->isDoubleTy()) {
      C = FoldBitCast(C, Type::getInt64Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isFloatTy()) {
      C = FoldBitCast(C, Type::getInt32Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    if (CFP->getType()->isHalfTy()) {
      C = FoldBitCast(C, Type::getInt16Ty(C->getContext()), DL);
      return ReadDataFromGlobal(C, ByteOffset, CurPtr, BytesLeft, DL);
    }
    return false;
  }

  if (auto *CS = dyn_cast<ConstantStruct>(C)) {
    const StructLayout *SL = DL.getStructLayout(CS->getType());
    unsigned Index = SL->getElementContainingOffset(ByteOffset);
    uint64_t CurEltOffset = SL->getElementOffset(Index);
    ByteOffset -= CurEltOffset;

    while (true) {
      // If the element access is to the element itself and not to tail
      // padding, read the bytes from the element.
      uint64_t EltSize = DL.getTypeAllocSize(CS->getOperand(Index)->getType());

      if (ByteOffset < EltSize &&
          !ReadDataFromGlobal(CS->getOperand(Index), ByteOffset, CurPtr,
                              BytesLeft, DL))
        return false;

      ++Index;

      // Check to see if we read from the last struct element; if so, we're
      // done.
      if (Index == CS->getType()->getNumElements())
        return true;

      // If we read all of the bytes we needed from this element we're done.
      uint64_t NextEltOffset = SL->getElementOffset(Index);

      if (BytesLeft <= NextEltOffset - CurEltOffset - ByteOffset)
        return true;

      // Move to the next element of the struct.
      CurPtr += NextEltOffset - CurEltOffset - ByteOffset;
      BytesLeft -= NextEltOffset - CurEltOffset - ByteOffset;
      ByteOffset = 0;
      CurEltOffset = NextEltOffset;
    }
    // not reached.
  }

  if (isa<ConstantArray>(C) || isa<ConstantVector>(C) ||
      isa<ConstantDataSequential>(C)) {
    uint64_t NumElts;
    Type *EltTy;
    if (auto *AT = dyn_cast<ArrayType>(C->getType())) {
      NumElts = AT->getNumElements();
      EltTy = AT->getElementType();
    } else {
      NumElts = cast<VectorType>(C->getType())->getNumElements();
      EltTy = cast<VectorType>(C->getType())->getElementType();
    }
    uint64_t EltSize = DL.getTypeAllocSize(EltTy);
    uint64_t Index = ByteOffset / EltSize;
    uint64_t Offset = ByteOffset - Index * EltSize;

    for (; Index != NumElts; ++Index) {
      if (!ReadDataFromGlobal(C->getAggregateElement(Index), Offset, CurPtr,
                              BytesLeft, DL))
        return false;

      uint64_t BytesWritten = EltSize - Offset;
      assert(BytesWritten <= EltSize && "Not indexing into this element?");
      if (BytesWritten >= BytesLeft)
        return true;

      Offset = 0;
      BytesLeft -= BytesWritten;
      CurPtr += BytesWritten;
    }
    return true;
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->getOpcode() == Instruction::IntToPtr &&
        CE->getOperand(0)->getType() == DL.getIntPtrType(CE->getType())) {
      return ReadDataFromGlobal(CE->getOperand(0), ByteOffset, CurPtr,
                                BytesLeft, DL);
    }
  }

  // Otherwise, unknown initializer type.
  return false;
}

Constant *FoldReinterpretLoadFromConstPtr(Constant *C, Type *LoadTy,
                                          const DataLayout &DL) {
  // Bail out early. We do not expect to load from a global of scalable
  // vector type.
  if (isa<ScalableVectorType>(LoadTy))
    return nullptr;

  auto *PTy = cast<PointerType>(C->getType());
  auto *IntType = dyn_cast<IntegerType>(LoadTy);

  // If this isn't an integer load we can't fold it directly.
  if (!IntType) {
    unsigned AS = PTy->getAddressSpace();

    // If this is a float/double load, we can try folding it as an int32/64
    // load and then bitcast the result. This can be useful for union cases.
    // Note that address spaces don't matter here since we're not going to
    // result in an actual new load.
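    //
    // For example (an illustrative case): a float load from a global holding
    // the i32 constant 0x3f800000 folds to float 1.0.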
    Type *MapTy;
    if (LoadTy->isHalfTy())
      MapTy = Type::getInt16Ty(C->getContext());
    else if (LoadTy->isFloatTy())
      MapTy = Type::getInt32Ty(C->getContext());
    else if (LoadTy->isDoubleTy())
      MapTy = Type::getInt64Ty(C->getContext());
    else if (LoadTy->isVectorTy()) {
      MapTy = PointerType::getIntNTy(
          C->getContext(), DL.getTypeSizeInBits(LoadTy).getFixedSize());
    } else
      return nullptr;

    C = FoldBitCast(C, MapTy->getPointerTo(AS), DL);
    if (Constant *Res = FoldReinterpretLoadFromConstPtr(C, MapTy, DL)) {
      if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
        // Materializing a zero can be done trivially without a bitcast.
        return Constant::getNullValue(LoadTy);
      Type *CastTy =
          LoadTy->isPtrOrPtrVectorTy() ? DL.getIntPtrType(LoadTy) : LoadTy;
      Res = FoldBitCast(Res, CastTy, DL);
      if (LoadTy->isPtrOrPtrVectorTy()) {
        // For a vector of pointers, we need to first convert to a vector of
        // integers, then do a vector inttoptr.
        if (Res->isNullValue() && !LoadTy->isX86_MMXTy())
          return Constant::getNullValue(LoadTy);
        if (DL.isNonIntegralPointerType(LoadTy->getScalarType()))
          // Be careful not to replace a load of an addrspace value with an
          // inttoptr here.
          return nullptr;
        Res = ConstantExpr::getCast(Instruction::IntToPtr, Res, LoadTy);
      }
      return Res;
    }
    return nullptr;
  }

  unsigned BytesLoaded = (IntType->getBitWidth() + 7) / 8;
  if (BytesLoaded > 32 || BytesLoaded == 0)
    return nullptr;

  GlobalValue *GVal;
  APInt OffsetAI;
  if (!IsConstantOffsetFromGlobal(C, GVal, OffsetAI, DL))
    return nullptr;

  auto *GV = dyn_cast<GlobalVariable>(GVal);
  if (!GV || !GV->isConstant() || !GV->hasDefinitiveInitializer() ||
      !GV->getInitializer()->getType()->isSized())
    return nullptr;

  int64_t Offset = OffsetAI.getSExtValue();
  int64_t InitializerSize =
      DL.getTypeAllocSize(GV->getInitializer()->getType()).getFixedSize();

  // If we're reading entirely before the constant, the result is undefined.
  if (Offset <= -1 * static_cast<int64_t>(BytesLoaded))
    return UndefValue::get(IntType);

  // If we're reading entirely past the end of the constant, the result is
  // also undefined.
  if (Offset >= InitializerSize)
    return UndefValue::get(IntType);

  unsigned char RawBytes[32] = {0};
  unsigned char *CurPtr = RawBytes;
  unsigned BytesLeft = BytesLoaded;

  // If we're loading off the beginning of the global, some bytes may be valid.
  if (Offset < 0) {
    CurPtr += -Offset;
    BytesLeft += Offset;
    Offset = 0;
  }

  if (!ReadDataFromGlobal(GV->getInitializer(), Offset, CurPtr, BytesLeft, DL))
    return nullptr;

  APInt ResultVal = APInt(IntType->getBitWidth(), 0);
  if (DL.isLittleEndian()) {
    ResultVal = RawBytes[BytesLoaded - 1];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[BytesLoaded - 1 - i];
    }
  } else {
    ResultVal = RawBytes[0];
    for (unsigned i = 1; i != BytesLoaded; ++i) {
      ResultVal <<= 8;
      ResultVal |= RawBytes[i];
    }
  }

  return ConstantInt::get(IntType->getContext(), ResultVal);
}

Constant *ConstantFoldLoadThroughBitcastExpr(ConstantExpr *CE, Type *DestTy,
                                             const DataLayout &DL) {
  auto *SrcPtr = CE->getOperand(0);
  auto *SrcPtrTy = dyn_cast<PointerType>(SrcPtr->getType());
  if (!SrcPtrTy)
    return nullptr;
  Type *SrcTy = SrcPtrTy->getPointerElementType();

  Constant *C = ConstantFoldLoadFromConstPtr(SrcPtr, SrcTy, DL);
  if (!C)
    return nullptr;

  return llvm::ConstantFoldLoadThroughBitcast(C, DestTy, DL);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldLoadFromConstPtr(Constant *C, Type *Ty,
                                             const DataLayout &DL) {
  // First, try the easy cases:
  if (auto *GV = dyn_cast<GlobalVariable>(C))
    if (GV->isConstant() && GV->hasDefinitiveInitializer())
      return GV->getInitializer();

  if (auto *GA = dyn_cast<GlobalAlias>(C))
    if (GA->getAliasee() && !GA->isInterposable())
      return ConstantFoldLoadFromConstPtr(GA->getAliasee(), Ty, DL);

  // If the loaded value isn't a constant expr, we can't handle it.
  auto *CE = dyn_cast<ConstantExpr>(C);
  if (!CE)
    return nullptr;

  if (CE->getOpcode() == Instruction::GetElementPtr) {
    if (auto *GV = dyn_cast<GlobalVariable>(CE->getOperand(0))) {
      if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
        if (Constant *V =
                ConstantFoldLoadThroughGEPConstantExpr(GV->getInitializer(), CE))
          return V;
      }
    }
  }

  if (CE->getOpcode() == Instruction::BitCast)
    if (Constant *LoadedC = ConstantFoldLoadThroughBitcastExpr(CE, Ty, DL))
      return LoadedC;

  // Instead of loading a constant C string, use the corresponding integer
  // value directly if the string length is small enough.
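  //
  // For example (an illustrative case, assuming a little-endian target): an
  // i32 load from a global holding "abc\0" folds to i32 0x00636261.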
  StringRef Str;
  if (getConstantStringInfo(CE, Str) && !Str.empty()) {
    size_t StrLen = Str.size();
    unsigned NumBits = Ty->getPrimitiveSizeInBits();
    // Replace load with immediate integer if the result is an integer or fp
    // value.
    if ((NumBits >> 3) == StrLen + 1 && (NumBits & 7) == 0 &&
        (isa<IntegerType>(Ty) || Ty->isFloatingPointTy())) {
      APInt StrVal(NumBits, 0);
      APInt SingleChar(NumBits, 0);
      if (DL.isLittleEndian()) {
        for (unsigned char C : reverse(Str.bytes())) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
      } else {
        for (unsigned char C : Str.bytes()) {
          SingleChar = static_cast<uint64_t>(C);
          StrVal = (StrVal << 8) | SingleChar;
        }
        // Append NUL at the end.
        SingleChar = 0;
        StrVal = (StrVal << 8) | SingleChar;
      }

      Constant *Res = ConstantInt::get(CE->getContext(), StrVal);
      if (Ty->isFloatingPointTy())
        Res = ConstantExpr::getBitCast(Res, Ty);
      return Res;
    }
  }

  // If this load comes from anywhere in a constant global, and if the global
  // is all undef or zero, we know what it loads.
  if (auto *GV = dyn_cast<GlobalVariable>(GetUnderlyingObject(CE, DL))) {
    if (GV->isConstant() && GV->hasDefinitiveInitializer()) {
      if (GV->getInitializer()->isNullValue())
        return Constant::getNullValue(Ty);
      if (isa<UndefValue>(GV->getInitializer()))
        return UndefValue::get(Ty);
    }
  }

  // Try hard to fold loads from bitcasted strange and non-type-safe things.
  return FoldReinterpretLoadFromConstPtr(CE, Ty, DL);
}

namespace {

Constant *ConstantFoldLoadInst(const LoadInst *LI, const DataLayout &DL) {
  if (LI->isVolatile()) return nullptr;

  if (auto *C = dyn_cast<Constant>(LI->getOperand(0)))
    return ConstantFoldLoadFromConstPtr(C, LI->getType(), DL);

  return nullptr;
}

/// One of Op0/Op1 is a constant expression.
/// Attempt to symbolically evaluate the result of a binary operator merging
/// these together, using the provided DataLayout.
Constant *SymbolicallyEvaluateBinop(unsigned Opc, Constant *Op0, Constant *Op1,
                                    const DataLayout &DL) {
  // SROA

  // Fold (and 0xffffffff00000000, (shl x, 32)) -> shl.
  // Fold (lshr (or X, Y), 32) -> (lshr [X/Y], 32) if one doesn't contribute
  // bits.

  if (Opc == Instruction::And) {
    KnownBits Known0 = computeKnownBits(Op0, DL);
    KnownBits Known1 = computeKnownBits(Op1, DL);
    if ((Known1.One | Known0.Zero).isAllOnesValue()) {
      // All the bits of Op0 that the 'and' could be masking are already zero.
      return Op0;
    }
    if ((Known0.One | Known1.Zero).isAllOnesValue()) {
      // All the bits of Op1 that the 'and' could be masking are already zero.
      return Op1;
    }

    Known0 &= Known1;
    if (Known0.isConstant())
      return ConstantInt::get(Op0->getType(), Known0.getConstant());
  }

  // If the constant expr is something like &A[123] - &A[4].f, fold this into a
  // constant. This happens frequently when iterating over a global array.
  if (Opc == Instruction::Sub) {
    GlobalValue *GV1, *GV2;
    APInt Offs1, Offs2;

    if (IsConstantOffsetFromGlobal(Op0, GV1, Offs1, DL))
      if (IsConstantOffsetFromGlobal(Op1, GV2, Offs2, DL) && GV1 == GV2) {
        unsigned OpSize = DL.getTypeSizeInBits(Op0->getType());

        // (&GV+C1) - (&GV+C2) -> C1-C2, pointer arithmetic cannot overflow.
        // PtrToInt may change the bitwidth so we have to convert to the right
        // size first.
        return ConstantInt::get(Op0->getType(), Offs1.zextOrTrunc(OpSize) -
                                                Offs2.zextOrTrunc(OpSize));
      }
  }

  return nullptr;
}

/// If array indices are not pointer-sized integers, explicitly cast them so
/// that they aren't implicitly cast by the getelementptr.
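///
/// For example (an illustrative case, assuming a 64-bit index type): the i16
/// index in getelementptr i32, i32* %p, i16 1 is rewritten as a
/// sign-extended i64 index before folding.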
Constant *CastGEPIndices(Type *SrcElemTy, ArrayRef<Constant *> Ops,
                         Type *ResultTy, Optional<unsigned> InRangeIndex,
                         const DataLayout &DL, const TargetLibraryInfo *TLI) {
  Type *IntIdxTy = DL.getIndexType(ResultTy);
  Type *IntIdxScalarTy = IntIdxTy->getScalarType();

  bool Any = false;
  SmallVector<Constant*, 32> NewIdxs;
  for (unsigned i = 1, e = Ops.size(); i != e; ++i) {
    if ((i == 1 ||
         !isa<StructType>(GetElementPtrInst::getIndexedType(
             SrcElemTy, Ops.slice(1, i - 1)))) &&
        Ops[i]->getType()->getScalarType() != IntIdxScalarTy) {
      Any = true;
      Type *NewType = Ops[i]->getType()->isVectorTy()
                          ? IntIdxTy
                          : IntIdxScalarTy;
      NewIdxs.push_back(ConstantExpr::getCast(CastInst::getCastOpcode(Ops[i],
                                                                      true,
                                                                      NewType,
                                                                      true),
                                              Ops[i], NewType));
    } else
      NewIdxs.push_back(Ops[i]);
  }

  if (!Any)
    return nullptr;

  Constant *C = ConstantExpr::getGetElementPtr(
      SrcElemTy, Ops[0], NewIdxs, /*InBounds=*/false, InRangeIndex);
  return ConstantFoldConstant(C, DL, TLI);
}

/// Strip the pointer casts, but preserve the address space information.
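///
/// For example (an illustrative case): addrspacecast (i8 addrspace(1)* @g
/// to i8*) is stripped back to @g and then pointer-cast again, so the result
/// stays in address space 0.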
Constant *StripPtrCastKeepAS(Constant *Ptr, Type *&ElemTy) {
  assert(Ptr->getType()->isPointerTy() && "Not a pointer type");
  auto *OldPtrTy = cast<PointerType>(Ptr->getType());
  Ptr = cast<Constant>(Ptr->stripPointerCasts());
  auto *NewPtrTy = cast<PointerType>(Ptr->getType());

  ElemTy = NewPtrTy->getPointerElementType();

  // Preserve the address space number of the pointer.
  if (NewPtrTy->getAddressSpace() != OldPtrTy->getAddressSpace()) {
    NewPtrTy = ElemTy->getPointerTo(OldPtrTy->getAddressSpace());
    Ptr = ConstantExpr::getPointerCast(Ptr, NewPtrTy);
  }
  return Ptr;
}

/// If we can symbolically evaluate the GEP constant expression, do so.
Constant *SymbolicallyEvaluateGEP(const GEPOperator *GEP,
                                  ArrayRef<Constant *> Ops,
                                  const DataLayout &DL,
                                  const TargetLibraryInfo *TLI) {
  const GEPOperator *InnermostGEP = GEP;
  bool InBounds = GEP->isInBounds();

  Type *SrcElemTy = GEP->getSourceElementType();
  Type *ResElemTy = GEP->getResultElementType();
  Type *ResTy = GEP->getType();
  if (!SrcElemTy->isSized() || isa<ScalableVectorType>(SrcElemTy))
    return nullptr;

  if (Constant *C = CastGEPIndices(SrcElemTy, Ops, ResTy,
                                   GEP->getInRangeIndex(), DL, TLI))
    return C;

  Constant *Ptr = Ops[0];
  if (!Ptr->getType()->isPointerTy())
    return nullptr;

  Type *IntIdxTy = DL.getIndexType(Ptr->getType());

  // If this is a constant expr gep that is effectively computing an
  // "offsetof", fold it into 'cast int Size to T*' instead of 'gep 0, 0, 12'.
  for (unsigned i = 1, e = Ops.size(); i != e; ++i)
    if (!isa<ConstantInt>(Ops[i])) {

      // If this is "gep i8* Ptr, (sub 0, V)", fold this as:
      // "inttoptr (sub (ptrtoint Ptr), V)"
      if (Ops.size() == 2 && ResElemTy->isIntegerTy(8)) {
        auto *CE = dyn_cast<ConstantExpr>(Ops[1]);
        assert((!CE || CE->getType() == IntIdxTy) &&
               "CastGEPIndices didn't canonicalize index types!");
        if (CE && CE->getOpcode() == Instruction::Sub &&
            CE->getOperand(0)->isNullValue()) {
          Constant *Res = ConstantExpr::getPtrToInt(Ptr, CE->getType());
          Res = ConstantExpr::getSub(Res, CE->getOperand(1));
          Res = ConstantExpr::getIntToPtr(Res, ResTy);
          return ConstantFoldConstant(Res, DL, TLI);
        }
      }
      return nullptr;
    }

  unsigned BitWidth = DL.getTypeSizeInBits(IntIdxTy);
  APInt Offset =
      APInt(BitWidth,
            DL.getIndexedOffsetInType(
                SrcElemTy,
                makeArrayRef((Value * const *)Ops.data() + 1, Ops.size() - 1)));
  Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);

  // If this is a GEP of a GEP, fold it all into a single GEP.
  while (auto *GEP = dyn_cast<GEPOperator>(Ptr)) {
    InnermostGEP = GEP;
    InBounds &= GEP->isInBounds();

    SmallVector<Value *, 4> NestedOps(GEP->op_begin() + 1, GEP->op_end());

    // Do not try to incorporate the sub-GEP if some index is not a number.
    bool AllConstantInt = true;
    for (Value *NestedOp : NestedOps)
      if (!isa<ConstantInt>(NestedOp)) {
        AllConstantInt = false;
        break;
      }
    if (!AllConstantInt)
      break;

    Ptr = cast<Constant>(GEP->getOperand(0));
    SrcElemTy = GEP->getSourceElementType();
    Offset += APInt(BitWidth, DL.getIndexedOffsetInType(SrcElemTy, NestedOps));
    Ptr = StripPtrCastKeepAS(Ptr, SrcElemTy);
  }

  // If the base value for this address is a literal integer value, fold the
  // getelementptr to the resulting integer value cast to the pointer type.
  APInt BasePtr(BitWidth, 0);
  if (auto *CE = dyn_cast<ConstantExpr>(Ptr)) {
    if (CE->getOpcode() == Instruction::IntToPtr) {
      if (auto *Base = dyn_cast<ConstantInt>(CE->getOperand(0)))
        BasePtr = Base->getValue().zextOrTrunc(BitWidth);
    }
  }

  auto *PTy = cast<PointerType>(Ptr->getType());
  if ((Ptr->isNullValue() || BasePtr != 0) &&
      !DL.isNonIntegralPointerType(PTy)) {
    Constant *C = ConstantInt::get(Ptr->getContext(), Offset + BasePtr);
    return ConstantExpr::getIntToPtr(C, ResTy);
  }

  // Otherwise form a regular getelementptr. Recompute the indices so that
  // we eliminate over-indexing of the notional static type array bounds.
  // This makes it easy to determine if the getelementptr is "inbounds".
  // Also, this helps GlobalOpt do SROA on GlobalVariables.
  Type *Ty = PTy;
  SmallVector<Constant *, 32> NewIdxs;

  do {
    if (!Ty->isStructTy()) {
      if (Ty->isPointerTy()) {
        // The only pointer indexing we'll do is on the first index of the GEP.
        if (!NewIdxs.empty())
          break;

        Ty = SrcElemTy;

        // Only handle pointers to sized types, not pointers to functions.
        if (!Ty->isSized())
          return nullptr;
      } else {
        Type *NextTy = GetElementPtrInst::getTypeAtIndex(Ty, (uint64_t)0);
        if (!NextTy)
          break;
        Ty = NextTy;
      }

      // Determine which element of the array the offset points into.
      APInt ElemSize(BitWidth, DL.getTypeAllocSize(Ty));
      if (ElemSize == 0) {
        // The element size is 0. This may be [0 x Ty]*, so just use a zero
        // index for this level and proceed to the next level to see if it can
        // accommodate the offset.
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, 0));
      } else {
        // The element size is non-zero; divide the offset by the element
        // size (rounding down) to compute the index at this level.
        bool Overflow;
        APInt NewIdx = Offset.sdiv_ov(ElemSize, Overflow);
        if (Overflow)
          break;
        Offset -= NewIdx * ElemSize;
        NewIdxs.push_back(ConstantInt::get(IntIdxTy, NewIdx));
      }
    } else {
      auto *STy = cast<StructType>(Ty);
      // If we end up with an offset that isn't valid for this struct type, we
      // can't re-form this GEP in a regular form, so bail out. The pointer
      // operand likely went through casts that are necessary to make the GEP
      // sensible.
      const StructLayout &SL = *DL.getStructLayout(STy);
      if (Offset.isNegative() || Offset.uge(SL.getSizeInBytes()))
        break;

      // Determine which field of the struct the offset points into. The
      // getZExtValue is fine as we've already ensured that the offset is
      // within the range representable by the StructLayout API.
      unsigned ElIdx = SL.getElementContainingOffset(Offset.getZExtValue());
      NewIdxs.push_back(ConstantInt::get(Type::getInt32Ty(Ty->getContext()),
                                         ElIdx));
      Offset -= APInt(BitWidth, SL.getElementOffset(ElIdx));
      Ty = STy->getTypeAtIndex(ElIdx);
    }
  } while (Ty != ResElemTy);

  // If we haven't used up the entire offset by descending the static
  // type, then the offset is pointing into the middle of an indivisible
  // member, so we can't simplify it.
  if (Offset != 0)
    return nullptr;

  // Preserve the inrange index from the innermost GEP if possible. We must
  // have calculated the same indices up to and including the inrange index.
  Optional<unsigned> InRangeIndex;
  if (Optional<unsigned> LastIRIndex = InnermostGEP->getInRangeIndex())
    if (SrcElemTy == InnermostGEP->getSourceElementType() &&
        NewIdxs.size() > *LastIRIndex) {
      InRangeIndex = LastIRIndex;
      for (unsigned I = 0; I <= *LastIRIndex; ++I)
        if (NewIdxs[I] != InnermostGEP->getOperand(I + 1))
          return nullptr;
    }

  // Create a GEP.
  Constant *C = ConstantExpr::getGetElementPtr(SrcElemTy, Ptr, NewIdxs,
                                               InBounds, InRangeIndex);
  assert(C->getType()->getPointerElementType() == Ty &&
         "Computed GetElementPtr has unexpected type!");

  // If we ended up indexing a member with a type that doesn't match
  // the type of what the original indices indexed, add a cast.
  if (Ty != ResElemTy)
    C = FoldBitCast(C, ResTy, DL);

  return C;
}

/// Attempt to constant fold an instruction with the
/// specified opcode and operands. If successful, the constant result is
/// returned; if not, null is returned. Note that this function can fail when
/// attempting to fold instructions like loads and stores, which have no
/// constant expression form.
Constant *ConstantFoldInstOperandsImpl(const Value *InstOrCE, unsigned Opcode,
                                       ArrayRef<Constant *> Ops,
                                       const DataLayout &DL,
                                       const TargetLibraryInfo *TLI) {
  Type *DestTy = InstOrCE->getType();

  if (Instruction::isUnaryOp(Opcode))
    return ConstantFoldUnaryOpOperand(Opcode, Ops[0], DL);

  if (Instruction::isBinaryOp(Opcode))
    return ConstantFoldBinaryOpOperands(Opcode, Ops[0], Ops[1], DL);

  if (Instruction::isCast(Opcode))
    return ConstantFoldCastOperand(Opcode, Ops[0], DestTy, DL);

  if (auto *GEP = dyn_cast<GEPOperator>(InstOrCE)) {
    if (Constant *C = SymbolicallyEvaluateGEP(GEP, Ops, DL, TLI))
      return C;

    return ConstantExpr::getGetElementPtr(GEP->getSourceElementType(), Ops[0],
                                          Ops.slice(1), GEP->isInBounds(),
                                          GEP->getInRangeIndex());
  }

  if (auto *CE = dyn_cast<ConstantExpr>(InstOrCE))
    return CE->getWithOperands(Ops);

  switch (Opcode) {
  default: return nullptr;
  case Instruction::ICmp:
  case Instruction::FCmp: llvm_unreachable("Invalid for compares");
  case Instruction::Call:
    if (auto *F = dyn_cast<Function>(Ops.back())) {
      const auto *Call = cast<CallBase>(InstOrCE);
      if (canConstantFoldCallTo(Call, F))
        return ConstantFoldCall(Call, F, Ops.slice(0, Ops.size() - 1), TLI);
    }
    return nullptr;
  case Instruction::Select:
    return ConstantExpr::getSelect(Ops[0], Ops[1], Ops[2]);
  case Instruction::ExtractElement:
    return ConstantExpr::getExtractElement(Ops[0], Ops[1]);
  case Instruction::ExtractValue:
    return ConstantExpr::getExtractValue(
        Ops[0], cast<ExtractValueInst>(InstOrCE)->getIndices());
  case Instruction::InsertElement:
    return ConstantExpr::getInsertElement(Ops[0], Ops[1], Ops[2]);
  case Instruction::ShuffleVector:
    return ConstantExpr::getShuffleVector(
        Ops[0], Ops[1], cast<ShuffleVectorInst>(InstOrCE)->getShuffleMask());
  }
}

} // end anonymous namespace

//===----------------------------------------------------------------------===//
// Constant Folding public APIs
//===----------------------------------------------------------------------===//

namespace {

Constant *
ConstantFoldConstantImpl(const Constant *C, const DataLayout &DL,
                         const TargetLibraryInfo *TLI,
                         SmallDenseMap<Constant *, Constant *> &FoldedOps) {
  if (!isa<ConstantVector>(C) && !isa<ConstantExpr>(C))
    return const_cast<Constant *>(C);

  SmallVector<Constant *, 8> Ops;
  for (const Use &OldU : C->operands()) {
    Constant *OldC = cast<Constant>(&OldU);
    Constant *NewC = OldC;
    // Recursively fold the ConstantExpr's operands. If we have already folded
    // a ConstantExpr, we don't have to process it again.
    if (isa<ConstantVector>(OldC) || isa<ConstantExpr>(OldC)) {
      auto It = FoldedOps.find(OldC);
      if (It == FoldedOps.end()) {
        NewC = ConstantFoldConstantImpl(OldC, DL, TLI, FoldedOps);
        FoldedOps.insert({OldC, NewC});
      } else {
        NewC = It->second;
      }
    }
    Ops.push_back(NewC);
  }

  if (auto *CE = dyn_cast<ConstantExpr>(C)) {
    if (CE->isCompare())
      return ConstantFoldCompareInstOperands(CE->getPredicate(), Ops[0], Ops[1],
                                             DL, TLI);

    return ConstantFoldInstOperandsImpl(CE, CE->getOpcode(), Ops, DL, TLI);
  }

  assert(isa<ConstantVector>(C));
  return ConstantVector::get(Ops);
}

} // end anonymous namespace

Constant *llvm::ConstantFoldInstruction(Instruction *I, const DataLayout &DL,
                                        const TargetLibraryInfo *TLI) {
  // Handle PHI nodes quickly here...
  if (auto *PN = dyn_cast<PHINode>(I)) {
    Constant *CommonValue = nullptr;

    SmallDenseMap<Constant *, Constant *> FoldedOps;
    for (Value *Incoming : PN->incoming_values()) {
      // If the incoming value is undef then skip it. Note that while we could
      // skip the value if it is equal to the phi node itself, we choose not to
      // because that would break the rule that constant folding only applies
      // if all operands are constants.
      if (isa<UndefValue>(Incoming))
        continue;
      // If the incoming value is not a constant, then give up.
      auto *C = dyn_cast<Constant>(Incoming);
      if (!C)
        return nullptr;
      // Fold the PHI's operands.
      C = ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
      // If the incoming value is a different constant from
      // the one we saw previously, then give up.
      if (CommonValue && C != CommonValue)
        return nullptr;
      CommonValue = C;
    }

    // If we reach here, all incoming values are the same constant or undef.
    return CommonValue ? CommonValue : UndefValue::get(PN->getType());
  }

  // Scan the operand list, checking to see if they are all constants; if so,
  // hand off to ConstantFoldInstOperandsImpl.
  if (!all_of(I->operands(), [](Use &U) { return isa<Constant>(U); }))
    return nullptr;

  SmallDenseMap<Constant *, Constant *> FoldedOps;
  SmallVector<Constant *, 8> Ops;
  for (const Use &OpU : I->operands()) {
    auto *Op = cast<Constant>(&OpU);
    // Fold the Instruction's operands.
    Op = ConstantFoldConstantImpl(Op, DL, TLI, FoldedOps);
    Ops.push_back(Op);
  }

  if (const auto *CI = dyn_cast<CmpInst>(I))
    return ConstantFoldCompareInstOperands(CI->getPredicate(), Ops[0], Ops[1],
                                           DL, TLI);

  if (const auto *LI = dyn_cast<LoadInst>(I))
    return ConstantFoldLoadInst(LI, DL);

  if (auto *IVI = dyn_cast<InsertValueInst>(I)) {
    return ConstantExpr::getInsertValue(
        cast<Constant>(IVI->getAggregateOperand()),
        cast<Constant>(IVI->getInsertedValueOperand()),
        IVI->getIndices());
  }

  if (auto *EVI = dyn_cast<ExtractValueInst>(I)) {
    return ConstantExpr::getExtractValue(
        cast<Constant>(EVI->getAggregateOperand()),
        EVI->getIndices());
  }

  return ConstantFoldInstOperands(I, Ops, DL, TLI);
}

Constant *llvm::ConstantFoldConstant(const Constant *C, const DataLayout &DL,
                                     const TargetLibraryInfo *TLI) {
  SmallDenseMap<Constant *, Constant *> FoldedOps;
  return ConstantFoldConstantImpl(C, DL, TLI, FoldedOps);
}

Constant *llvm::ConstantFoldInstOperands(Instruction *I,
                                         ArrayRef<Constant *> Ops,
                                         const DataLayout &DL,
                                         const TargetLibraryInfo *TLI) {
  return ConstantFoldInstOperandsImpl(I, I->getOpcode(), Ops, DL, TLI);
}

Constant *llvm::ConstantFoldCompareInstOperands(unsigned Predicate,
                                                Constant *Ops0, Constant *Ops1,
                                                const DataLayout &DL,
                                                const TargetLibraryInfo *TLI) {
  // fold: icmp (inttoptr x), null         -> icmp x, 0
  // fold: icmp null, (inttoptr x)         -> icmp 0, x
  // fold: icmp (ptrtoint x), 0            -> icmp x, null
  // fold: icmp 0, (ptrtoint x)            -> icmp null, x
  // fold: icmp (inttoptr x), (inttoptr y) -> icmp trunc/zext x, trunc/zext y
  // fold: icmp (ptrtoint x), (ptrtoint y) -> icmp x, y
  //
  // FIXME: The following comment is out of date and the DataLayout is here
  // now.
  // ConstantExpr::getCompare cannot do this, because it doesn't have DL
  // around to know if bit truncation is happening.
  if (auto *CE0 = dyn_cast<ConstantExpr>(Ops0)) {
    if (Ops1->isNullValue()) {
      if (CE0->getOpcode() == Instruction::IntToPtr) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getType());
        // Convert the integer value to the right size to ensure we get the
        // proper extension or truncation.
        Constant *C = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                   IntPtrTy, false);
        Constant *Null = Constant::getNullValue(C->getType());
        return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
      }

      // Only do this transformation if the int is intptr-sized, otherwise
      // there is a truncation or extension that we aren't modeling.
      if (CE0->getOpcode() == Instruction::PtrToInt) {
        Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
        if (CE0->getType() == IntPtrTy) {
          Constant *C = CE0->getOperand(0);
          Constant *Null = Constant::getNullValue(C->getType());
          return ConstantFoldCompareInstOperands(Predicate, C, Null, DL, TLI);
        }
      }
    }

    if (auto *CE1 = dyn_cast<ConstantExpr>(Ops1)) {
      if (CE0->getOpcode() == CE1->getOpcode()) {
        if (CE0->getOpcode() == Instruction::IntToPtr) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getType());

          // Convert the integer value to the right size to ensure we get the
          // proper extension or truncation.
          Constant *C0 = ConstantExpr::getIntegerCast(CE0->getOperand(0),
                                                      IntPtrTy, false);
          Constant *C1 = ConstantExpr::getIntegerCast(CE1->getOperand(0),
                                                      IntPtrTy, false);
          return ConstantFoldCompareInstOperands(Predicate, C0, C1, DL, TLI);
        }

        // Only do this transformation if the int is intptr-sized, otherwise
        // there is a truncation or extension that we aren't modeling.
        if (CE0->getOpcode() == Instruction::PtrToInt) {
          Type *IntPtrTy = DL.getIntPtrType(CE0->getOperand(0)->getType());
          if (CE0->getType() == IntPtrTy &&
              CE0->getOperand(0)->getType() == CE1->getOperand(0)->getType()) {
            return ConstantFoldCompareInstOperands(
                Predicate, CE0->getOperand(0), CE1->getOperand(0), DL, TLI);
          }
        }
      }
    }

    // icmp eq (or x, y), 0 -> (icmp eq x, 0) & (icmp eq y, 0)
    // icmp ne (or x, y), 0 -> (icmp ne x, 0) | (icmp ne y, 0)
    if ((Predicate == ICmpInst::ICMP_EQ || Predicate == ICmpInst::ICMP_NE) &&
        CE0->getOpcode() == Instruction::Or && Ops1->isNullValue()) {
      Constant *LHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(0), Ops1, DL, TLI);
      Constant *RHS = ConstantFoldCompareInstOperands(
          Predicate, CE0->getOperand(1), Ops1, DL, TLI);
      unsigned OpC =
          Predicate == ICmpInst::ICMP_EQ ? Instruction::And : Instruction::Or;
      return ConstantFoldBinaryOpOperands(OpC, LHS, RHS, DL);
    }
  } else if (isa<ConstantExpr>(Ops1)) {
    // If RHS is a constant expression, but the left side isn't, swap the
    // operands and try again.
    Predicate = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)Predicate);
    return ConstantFoldCompareInstOperands(Predicate, Ops1, Ops0, DL, TLI);
  }

  return ConstantExpr::getCompare(Predicate, Ops0, Ops1);
}

Constant *llvm::ConstantFoldUnaryOpOperand(unsigned Opcode, Constant *Op,
                                           const DataLayout &DL) {
  assert(Instruction::isUnaryOp(Opcode));

  return ConstantExpr::get(Opcode, Op);
}

Constant *llvm::ConstantFoldBinaryOpOperands(unsigned Opcode, Constant *LHS,
                                             Constant *RHS,
                                             const DataLayout &DL) {
  assert(Instruction::isBinaryOp(Opcode));
  if (isa<ConstantExpr>(LHS) || isa<ConstantExpr>(RHS))
    if (Constant *C = SymbolicallyEvaluateBinop(Opcode, LHS, RHS, DL))
      return C;

  return ConstantExpr::get(Opcode, LHS, RHS);
}

Constant *llvm::ConstantFoldCastOperand(unsigned Opcode, Constant *C,
                                        Type *DestTy, const DataLayout &DL) {
  assert(Instruction::isCast(Opcode));
  switch (Opcode) {
  default:
    llvm_unreachable("Missing case");
  case Instruction::PtrToInt:
    // If the input is an inttoptr, eliminate the pair. This requires knowing
    // the width of a pointer, so it can't be done in ConstantExpr::getCast.
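    //
    // For example (an illustrative case, assuming 64-bit pointers):
    //   ptrtoint (inttoptr (i64 C to i8*) to i64)
    // folds back to i64 C.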
1337 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1338 if (CE->getOpcode() == Instruction::IntToPtr) {
1339 Constant *Input = CE->getOperand(0);
1340 unsigned InWidth = Input->getType()->getScalarSizeInBits();
1341 unsigned PtrWidth = DL.getPointerTypeSizeInBits(CE->getType());
1342 if (PtrWidth < InWidth) {
1343 Constant *Mask =
1344 ConstantInt::get(CE->getContext(),
1345 APInt::getLowBitsSet(InWidth, PtrWidth));
1346 Input = ConstantExpr::getAnd(Input, Mask);
1347 }
1348 // Do a zext or trunc to get to the dest size.
1349 return ConstantExpr::getIntegerCast(Input, DestTy, false);
1350 }
1351 }
1352 return ConstantExpr::getCast(Opcode, C, DestTy);
1353 case Instruction::IntToPtr:
1354 // If the input is a ptrtoint, turn the pair into a ptr to ptr bitcast if
1355 // the int size is >= the ptr size and the address spaces are the same.
1356 // This requires knowing the width of a pointer, so it can't be done in
1357 // ConstantExpr::getCast.
1358 if (auto *CE = dyn_cast<ConstantExpr>(C)) {
1359 if (CE->getOpcode() == Instruction::PtrToInt) {
1360 Constant *SrcPtr = CE->getOperand(0);
1361 unsigned SrcPtrSize = DL.getPointerTypeSizeInBits(SrcPtr->getType());
1362 unsigned MidIntSize = CE->getType()->getScalarSizeInBits();
1363
1364 if (MidIntSize >= SrcPtrSize) {
1365 unsigned SrcAS = SrcPtr->getType()->getPointerAddressSpace();
1366 if (SrcAS == DestTy->getPointerAddressSpace())
1367 return FoldBitCast(CE->getOperand(0), DestTy, DL);
1368 }
1369 }
1370 }
1371
1372 return ConstantExpr::getCast(Opcode, C, DestTy);
1373 case Instruction::Trunc:
1374 case Instruction::ZExt:
1375 case Instruction::SExt:
1376 case Instruction::FPTrunc:
1377 case Instruction::FPExt:
1378 case Instruction::UIToFP:
1379 case Instruction::SIToFP:
1380 case Instruction::FPToUI:
1381 case Instruction::FPToSI:
1382 case Instruction::AddrSpaceCast:
1383 return ConstantExpr::getCast(Opcode, C, DestTy);
1384 case Instruction::BitCast:
1385 return FoldBitCast(C, DestTy, DL);
1386 }
1387 }
1388
ConstantFoldLoadThroughGEPConstantExpr(Constant * C,ConstantExpr * CE)1389 Constant *llvm::ConstantFoldLoadThroughGEPConstantExpr(Constant *C,
1390 ConstantExpr *CE) {
1391 if (!CE->getOperand(1)->isNullValue())
1392 return nullptr; // Do not allow stepping over the value!
1393
1394 // Loop over all of the operands, tracking down which value we are
1395 // addressing.
1396 for (unsigned i = 2, e = CE->getNumOperands(); i != e; ++i) {
1397 C = C->getAggregateElement(CE->getOperand(i));
1398 if (!C)
1399 return nullptr;
1400 }
1401 return C;
1402 }
1403
1404 Constant *
ConstantFoldLoadThroughGEPIndices(Constant * C,ArrayRef<Constant * > Indices)1405 llvm::ConstantFoldLoadThroughGEPIndices(Constant *C,
1406 ArrayRef<Constant *> Indices) {
1407 // Loop over all of the operands, tracking down which value we are
1408 // addressing.
1409 for (Constant *Index : Indices) {
1410 C = C->getAggregateElement(Index);
1411 if (!C)
1412 return nullptr;
1413 }
1414 return C;
1415 }
1416
1417 //===----------------------------------------------------------------------===//
1418 // Constant Folding for Calls
1419 //
1420
canConstantFoldCallTo(const CallBase * Call,const Function * F)1421 bool llvm::canConstantFoldCallTo(const CallBase *Call, const Function *F) {
1422 if (Call->isNoBuiltin())
1423 return false;
1424 switch (F->getIntrinsicID()) {
1425 // Operations that do not operate floating-point numbers and do not depend on
1426 // FP environment can be folded even in strictfp functions.
1427 case Intrinsic::bswap:
1428 case Intrinsic::ctpop:
1429 case Intrinsic::ctlz:
1430 case Intrinsic::cttz:
1431 case Intrinsic::fshl:
1432 case Intrinsic::fshr:
1433 case Intrinsic::launder_invariant_group:
1434 case Intrinsic::strip_invariant_group:
1435 case Intrinsic::masked_load:
1436 case Intrinsic::sadd_with_overflow:
1437 case Intrinsic::uadd_with_overflow:
1438 case Intrinsic::ssub_with_overflow:
1439 case Intrinsic::usub_with_overflow:
1440 case Intrinsic::smul_with_overflow:
1441 case Intrinsic::umul_with_overflow:
1442 case Intrinsic::sadd_sat:
1443 case Intrinsic::uadd_sat:
1444 case Intrinsic::ssub_sat:
1445 case Intrinsic::usub_sat:
1446 case Intrinsic::smul_fix:
1447 case Intrinsic::smul_fix_sat:
1448 case Intrinsic::bitreverse:
1449 case Intrinsic::is_constant:
1450 case Intrinsic::experimental_vector_reduce_add:
1451 case Intrinsic::experimental_vector_reduce_mul:
1452 case Intrinsic::experimental_vector_reduce_and:
1453 case Intrinsic::experimental_vector_reduce_or:
1454 case Intrinsic::experimental_vector_reduce_xor:
1455 case Intrinsic::experimental_vector_reduce_smin:
1456 case Intrinsic::experimental_vector_reduce_smax:
1457 case Intrinsic::experimental_vector_reduce_umin:
1458 case Intrinsic::experimental_vector_reduce_umax:
1459 return true;
1460
1461 // Floating-point operations cannot be folded in strictfp functions in the
1462 // general case; they can be folded if the FP environment is known to the compiler.
1463 case Intrinsic::minnum:
1464 case Intrinsic::maxnum:
1465 case Intrinsic::minimum:
1466 case Intrinsic::maximum:
1467 case Intrinsic::log:
1468 case Intrinsic::log2:
1469 case Intrinsic::log10:
1470 case Intrinsic::exp:
1471 case Intrinsic::exp2:
1472 case Intrinsic::sqrt:
1473 case Intrinsic::sin:
1474 case Intrinsic::cos:
1475 case Intrinsic::pow:
1476 case Intrinsic::powi:
1477 case Intrinsic::fma:
1478 case Intrinsic::fmuladd:
1479 case Intrinsic::convert_from_fp16:
1480 case Intrinsic::convert_to_fp16:
1481 case Intrinsic::amdgcn_cos:
1482 case Intrinsic::amdgcn_cubeid:
1483 case Intrinsic::amdgcn_cubema:
1484 case Intrinsic::amdgcn_cubesc:
1485 case Intrinsic::amdgcn_cubetc:
1486 case Intrinsic::amdgcn_fmul_legacy:
1487 case Intrinsic::amdgcn_fract:
1488 case Intrinsic::amdgcn_ldexp:
1489 case Intrinsic::amdgcn_sin:
1490 // The intrinsics below depend on the rounding mode in MXCSR.
1491 case Intrinsic::x86_sse_cvtss2si:
1492 case Intrinsic::x86_sse_cvtss2si64:
1493 case Intrinsic::x86_sse_cvttss2si:
1494 case Intrinsic::x86_sse_cvttss2si64:
1495 case Intrinsic::x86_sse2_cvtsd2si:
1496 case Intrinsic::x86_sse2_cvtsd2si64:
1497 case Intrinsic::x86_sse2_cvttsd2si:
1498 case Intrinsic::x86_sse2_cvttsd2si64:
1499 case Intrinsic::x86_avx512_vcvtss2si32:
1500 case Intrinsic::x86_avx512_vcvtss2si64:
1501 case Intrinsic::x86_avx512_cvttss2si:
1502 case Intrinsic::x86_avx512_cvttss2si64:
1503 case Intrinsic::x86_avx512_vcvtsd2si32:
1504 case Intrinsic::x86_avx512_vcvtsd2si64:
1505 case Intrinsic::x86_avx512_cvttsd2si:
1506 case Intrinsic::x86_avx512_cvttsd2si64:
1507 case Intrinsic::x86_avx512_vcvtss2usi32:
1508 case Intrinsic::x86_avx512_vcvtss2usi64:
1509 case Intrinsic::x86_avx512_cvttss2usi:
1510 case Intrinsic::x86_avx512_cvttss2usi64:
1511 case Intrinsic::x86_avx512_vcvtsd2usi32:
1512 case Intrinsic::x86_avx512_vcvtsd2usi64:
1513 case Intrinsic::x86_avx512_cvttsd2usi:
1514 case Intrinsic::x86_avx512_cvttsd2usi64:
1515 return !Call->isStrictFP();
1516
1517 // Sign operations are actually bitwise operations; they do not raise
1518 // exceptions even for SNaNs.
1519 case Intrinsic::fabs:
1520 case Intrinsic::copysign:
1521 // Non-constrained variants of rounding operations imply the default FP
1522 // environment, so they can be folded in any case.
1523 case Intrinsic::ceil:
1524 case Intrinsic::floor:
1525 case Intrinsic::round:
1526 case Intrinsic::roundeven:
1527 case Intrinsic::trunc:
1528 case Intrinsic::nearbyint:
1529 case Intrinsic::rint:
1530 // Constrained intrinsics can be folded if the FP environment is known
1531 // to the compiler.
1532 case Intrinsic::experimental_constrained_ceil:
1533 case Intrinsic::experimental_constrained_floor:
1534 case Intrinsic::experimental_constrained_round:
1535 case Intrinsic::experimental_constrained_roundeven:
1536 case Intrinsic::experimental_constrained_trunc:
1537 case Intrinsic::experimental_constrained_nearbyint:
1538 case Intrinsic::experimental_constrained_rint:
1539 return true;
1540 default:
1541 return false;
1542 case Intrinsic::not_intrinsic: break;
1543 }
1544
1545 if (!F->hasName() || Call->isStrictFP())
1546 return false;
1547
1548 // In these cases, a length check is required. We don't want to return true
1549 // for a name like "cos\0blah", which strcmp would report as equal to "cos"
1550 // but which has length 8.
1551 StringRef Name = F->getName();
1552 switch (Name[0]) {
1553 default:
1554 return false;
1555 case 'a':
1556 return Name == "acos" || Name == "acosf" ||
1557 Name == "asin" || Name == "asinf" ||
1558 Name == "atan" || Name == "atanf" ||
1559 Name == "atan2" || Name == "atan2f";
1560 case 'c':
1561 return Name == "ceil" || Name == "ceilf" ||
1562 Name == "cos" || Name == "cosf" ||
1563 Name == "cosh" || Name == "coshf";
1564 case 'e':
1565 return Name == "exp" || Name == "expf" ||
1566 Name == "exp2" || Name == "exp2f";
1567 case 'f':
1568 return Name == "fabs" || Name == "fabsf" ||
1569 Name == "floor" || Name == "floorf" ||
1570 Name == "fmod" || Name == "fmodf";
1571 case 'l':
1572 return Name == "log" || Name == "logf" ||
1573 Name == "log2" || Name == "log2f" ||
1574 Name == "log10" || Name == "log10f";
1575 case 'n':
1576 return Name == "nearbyint" || Name == "nearbyintf";
1577 case 'p':
1578 return Name == "pow" || Name == "powf";
1579 case 'r':
1580 return Name == "remainder" || Name == "remainderf" ||
1581 Name == "rint" || Name == "rintf" ||
1582 Name == "round" || Name == "roundf";
1583 case 's':
1584 return Name == "sin" || Name == "sinf" ||
1585 Name == "sinh" || Name == "sinhf" ||
1586 Name == "sqrt" || Name == "sqrtf";
1587 case 't':
1588 return Name == "tan" || Name == "tanf" ||
1589 Name == "tanh" || Name == "tanhf" ||
1590 Name == "trunc" || Name == "truncf";
1591 case '_':
1592 // Check for various function names that get used for the math functions
1593 // when the header files are preprocessed with the macro
1594 // __FINITE_MATH_ONLY__ enabled.
1595 // The '12' here is the length of the shortest name that can match.
1596 // We need to check the size before looking at Name[1] and Name[2]
1597 // so we may as well check a limit that will eliminate mismatches.
1598 if (Name.size() < 12 || Name[1] != '_')
1599 return false;
1600 switch (Name[2]) {
1601 default:
1602 return false;
1603 case 'a':
1604 return Name == "__acos_finite" || Name == "__acosf_finite" ||
1605 Name == "__asin_finite" || Name == "__asinf_finite" ||
1606 Name == "__atan2_finite" || Name == "__atan2f_finite";
1607 case 'c':
1608 return Name == "__cosh_finite" || Name == "__coshf_finite";
1609 case 'e':
1610 return Name == "__exp_finite" || Name == "__expf_finite" ||
1611 Name == "__exp2_finite" || Name == "__exp2f_finite";
1612 case 'l':
1613 return Name == "__log_finite" || Name == "__logf_finite" ||
1614 Name == "__log10_finite" || Name == "__log10f_finite";
1615 case 'p':
1616 return Name == "__pow_finite" || Name == "__powf_finite";
1617 case 's':
1618 return Name == "__sinh_finite" || Name == "__sinhf_finite";
1619 }
1620 }
1621 }
1622
1623 namespace {
1624
1625 Constant *GetConstantFoldFPValue(double V, Type *Ty) {
1626 if (Ty->isHalfTy() || Ty->isFloatTy()) {
1627 APFloat APF(V);
1628 bool unused;
1629 APF.convert(Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &unused);
1630 return ConstantFP::get(Ty->getContext(), APF);
1631 }
1632 if (Ty->isDoubleTy())
1633 return ConstantFP::get(Ty->getContext(), APFloat(V));
1634 llvm_unreachable("Can only constant fold half/float/double");
1635 }
1636
1637 /// Clear the floating-point exception state.
1638 inline void llvm_fenv_clearexcept() {
1639 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT
1640 feclearexcept(FE_ALL_EXCEPT);
1641 #endif
1642 errno = 0;
1643 }
1644
1645 /// Test if a floating-point exception was raised.
1646 inline bool llvm_fenv_testexcept() {
1647 int errno_val = errno;
1648 if (errno_val == ERANGE || errno_val == EDOM)
1649 return true;
1650 #if defined(HAVE_FENV_H) && HAVE_DECL_FE_ALL_EXCEPT && HAVE_DECL_FE_INEXACT
1651 if (fetestexcept(FE_ALL_EXCEPT & ~FE_INEXACT))
1652 return true;
1653 #endif
1654 return false;
1655 }
1656
1657 Constant *ConstantFoldFP(double (*NativeFP)(double), double V, Type *Ty) {
1658 llvm_fenv_clearexcept();
1659 V = NativeFP(V);
1660 if (llvm_fenv_testexcept()) {
1661 llvm_fenv_clearexcept();
1662 return nullptr;
1663 }
1664
1665 return GetConstantFoldFPValue(V, Ty);
1666 }
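// Usage sketch: ConstantFoldFP(log, -1.0, Ty) returns nullptr because the
// host log() reports a domain error (errno EDOM and/or FE_INVALID), while
// ConstantFoldFP(log, 2.0, Ty) produces a ConstantFP holding ln(2) rounded
// to Ty's semantics by GetConstantFoldFPValue.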
1667
1668 Constant *ConstantFoldBinaryFP(double (*NativeFP)(double, double), double V,
1669 double W, Type *Ty) {
1670 llvm_fenv_clearexcept();
1671 V = NativeFP(V, W);
1672 if (llvm_fenv_testexcept()) {
1673 llvm_fenv_clearexcept();
1674 return nullptr;
1675 }
1676
1677 return GetConstantFoldFPValue(V, Ty);
1678 }
1679
1680 Constant *ConstantFoldVectorReduce(Intrinsic::ID IID, Constant *Op) {
1681 FixedVectorType *VT = dyn_cast<FixedVectorType>(Op->getType());
1682 if (!VT)
1683 return nullptr;
1684 ConstantInt *CI = dyn_cast<ConstantInt>(Op->getAggregateElement(0U));
1685 if (!CI)
1686 return nullptr;
1687 APInt Acc = CI->getValue();
1688
1689 for (unsigned I = 1; I < VT->getNumElements(); I++) {
1690 if (!(CI = dyn_cast<ConstantInt>(Op->getAggregateElement(I))))
1691 return nullptr;
1692 const APInt &X = CI->getValue();
1693 switch (IID) {
1694 case Intrinsic::experimental_vector_reduce_add:
1695 Acc = Acc + X;
1696 break;
1697 case Intrinsic::experimental_vector_reduce_mul:
1698 Acc = Acc * X;
1699 break;
1700 case Intrinsic::experimental_vector_reduce_and:
1701 Acc = Acc & X;
1702 break;
1703 case Intrinsic::experimental_vector_reduce_or:
1704 Acc = Acc | X;
1705 break;
1706 case Intrinsic::experimental_vector_reduce_xor:
1707 Acc = Acc ^ X;
1708 break;
1709 case Intrinsic::experimental_vector_reduce_smin:
1710 Acc = APIntOps::smin(Acc, X);
1711 break;
1712 case Intrinsic::experimental_vector_reduce_smax:
1713 Acc = APIntOps::smax(Acc, X);
1714 break;
1715 case Intrinsic::experimental_vector_reduce_umin:
1716 Acc = APIntOps::umin(Acc, X);
1717 break;
1718 case Intrinsic::experimental_vector_reduce_umax:
1719 Acc = APIntOps::umax(Acc, X);
1720 break;
1721 }
1722 }
1723
1724 return ConstantInt::get(Op->getContext(), Acc);
1725 }
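// For example, an experimental.vector.reduce.add over the constant
// <4 x i32> <i32 1, i32 2, i32 3, i32 4> folds to i32 10. Any element that
// is not a ConstantInt (e.g. undef) makes this helper return nullptr.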
1726
1727 /// Attempt to fold an SSE floating point to integer conversion of a constant
1728 /// floating point. If roundTowardZero is false, the default IEEE rounding is
1729 /// used (toward nearest, ties to even). This matches the behavior of the
1730 /// non-truncating SSE instructions in the default rounding mode. The desired
1731 /// integer type Ty is used to select how many bits are available for the
1732 /// result. Returns null if the conversion cannot be performed, otherwise
1733 /// returns the Constant value resulting from the conversion.
1734 Constant *ConstantFoldSSEConvertToInt(const APFloat &Val, bool roundTowardZero,
1735 Type *Ty, bool IsSigned) {
1736 // All of these conversion intrinsics form an integer of at most 64 bits.
1737 unsigned ResultWidth = Ty->getIntegerBitWidth();
1738 assert(ResultWidth <= 64 &&
1739 "Can only constant fold conversions to 64 and 32 bit ints");
1740
1741 uint64_t UIntVal;
1742 bool isExact = false;
1743 APFloat::roundingMode mode = roundTowardZero ? APFloat::rmTowardZero
1744 : APFloat::rmNearestTiesToEven;
1745 APFloat::opStatus status =
1746 Val.convertToInteger(makeMutableArrayRef(UIntVal), ResultWidth,
1747 IsSigned, mode, &isExact);
1748 if (status != APFloat::opOK &&
1749 (!roundTowardZero || status != APFloat::opInexact))
1750 return nullptr;
1751 return ConstantInt::get(Ty, UIntVal, IsSigned);
1752 }
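// For instance, the non-truncating conversions round 2.5f to 2 under
// rmNearestTiesToEven (ties go to the even value), while the truncating
// (cvtt*) variants use rmTowardZero and map both 2.5f and 2.9f to 2.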
1753
1754 double getValueAsDouble(ConstantFP *Op) {
1755 Type *Ty = Op->getType();
1756
1757 if (Ty->isFloatTy())
1758 return Op->getValueAPF().convertToFloat();
1759
1760 if (Ty->isDoubleTy())
1761 return Op->getValueAPF().convertToDouble();
1762
1763 bool unused;
1764 APFloat APF = Op->getValueAPF();
1765 APF.convert(APFloat::IEEEdouble(), APFloat::rmNearestTiesToEven, &unused);
1766 return APF.convertToDouble();
1767 }
1768
1769 static bool isManifestConstant(const Constant *c) {
1770 if (isa<ConstantData>(c)) {
1771 return true;
1772 } else if (isa<ConstantAggregate>(c) || isa<ConstantExpr>(c)) {
1773 for (const Value *subc : c->operand_values()) {
1774 if (!isManifestConstant(cast<Constant>(subc)))
1775 return false;
1776 }
1777 return true;
1778 }
1779 return false;
1780 }
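// For example, i32 4 and <2 x i32> <i32 0, i32 1> are manifest constants,
// but ptrtoint (i8* @g to i64) is not: the address of @g is only known at
// link time, so llvm.is.constant must not treat it as a foldable constant.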
1781
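/// Return true if Op is either a ConstantInt or undef. On success, C points
/// at the integer value, or is set to null when Op is undef.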
1782 static bool getConstIntOrUndef(Value *Op, const APInt *&C) {
1783 if (auto *CI = dyn_cast<ConstantInt>(Op)) {
1784 C = &CI->getValue();
1785 return true;
1786 }
1787 if (isa<UndefValue>(Op)) {
1788 C = nullptr;
1789 return true;
1790 }
1791 return false;
1792 }
1793
1794 static Constant *ConstantFoldScalarCall1(StringRef Name,
1795 Intrinsic::ID IntrinsicID,
1796 Type *Ty,
1797 ArrayRef<Constant *> Operands,
1798 const TargetLibraryInfo *TLI,
1799 const CallBase *Call) {
1800 assert(Operands.size() == 1 && "Wrong number of operands.");
1801
1802 if (IntrinsicID == Intrinsic::is_constant) {
1803 // We know we have a "Constant" argument. But we want to only
1804 // return true for manifest constants, not those that depend on
1805 // constants with unknowable values, e.g. GlobalValue or BlockAddress.
1806 if (isManifestConstant(Operands[0]))
1807 return ConstantInt::getTrue(Ty->getContext());
1808 return nullptr;
1809 }
1810 if (isa<UndefValue>(Operands[0])) {
1811 // cosine(arg) is between -1 and 1. cosine(invalid arg) is NaN.
1812 // ctpop() is between 0 and bitwidth, pick 0 for undef.
1813 if (IntrinsicID == Intrinsic::cos ||
1814 IntrinsicID == Intrinsic::ctpop)
1815 return Constant::getNullValue(Ty);
1816 if (IntrinsicID == Intrinsic::bswap ||
1817 IntrinsicID == Intrinsic::bitreverse ||
1818 IntrinsicID == Intrinsic::launder_invariant_group ||
1819 IntrinsicID == Intrinsic::strip_invariant_group)
1820 return Operands[0];
1821 }
1822
1823 if (isa<ConstantPointerNull>(Operands[0])) {
1824 // launder(null) == null == strip(null) iff in addrspace 0
1825 if (IntrinsicID == Intrinsic::launder_invariant_group ||
1826 IntrinsicID == Intrinsic::strip_invariant_group) {
1827 // If the instruction is not yet placed in a basic block (e.g. when cloning
1828 // a function during inlining), Call's caller may not be available. So check
1829 // Call's parent block first before querying Call->getCaller().
1830 const Function *Caller =
1831 Call->getParent() ? Call->getCaller() : nullptr;
1832 if (Caller &&
1833 !NullPointerIsDefined(
1834 Caller, Operands[0]->getType()->getPointerAddressSpace())) {
1835 return Operands[0];
1836 }
1837 return nullptr;
1838 }
1839 }
1840
1841 if (auto *Op = dyn_cast<ConstantFP>(Operands[0])) {
1842 if (IntrinsicID == Intrinsic::convert_to_fp16) {
1843 APFloat Val(Op->getValueAPF());
1844
1845 bool lost = false;
1846 Val.convert(APFloat::IEEEhalf(), APFloat::rmNearestTiesToEven, &lost);
1847
1848 return ConstantInt::get(Ty->getContext(), Val.bitcastToAPInt());
1849 }
1850
1851 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
1852 return nullptr;
1853
1854 // Use internal versions of these intrinsics.
1855 APFloat U = Op->getValueAPF();
1856
1857 if (IntrinsicID == Intrinsic::nearbyint || IntrinsicID == Intrinsic::rint) {
1858 U.roundToIntegral(APFloat::rmNearestTiesToEven);
1859 return ConstantFP::get(Ty->getContext(), U);
1860 }
1861
1862 if (IntrinsicID == Intrinsic::round) {
1863 U.roundToIntegral(APFloat::rmNearestTiesToAway);
1864 return ConstantFP::get(Ty->getContext(), U);
1865 }
1866
1867 if (IntrinsicID == Intrinsic::roundeven) {
1868 U.roundToIntegral(APFloat::rmNearestTiesToEven);
1869 return ConstantFP::get(Ty->getContext(), U);
1870 }
1871
1872 if (IntrinsicID == Intrinsic::ceil) {
1873 U.roundToIntegral(APFloat::rmTowardPositive);
1874 return ConstantFP::get(Ty->getContext(), U);
1875 }
1876
1877 if (IntrinsicID == Intrinsic::floor) {
1878 U.roundToIntegral(APFloat::rmTowardNegative);
1879 return ConstantFP::get(Ty->getContext(), U);
1880 }
1881
1882 if (IntrinsicID == Intrinsic::trunc) {
1883 U.roundToIntegral(APFloat::rmTowardZero);
1884 return ConstantFP::get(Ty->getContext(), U);
1885 }
1886
1887 if (IntrinsicID == Intrinsic::fabs) {
1888 U.clearSign();
1889 return ConstantFP::get(Ty->getContext(), U);
1890 }
1891
1892 if (IntrinsicID == Intrinsic::amdgcn_fract) {
1893 // The v_fract instruction behaves like the OpenCL spec, which defines
1894 // fract(x) as fmin(x - floor(x), 0x1.fffffep-1f): "The min() operator is
1895 // there to prevent fract(-small) from returning 1.0. It returns the
1896 // largest positive floating-point number less than 1.0."
1897 APFloat FloorU(U);
1898 FloorU.roundToIntegral(APFloat::rmTowardNegative);
1899 APFloat FractU(U - FloorU);
1900 APFloat AlmostOne(U.getSemantics(), 1);
1901 AlmostOne.next(/*nextDown*/ true);
1902 return ConstantFP::get(Ty->getContext(), minimum(FractU, AlmostOne));
1903 }
1904
1905 // Rounding operations (floor, trunc, ceil, round and nearbyint) do not
1906 // raise FP exceptions, unless the argument is signaling NaN.
1907
1908 Optional<APFloat::roundingMode> RM;
1909 switch (IntrinsicID) {
1910 default:
1911 break;
1912 case Intrinsic::experimental_constrained_nearbyint:
1913 case Intrinsic::experimental_constrained_rint: {
1914 auto CI = cast<ConstrainedFPIntrinsic>(Call);
1915 RM = CI->getRoundingMode();
1916 if (!RM || RM.getValue() == RoundingMode::Dynamic)
1917 return nullptr;
1918 break;
1919 }
1920 case Intrinsic::experimental_constrained_round:
1921 RM = APFloat::rmNearestTiesToAway;
1922 break;
1923 case Intrinsic::experimental_constrained_ceil:
1924 RM = APFloat::rmTowardPositive;
1925 break;
1926 case Intrinsic::experimental_constrained_floor:
1927 RM = APFloat::rmTowardNegative;
1928 break;
1929 case Intrinsic::experimental_constrained_trunc:
1930 RM = APFloat::rmTowardZero;
1931 break;
1932 }
1933 if (RM) {
1934 auto CI = cast<ConstrainedFPIntrinsic>(Call);
1935 if (U.isFinite()) {
1936 APFloat::opStatus St = U.roundToIntegral(*RM);
1937 if (IntrinsicID == Intrinsic::experimental_constrained_rint &&
1938 St == APFloat::opInexact) {
1939 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1940 if (EB && *EB == fp::ebStrict)
1941 return nullptr;
1942 }
1943 } else if (U.isSignaling()) {
1944 Optional<fp::ExceptionBehavior> EB = CI->getExceptionBehavior();
1945 if (EB && *EB != fp::ebIgnore)
1946 return nullptr;
1947 U = APFloat::getQNaN(U.getSemantics());
1948 }
1949 return ConstantFP::get(Ty->getContext(), U);
1950 }
1951
1952 // We only fold functions with finite arguments. Folding NaN and inf is
1953 // likely to be aborted with an exception anyway, and some host libms
1954 // have known errors raising exceptions.
1955 if (!U.isFinite())
1956 return nullptr;
1957
1958 // Currently, APFloat versions of these functions do not exist, so we use
1959 // the host native double versions. Float versions are not called
1960 // directly, but for all of these it holds that (float)(f((double)arg)) ==
1961 // f(arg). Long double is not supported yet.
1962 double V = getValueAsDouble(Op);
1963
1964 switch (IntrinsicID) {
1965 default: break;
1966 case Intrinsic::log:
1967 return ConstantFoldFP(log, V, Ty);
1968 case Intrinsic::log2:
1969 // TODO: What about hosts that lack a C99 library?
1970 return ConstantFoldFP(Log2, V, Ty);
1971 case Intrinsic::log10:
1972 // TODO: What about hosts that lack a C99 library?
1973 return ConstantFoldFP(log10, V, Ty);
1974 case Intrinsic::exp:
1975 return ConstantFoldFP(exp, V, Ty);
1976 case Intrinsic::exp2:
1977 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
1978 return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
1979 case Intrinsic::sin:
1980 return ConstantFoldFP(sin, V, Ty);
1981 case Intrinsic::cos:
1982 return ConstantFoldFP(cos, V, Ty);
1983 case Intrinsic::sqrt:
1984 return ConstantFoldFP(sqrt, V, Ty);
1985 case Intrinsic::amdgcn_cos:
1986 case Intrinsic::amdgcn_sin:
1987 if (V < -256.0 || V > 256.0)
1988 // The gfx8 and gfx9 architectures handle arguments outside the range
1989 // [-256, 256] differently. This should be a rare case so bail out
1990 // rather than trying to handle the difference.
1991 return nullptr;
1992 bool IsCos = IntrinsicID == Intrinsic::amdgcn_cos;
1993 double V4 = V * 4.0;
1994 if (V4 == floor(V4)) {
1995 // Force exact results for quarter-integer inputs.
1996 const double SinVals[4] = { 0.0, 1.0, 0.0, -1.0 };
1997 V = SinVals[((int)V4 + (IsCos ? 1 : 0)) & 3];
1998 } else {
1999 if (IsCos)
2000 V = cos(V * 2.0 * numbers::pi);
2001 else
2002 V = sin(V * 2.0 * numbers::pi);
2003 }
2004 return GetConstantFoldFPValue(V, Ty);
2005 }
2006
2007 if (!TLI)
2008 return nullptr;
2009
2010 LibFunc Func = NotLibFunc;
2011 TLI->getLibFunc(Name, Func);
2012 switch (Func) {
2013 default:
2014 break;
2015 case LibFunc_acos:
2016 case LibFunc_acosf:
2017 case LibFunc_acos_finite:
2018 case LibFunc_acosf_finite:
2019 if (TLI->has(Func))
2020 return ConstantFoldFP(acos, V, Ty);
2021 break;
2022 case LibFunc_asin:
2023 case LibFunc_asinf:
2024 case LibFunc_asin_finite:
2025 case LibFunc_asinf_finite:
2026 if (TLI->has(Func))
2027 return ConstantFoldFP(asin, V, Ty);
2028 break;
2029 case LibFunc_atan:
2030 case LibFunc_atanf:
2031 if (TLI->has(Func))
2032 return ConstantFoldFP(atan, V, Ty);
2033 break;
2034 case LibFunc_ceil:
2035 case LibFunc_ceilf:
2036 if (TLI->has(Func)) {
2037 U.roundToIntegral(APFloat::rmTowardPositive);
2038 return ConstantFP::get(Ty->getContext(), U);
2039 }
2040 break;
2041 case LibFunc_cos:
2042 case LibFunc_cosf:
2043 if (TLI->has(Func))
2044 return ConstantFoldFP(cos, V, Ty);
2045 break;
2046 case LibFunc_cosh:
2047 case LibFunc_coshf:
2048 case LibFunc_cosh_finite:
2049 case LibFunc_coshf_finite:
2050 if (TLI->has(Func))
2051 return ConstantFoldFP(cosh, V, Ty);
2052 break;
2053 case LibFunc_exp:
2054 case LibFunc_expf:
2055 case LibFunc_exp_finite:
2056 case LibFunc_expf_finite:
2057 if (TLI->has(Func))
2058 return ConstantFoldFP(exp, V, Ty);
2059 break;
2060 case LibFunc_exp2:
2061 case LibFunc_exp2f:
2062 case LibFunc_exp2_finite:
2063 case LibFunc_exp2f_finite:
2064 if (TLI->has(Func))
2065 // Fold exp2(x) as pow(2, x), in case the host lacks a C99 library.
2066 return ConstantFoldBinaryFP(pow, 2.0, V, Ty);
2067 break;
2068 case LibFunc_fabs:
2069 case LibFunc_fabsf:
2070 if (TLI->has(Func)) {
2071 U.clearSign();
2072 return ConstantFP::get(Ty->getContext(), U);
2073 }
2074 break;
2075 case LibFunc_floor:
2076 case LibFunc_floorf:
2077 if (TLI->has(Func)) {
2078 U.roundToIntegral(APFloat::rmTowardNegative);
2079 return ConstantFP::get(Ty->getContext(), U);
2080 }
2081 break;
2082 case LibFunc_log:
2083 case LibFunc_logf:
2084 case LibFunc_log_finite:
2085 case LibFunc_logf_finite:
2086 if (V > 0.0 && TLI->has(Func))
2087 return ConstantFoldFP(log, V, Ty);
2088 break;
2089 case LibFunc_log2:
2090 case LibFunc_log2f:
2091 case LibFunc_log2_finite:
2092 case LibFunc_log2f_finite:
2093 if (V > 0.0 && TLI->has(Func))
2094 // TODO: What about hosts that lack a C99 library?
2095 return ConstantFoldFP(Log2, V, Ty);
2096 break;
2097 case LibFunc_log10:
2098 case LibFunc_log10f:
2099 case LibFunc_log10_finite:
2100 case LibFunc_log10f_finite:
2101 if (V > 0.0 && TLI->has(Func))
2102 // TODO: What about hosts that lack a C99 library?
2103 return ConstantFoldFP(log10, V, Ty);
2104 break;
2105 case LibFunc_nearbyint:
2106 case LibFunc_nearbyintf:
2107 case LibFunc_rint:
2108 case LibFunc_rintf:
2109 if (TLI->has(Func)) {
2110 U.roundToIntegral(APFloat::rmNearestTiesToEven);
2111 return ConstantFP::get(Ty->getContext(), U);
2112 }
2113 break;
2114 case LibFunc_round:
2115 case LibFunc_roundf:
2116 if (TLI->has(Func)) {
2117 U.roundToIntegral(APFloat::rmNearestTiesToAway);
2118 return ConstantFP::get(Ty->getContext(), U);
2119 }
2120 break;
2121 case LibFunc_sin:
2122 case LibFunc_sinf:
2123 if (TLI->has(Func))
2124 return ConstantFoldFP(sin, V, Ty);
2125 break;
2126 case LibFunc_sinh:
2127 case LibFunc_sinhf:
2128 case LibFunc_sinh_finite:
2129 case LibFunc_sinhf_finite:
2130 if (TLI->has(Func))
2131 return ConstantFoldFP(sinh, V, Ty);
2132 break;
2133 case LibFunc_sqrt:
2134 case LibFunc_sqrtf:
2135 if (V >= 0.0 && TLI->has(Func))
2136 return ConstantFoldFP(sqrt, V, Ty);
2137 break;
2138 case LibFunc_tan:
2139 case LibFunc_tanf:
2140 if (TLI->has(Func))
2141 return ConstantFoldFP(tan, V, Ty);
2142 break;
2143 case LibFunc_tanh:
2144 case LibFunc_tanhf:
2145 if (TLI->has(Func))
2146 return ConstantFoldFP(tanh, V, Ty);
2147 break;
2148 case LibFunc_trunc:
2149 case LibFunc_truncf:
2150 if (TLI->has(Func)) {
2151 U.roundToIntegral(APFloat::rmTowardZero);
2152 return ConstantFP::get(Ty->getContext(), U);
2153 }
2154 break;
2155 }
2156 return nullptr;
2157 }
2158
2159 if (auto *Op = dyn_cast<ConstantInt>(Operands[0])) {
2160 switch (IntrinsicID) {
2161 case Intrinsic::bswap:
2162 return ConstantInt::get(Ty->getContext(), Op->getValue().byteSwap());
2163 case Intrinsic::ctpop:
2164 return ConstantInt::get(Ty, Op->getValue().countPopulation());
2165 case Intrinsic::bitreverse:
2166 return ConstantInt::get(Ty->getContext(), Op->getValue().reverseBits());
2167 case Intrinsic::convert_from_fp16: {
2168 APFloat Val(APFloat::IEEEhalf(), Op->getValue());
2169
2170 bool lost = false;
2171 APFloat::opStatus status = Val.convert(
2172 Ty->getFltSemantics(), APFloat::rmNearestTiesToEven, &lost);
2173
2174 // Conversion is always precise.
2175 (void)status;
2176 assert(status == APFloat::opOK && !lost &&
2177 "Precision lost during fp16 constfolding");
2178
2179 return ConstantFP::get(Ty->getContext(), Val);
2180 }
2181 default:
2182 return nullptr;
2183 }
2184 }
2185
2186 if (isa<ConstantAggregateZero>(Operands[0])) {
2187 switch (IntrinsicID) {
2188 default: break;
2189 case Intrinsic::experimental_vector_reduce_add:
2190 case Intrinsic::experimental_vector_reduce_mul:
2191 case Intrinsic::experimental_vector_reduce_and:
2192 case Intrinsic::experimental_vector_reduce_or:
2193 case Intrinsic::experimental_vector_reduce_xor:
2194 case Intrinsic::experimental_vector_reduce_smin:
2195 case Intrinsic::experimental_vector_reduce_smax:
2196 case Intrinsic::experimental_vector_reduce_umin:
2197 case Intrinsic::experimental_vector_reduce_umax:
2198 return ConstantInt::get(Ty, 0);
2199 }
2200 }
2201
2202 // Also support ConstantVector, which is used when some elements are undef.
2203 if (isa<ConstantVector>(Operands[0]) ||
2204 isa<ConstantDataVector>(Operands[0])) {
2205 auto *Op = cast<Constant>(Operands[0]);
2206 switch (IntrinsicID) {
2207 default: break;
2208 case Intrinsic::experimental_vector_reduce_add:
2209 case Intrinsic::experimental_vector_reduce_mul:
2210 case Intrinsic::experimental_vector_reduce_and:
2211 case Intrinsic::experimental_vector_reduce_or:
2212 case Intrinsic::experimental_vector_reduce_xor:
2213 case Intrinsic::experimental_vector_reduce_smin:
2214 case Intrinsic::experimental_vector_reduce_smax:
2215 case Intrinsic::experimental_vector_reduce_umin:
2216 case Intrinsic::experimental_vector_reduce_umax:
2217 if (Constant *C = ConstantFoldVectorReduce(IntrinsicID, Op))
2218 return C;
2219 break;
2220 case Intrinsic::x86_sse_cvtss2si:
2221 case Intrinsic::x86_sse_cvtss2si64:
2222 case Intrinsic::x86_sse2_cvtsd2si:
2223 case Intrinsic::x86_sse2_cvtsd2si64:
2224 if (ConstantFP *FPOp =
2225 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2226 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2227 /*roundTowardZero=*/false, Ty,
2228 /*IsSigned*/true);
2229 break;
2230 case Intrinsic::x86_sse_cvttss2si:
2231 case Intrinsic::x86_sse_cvttss2si64:
2232 case Intrinsic::x86_sse2_cvttsd2si:
2233 case Intrinsic::x86_sse2_cvttsd2si64:
2234 if (ConstantFP *FPOp =
2235 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2236 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2237 /*roundTowardZero=*/true, Ty,
2238 /*IsSigned*/true);
2239 break;
2240 }
2241 }
2242
2243 return nullptr;
2244 }
2245
2246 static Constant *ConstantFoldScalarCall2(StringRef Name,
2247 Intrinsic::ID IntrinsicID,
2248 Type *Ty,
2249 ArrayRef<Constant *> Operands,
2250 const TargetLibraryInfo *TLI,
2251 const CallBase *Call) {
2252 assert(Operands.size() == 2 && "Wrong number of operands.");
2253
2254 if (auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2255 if (!Ty->isHalfTy() && !Ty->isFloatTy() && !Ty->isDoubleTy())
2256 return nullptr;
2257 double Op1V = getValueAsDouble(Op1);
2258
2259 if (auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2260 if (Op2->getType() != Op1->getType())
2261 return nullptr;
2262
2263 double Op2V = getValueAsDouble(Op2);
2264 if (IntrinsicID == Intrinsic::pow) {
2265 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2266 }
2267 if (IntrinsicID == Intrinsic::copysign) {
2268 APFloat V1 = Op1->getValueAPF();
2269 const APFloat &V2 = Op2->getValueAPF();
2270 V1.copySign(V2);
2271 return ConstantFP::get(Ty->getContext(), V1);
2272 }
2273
2274 if (IntrinsicID == Intrinsic::minnum) {
2275 const APFloat &C1 = Op1->getValueAPF();
2276 const APFloat &C2 = Op2->getValueAPF();
2277 return ConstantFP::get(Ty->getContext(), minnum(C1, C2));
2278 }
2279
2280 if (IntrinsicID == Intrinsic::maxnum) {
2281 const APFloat &C1 = Op1->getValueAPF();
2282 const APFloat &C2 = Op2->getValueAPF();
2283 return ConstantFP::get(Ty->getContext(), maxnum(C1, C2));
2284 }
2285
2286 if (IntrinsicID == Intrinsic::minimum) {
2287 const APFloat &C1 = Op1->getValueAPF();
2288 const APFloat &C2 = Op2->getValueAPF();
2289 return ConstantFP::get(Ty->getContext(), minimum(C1, C2));
2290 }
2291
2292 if (IntrinsicID == Intrinsic::maximum) {
2293 const APFloat &C1 = Op1->getValueAPF();
2294 const APFloat &C2 = Op2->getValueAPF();
2295 return ConstantFP::get(Ty->getContext(), maximum(C1, C2));
2296 }
2297
2298 if (IntrinsicID == Intrinsic::amdgcn_fmul_legacy) {
2299 const APFloat &C1 = Op1->getValueAPF();
2300 const APFloat &C2 = Op2->getValueAPF();
2301 // The legacy behaviour is that multiplying zero by anything, even NaN
2302 // or infinity, gives +0.0.
2303 if (C1.isZero() || C2.isZero())
2304 return ConstantFP::getNullValue(Ty);
2305 return ConstantFP::get(Ty->getContext(), C1 * C2);
2306 }
2307
2308 if (!TLI)
2309 return nullptr;
2310
2311 LibFunc Func = NotLibFunc;
2312 TLI->getLibFunc(Name, Func);
2313 switch (Func) {
2314 default:
2315 break;
2316 case LibFunc_pow:
2317 case LibFunc_powf:
2318 case LibFunc_pow_finite:
2319 case LibFunc_powf_finite:
2320 if (TLI->has(Func))
2321 return ConstantFoldBinaryFP(pow, Op1V, Op2V, Ty);
2322 break;
2323 case LibFunc_fmod:
2324 case LibFunc_fmodf:
2325 if (TLI->has(Func)) {
2326 APFloat V = Op1->getValueAPF();
2327 if (APFloat::opStatus::opOK == V.mod(Op2->getValueAPF()))
2328 return ConstantFP::get(Ty->getContext(), V);
2329 }
2330 break;
2331 case LibFunc_remainder:
2332 case LibFunc_remainderf:
2333 if (TLI->has(Func)) {
2334 APFloat V = Op1->getValueAPF();
2335 if (APFloat::opStatus::opOK == V.remainder(Op2->getValueAPF()))
2336 return ConstantFP::get(Ty->getContext(), V);
2337 }
2338 break;
2339 case LibFunc_atan2:
2340 case LibFunc_atan2f:
2341 case LibFunc_atan2_finite:
2342 case LibFunc_atan2f_finite:
2343 if (TLI->has(Func))
2344 return ConstantFoldBinaryFP(atan2, Op1V, Op2V, Ty);
2345 break;
2346 }
2347 } else if (auto *Op2C = dyn_cast<ConstantInt>(Operands[1])) {
2348 if (IntrinsicID == Intrinsic::powi && Ty->isHalfTy())
2349 return ConstantFP::get(Ty->getContext(),
2350 APFloat((float)std::pow((float)Op1V,
2351 (int)Op2C->getZExtValue())));
2352 if (IntrinsicID == Intrinsic::powi && Ty->isFloatTy())
2353 return ConstantFP::get(Ty->getContext(),
2354 APFloat((float)std::pow((float)Op1V,
2355 (int)Op2C->getZExtValue())));
2356 if (IntrinsicID == Intrinsic::powi && Ty->isDoubleTy())
2357 return ConstantFP::get(Ty->getContext(),
2358 APFloat((double)std::pow((double)Op1V,
2359 (int)Op2C->getZExtValue())));
2360
2361 if (IntrinsicID == Intrinsic::amdgcn_ldexp) {
2362 // FIXME: Should flush denorms depending on FP mode, but that's ignored
2363 // everywhere else.
2364
2365 // scalbn is equivalent to ldexp with float radix 2
2366 APFloat Result = scalbn(Op1->getValueAPF(), Op2C->getSExtValue(),
2367 APFloat::rmNearestTiesToEven);
2368 return ConstantFP::get(Ty->getContext(), Result);
2369 }
2370 }
2371 return nullptr;
2372 }
2373
2374 if (Operands[0]->getType()->isIntegerTy() &&
2375 Operands[1]->getType()->isIntegerTy()) {
2376 const APInt *C0, *C1;
2377 if (!getConstIntOrUndef(Operands[0], C0) ||
2378 !getConstIntOrUndef(Operands[1], C1))
2379 return nullptr;
2380
2381 switch (IntrinsicID) {
2382 default: break;
2383 case Intrinsic::usub_with_overflow:
2384 case Intrinsic::ssub_with_overflow:
2385 case Intrinsic::uadd_with_overflow:
2386 case Intrinsic::sadd_with_overflow:
2387 // X - undef -> { undef, false }
2388 // undef - X -> { undef, false }
2389 // X + undef -> { undef, false }
2390 // undef + X -> { undef, false }
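// Concrete folds for reference: llvm.sadd.with.overflow.i8(127, 1) yields
// { i8 -128, i1 true }, and llvm.uadd.with.overflow.i8(200, 100) yields
// { i8 44, i1 true } (300 wraps to 44 modulo 256).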
2391 if (!C0 || !C1) {
2392 return ConstantStruct::get(
2393 cast<StructType>(Ty),
2394 {UndefValue::get(Ty->getStructElementType(0)),
2395 Constant::getNullValue(Ty->getStructElementType(1))});
2396 }
2397 LLVM_FALLTHROUGH;
2398 case Intrinsic::smul_with_overflow:
2399 case Intrinsic::umul_with_overflow: {
2400 // undef * X -> { 0, false }
2401 // X * undef -> { 0, false }
2402 if (!C0 || !C1)
2403 return Constant::getNullValue(Ty);
2404
2405 APInt Res;
2406 bool Overflow;
2407 switch (IntrinsicID) {
2408 default: llvm_unreachable("Invalid case");
2409 case Intrinsic::sadd_with_overflow:
2410 Res = C0->sadd_ov(*C1, Overflow);
2411 break;
2412 case Intrinsic::uadd_with_overflow:
2413 Res = C0->uadd_ov(*C1, Overflow);
2414 break;
2415 case Intrinsic::ssub_with_overflow:
2416 Res = C0->ssub_ov(*C1, Overflow);
2417 break;
2418 case Intrinsic::usub_with_overflow:
2419 Res = C0->usub_ov(*C1, Overflow);
2420 break;
2421 case Intrinsic::smul_with_overflow:
2422 Res = C0->smul_ov(*C1, Overflow);
2423 break;
2424 case Intrinsic::umul_with_overflow:
2425 Res = C0->umul_ov(*C1, Overflow);
2426 break;
2427 }
2428 Constant *Ops[] = {
2429 ConstantInt::get(Ty->getContext(), Res),
2430 ConstantInt::get(Type::getInt1Ty(Ty->getContext()), Overflow)
2431 };
2432 return ConstantStruct::get(cast<StructType>(Ty), Ops);
2433 }
2434 case Intrinsic::uadd_sat:
2435 case Intrinsic::sadd_sat:
2436 if (!C0 && !C1)
2437 return UndefValue::get(Ty);
2438 if (!C0 || !C1)
2439 return Constant::getAllOnesValue(Ty);
2440 if (IntrinsicID == Intrinsic::uadd_sat)
2441 return ConstantInt::get(Ty, C0->uadd_sat(*C1));
2442 else
2443 return ConstantInt::get(Ty, C0->sadd_sat(*C1));
2444 case Intrinsic::usub_sat:
2445 case Intrinsic::ssub_sat:
2446 if (!C0 && !C1)
2447 return UndefValue::get(Ty);
2448 if (!C0 || !C1)
2449 return Constant::getNullValue(Ty);
2450 if (IntrinsicID == Intrinsic::usub_sat)
2451 return ConstantInt::get(Ty, C0->usub_sat(*C1));
2452 else
2453 return ConstantInt::get(Ty, C0->ssub_sat(*C1));
2454 case Intrinsic::cttz:
2455 case Intrinsic::ctlz:
2456 assert(C1 && "Must be constant int");
2457
2458 // cttz(0, 1) and ctlz(0, 1) are undef.
2459 if (C1->isOneValue() && (!C0 || C0->isNullValue()))
2460 return UndefValue::get(Ty);
2461 if (!C0)
2462 return Constant::getNullValue(Ty);
2463 if (IntrinsicID == Intrinsic::cttz)
2464 return ConstantInt::get(Ty, C0->countTrailingZeros());
2465 else
2466 return ConstantInt::get(Ty, C0->countLeadingZeros());
2467 }
2468
2469 return nullptr;
2470 }
2471
2472 // Also support ConstantVector, which is used when some elements are undef.
2473 if ((isa<ConstantVector>(Operands[0]) ||
2474 isa<ConstantDataVector>(Operands[0])) &&
2475 // Check for default rounding mode.
2476 // FIXME: Support other rounding modes?
2477 isa<ConstantInt>(Operands[1]) &&
2478 cast<ConstantInt>(Operands[1])->getValue() == 4) {
2479 auto *Op = cast<Constant>(Operands[0]);
2480 switch (IntrinsicID) {
2481 default: break;
2482 case Intrinsic::x86_avx512_vcvtss2si32:
2483 case Intrinsic::x86_avx512_vcvtss2si64:
2484 case Intrinsic::x86_avx512_vcvtsd2si32:
2485 case Intrinsic::x86_avx512_vcvtsd2si64:
2486 if (ConstantFP *FPOp =
2487 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2488 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2489 /*roundTowardZero=*/false, Ty,
2490 /*IsSigned*/true);
2491 break;
2492 case Intrinsic::x86_avx512_vcvtss2usi32:
2493 case Intrinsic::x86_avx512_vcvtss2usi64:
2494 case Intrinsic::x86_avx512_vcvtsd2usi32:
2495 case Intrinsic::x86_avx512_vcvtsd2usi64:
2496 if (ConstantFP *FPOp =
2497 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2498 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2499 /*roundTowardZero=*/false, Ty,
2500 /*IsSigned*/false);
2501 break;
2502 case Intrinsic::x86_avx512_cvttss2si:
2503 case Intrinsic::x86_avx512_cvttss2si64:
2504 case Intrinsic::x86_avx512_cvttsd2si:
2505 case Intrinsic::x86_avx512_cvttsd2si64:
2506 if (ConstantFP *FPOp =
2507 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2508 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2509 /*roundTowardZero=*/true, Ty,
2510 /*IsSigned*/true);
2511 break;
2512 case Intrinsic::x86_avx512_cvttss2usi:
2513 case Intrinsic::x86_avx512_cvttss2usi64:
2514 case Intrinsic::x86_avx512_cvttsd2usi:
2515 case Intrinsic::x86_avx512_cvttsd2usi64:
2516 if (ConstantFP *FPOp =
2517 dyn_cast_or_null<ConstantFP>(Op->getAggregateElement(0U)))
2518 return ConstantFoldSSEConvertToInt(FPOp->getValueAPF(),
2519 /*roundTowardZero=*/true, Ty,
2520 /*IsSigned*/false);
2521 break;
2522 }
2523 }
2524 return nullptr;
2525 }
2526
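// Fold the amdgcn cube intrinsics. For face coordinates (S0, S1, S2) the
// major axis is the component with the largest magnitude: ID encodes the
// selected cube face (axis and sign, 0..5), cubema returns twice the
// major-axis value, and cubesc/cubetc return the face-local S and T
// coordinates, matching the selection logic implemented below.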
2527 static APFloat ConstantFoldAMDGCNCubeIntrinsic(Intrinsic::ID IntrinsicID,
2528 const APFloat &S0,
2529 const APFloat &S1,
2530 const APFloat &S2) {
2531 unsigned ID;
2532 const fltSemantics &Sem = S0.getSemantics();
2533 APFloat MA(Sem), SC(Sem), TC(Sem);
2534 if (abs(S2) >= abs(S0) && abs(S2) >= abs(S1)) {
2535 if (S2.isNegative() && S2.isNonZero() && !S2.isNaN()) {
2536 // S2 < 0
2537 ID = 5;
2538 SC = -S0;
2539 } else {
2540 ID = 4;
2541 SC = S0;
2542 }
2543 MA = S2;
2544 TC = -S1;
2545 } else if (abs(S1) >= abs(S0)) {
2546 if (S1.isNegative() && S1.isNonZero() && !S1.isNaN()) {
2547 // S1 < 0
2548 ID = 3;
2549 TC = -S2;
2550 } else {
2551 ID = 2;
2552 TC = S2;
2553 }
2554 MA = S1;
2555 SC = S0;
2556 } else {
2557 if (S0.isNegative() && S0.isNonZero() && !S0.isNaN()) {
2558 // S0 < 0
2559 ID = 1;
2560 SC = S2;
2561 } else {
2562 ID = 0;
2563 SC = -S2;
2564 }
2565 MA = S0;
2566 TC = -S1;
2567 }
2568 switch (IntrinsicID) {
2569 default:
2570 llvm_unreachable("unhandled amdgcn cube intrinsic");
2571 case Intrinsic::amdgcn_cubeid:
2572 return APFloat(Sem, ID);
2573 case Intrinsic::amdgcn_cubema:
2574 return MA + MA;
2575 case Intrinsic::amdgcn_cubesc:
2576 return SC;
2577 case Intrinsic::amdgcn_cubetc:
2578 return TC;
2579 }
2580 }
2581
2582 static Constant *ConstantFoldScalarCall3(StringRef Name,
2583 Intrinsic::ID IntrinsicID,
2584 Type *Ty,
2585 ArrayRef<Constant *> Operands,
2586 const TargetLibraryInfo *TLI,
2587 const CallBase *Call) {
2588 assert(Operands.size() == 3 && "Wrong number of operands.");
2589
2590 if (const auto *Op1 = dyn_cast<ConstantFP>(Operands[0])) {
2591 if (const auto *Op2 = dyn_cast<ConstantFP>(Operands[1])) {
2592 if (const auto *Op3 = dyn_cast<ConstantFP>(Operands[2])) {
2593 switch (IntrinsicID) {
2594 default: break;
2595 case Intrinsic::fma:
2596 case Intrinsic::fmuladd: {
2597 APFloat V = Op1->getValueAPF();
2598 V.fusedMultiplyAdd(Op2->getValueAPF(), Op3->getValueAPF(),
2599 APFloat::rmNearestTiesToEven);
2600 return ConstantFP::get(Ty->getContext(), V);
2601 }
2602 case Intrinsic::amdgcn_cubeid:
2603 case Intrinsic::amdgcn_cubema:
2604 case Intrinsic::amdgcn_cubesc:
2605 case Intrinsic::amdgcn_cubetc: {
2606 APFloat V = ConstantFoldAMDGCNCubeIntrinsic(
2607 IntrinsicID, Op1->getValueAPF(), Op2->getValueAPF(),
2608 Op3->getValueAPF());
2609 return ConstantFP::get(Ty->getContext(), V);
2610 }
2611 }
2612 }
2613 }
2614 }
2615
2616 if (const auto *Op1 = dyn_cast<ConstantInt>(Operands[0])) {
2617 if (const auto *Op2 = dyn_cast<ConstantInt>(Operands[1])) {
2618 if (const auto *Op3 = dyn_cast<ConstantInt>(Operands[2])) {
2619 switch (IntrinsicID) {
2620 default: break;
2621 case Intrinsic::smul_fix:
2622 case Intrinsic::smul_fix_sat: {
2623 // This code performs rounding towards negative infinity in case the
2624 // result cannot be represented exactly for the given scale. Targets
2625 // that do care about rounding should use a target hook for specifying
2626 // how rounding should be done, and provide their own folding to be
2627 // consistent with rounding. This is the same approach as used by
2628 // DAGTypeLegalizer::ExpandIntRes_MULFIX.
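// As a concrete example with scale 1 (operands encode halves):
// llvm.smul.fix.i32(i32 3, i32 2, i32 1) computes (3 * 2) >> 1 = 3,
// i.e. 1.5 * 1.0 == 1.5 in the fixed-point encoding.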
2629 const APInt &Lhs = Op1->getValue();
2630 const APInt &Rhs = Op2->getValue();
2631 unsigned Scale = Op3->getValue().getZExtValue();
2632 unsigned Width = Lhs.getBitWidth();
2633 assert(Scale < Width && "Illegal scale.");
2634 unsigned ExtendedWidth = Width * 2;
2635 APInt Product = (Lhs.sextOrSelf(ExtendedWidth) *
2636 Rhs.sextOrSelf(ExtendedWidth)).ashr(Scale);
2637 if (IntrinsicID == Intrinsic::smul_fix_sat) {
2638 APInt MaxValue =
2639 APInt::getSignedMaxValue(Width).sextOrSelf(ExtendedWidth);
2640 APInt MinValue =
2641 APInt::getSignedMinValue(Width).sextOrSelf(ExtendedWidth);
2642 Product = APIntOps::smin(Product, MaxValue);
2643 Product = APIntOps::smax(Product, MinValue);
2644 }
2645 return ConstantInt::get(Ty->getContext(),
2646 Product.sextOrTrunc(Width));
2647 }
2648 }
2649 }
2650 }
2651 }
2652
2653 if (IntrinsicID == Intrinsic::fshl || IntrinsicID == Intrinsic::fshr) {
2654 const APInt *C0, *C1, *C2;
2655 if (!getConstIntOrUndef(Operands[0], C0) ||
2656 !getConstIntOrUndef(Operands[1], C1) ||
2657 !getConstIntOrUndef(Operands[2], C2))
2658 return nullptr;
2659
2660 bool IsRight = IntrinsicID == Intrinsic::fshr;
2661 if (!C2)
2662 return Operands[IsRight ? 1 : 0];
2663 if (!C0 && !C1)
2664 return UndefValue::get(Ty);
2665
2666 // The shift amount is interpreted as modulo the bitwidth. If the shift
2667 // amount is effectively 0, avoid UB due to oversized inverse shift below.
2668 unsigned BitWidth = C2->getBitWidth();
2669 unsigned ShAmt = C2->urem(BitWidth);
2670 if (!ShAmt)
2671 return Operands[IsRight ? 1 : 0];
2672
2673 // (C0 << ShlAmt) | (C1 >> LshrAmt)
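// e.g. fshl(i8 0xAB, i8 0xCD, i8 4) conceptually concatenates to 0xABCD,
// shifts left by 4 and keeps the high 8 bits, giving 0xBC.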
2674 unsigned LshrAmt = IsRight ? ShAmt : BitWidth - ShAmt;
2675 unsigned ShlAmt = !IsRight ? ShAmt : BitWidth - ShAmt;
2676 if (!C0)
2677 return ConstantInt::get(Ty, C1->lshr(LshrAmt));
2678 if (!C1)
2679 return ConstantInt::get(Ty, C0->shl(ShlAmt));
2680 return ConstantInt::get(Ty, C0->shl(ShlAmt) | C1->lshr(LshrAmt));
2681 }
2682
2683 return nullptr;
2684 }
2685
2686 static Constant *ConstantFoldScalarCall(StringRef Name,
2687 Intrinsic::ID IntrinsicID,
2688 Type *Ty,
2689 ArrayRef<Constant *> Operands,
2690 const TargetLibraryInfo *TLI,
2691 const CallBase *Call) {
2692 if (Operands.size() == 1)
2693 return ConstantFoldScalarCall1(Name, IntrinsicID, Ty, Operands, TLI, Call);
2694
2695 if (Operands.size() == 2)
2696 return ConstantFoldScalarCall2(Name, IntrinsicID, Ty, Operands, TLI, Call);
2697
2698 if (Operands.size() == 3)
2699 return ConstantFoldScalarCall3(Name, IntrinsicID, Ty, Operands, TLI, Call);
2700
2701 return nullptr;
2702 }
2703
2704 static Constant *ConstantFoldVectorCall(StringRef Name,
2705 Intrinsic::ID IntrinsicID,
2706 VectorType *VTy,
2707 ArrayRef<Constant *> Operands,
2708 const DataLayout &DL,
2709 const TargetLibraryInfo *TLI,
2710 const CallBase *Call) {
2711 // Do not iterate on scalable vectors; the number of elements is unknown at
2712 // compile time.
2713 if (isa<ScalableVectorType>(VTy))
2714 return nullptr;
2715
2716 auto *FVTy = cast<FixedVectorType>(VTy);
2717
2718 SmallVector<Constant *, 4> Result(FVTy->getNumElements());
2719 SmallVector<Constant *, 4> Lane(Operands.size());
2720 Type *Ty = FVTy->getElementType();
2721
2722 if (IntrinsicID == Intrinsic::masked_load) {
2723 auto *SrcPtr = Operands[0];
2724 auto *Mask = Operands[2];
2725 auto *Passthru = Operands[3];
2726
2727 Constant *VecData = ConstantFoldLoadFromConstPtr(SrcPtr, FVTy, DL);
2728
2729 SmallVector<Constant *, 32> NewElements;
2730 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
2731 auto *MaskElt = Mask->getAggregateElement(I);
2732 if (!MaskElt)
2733 break;
2734 auto *PassthruElt = Passthru->getAggregateElement(I);
2735 auto *VecElt = VecData ? VecData->getAggregateElement(I) : nullptr;
2736 if (isa<UndefValue>(MaskElt)) {
2737 if (PassthruElt)
2738 NewElements.push_back(PassthruElt);
2739 else if (VecElt)
2740 NewElements.push_back(VecElt);
2741 else
2742 return nullptr;
2743 } else if (MaskElt->isNullValue()) {
2745 if (!PassthruElt)
2746 return nullptr;
2747 NewElements.push_back(PassthruElt);
2748 } else if (MaskElt->isOneValue()) {
2749 if (!VecElt)
2750 return nullptr;
2751 NewElements.push_back(VecElt);
2752 } else {
2753 return nullptr;
2754 }
2755 }
2756 if (NewElements.size() != FVTy->getNumElements())
2757 return nullptr;
2758 return ConstantVector::get(NewElements);
2759 }
2760
2761 for (unsigned I = 0, E = FVTy->getNumElements(); I != E; ++I) {
2762 // Gather a column of constants.
2763 for (unsigned J = 0, JE = Operands.size(); J != JE; ++J) {
2764 // Some intrinsics use a scalar type for certain arguments.
2765 if (hasVectorInstrinsicScalarOpd(IntrinsicID, J)) {
2766 Lane[J] = Operands[J];
2767 continue;
2768 }
2769
2770 Constant *Agg = Operands[J]->getAggregateElement(I);
2771 if (!Agg)
2772 return nullptr;
2773
2774 Lane[J] = Agg;
2775 }
2776
2777 // Use the regular scalar folding to simplify this column.
2778 Constant *Folded =
2779 ConstantFoldScalarCall(Name, IntrinsicID, Ty, Lane, TLI, Call);
2780 if (!Folded)
2781 return nullptr;
2782 Result[I] = Folded;
2783 }
2784
2785 return ConstantVector::get(Result);
2786 }
2787
2788 } // end anonymous namespace
2789
2790 Constant *llvm::ConstantFoldCall(const CallBase *Call, Function *F,
2791 ArrayRef<Constant *> Operands,
2792 const TargetLibraryInfo *TLI) {
2793 if (Call->isNoBuiltin())
2794 return nullptr;
2795 if (!F->hasName())
2796 return nullptr;
2797 StringRef Name = F->getName();
2798
2799 Type *Ty = F->getReturnType();
2800
2801 if (auto *VTy = dyn_cast<VectorType>(Ty))
2802 return ConstantFoldVectorCall(Name, F->getIntrinsicID(), VTy, Operands,
2803 F->getParent()->getDataLayout(), TLI, Call);
2804
2805 return ConstantFoldScalarCall(Name, F->getIntrinsicID(), Ty, Operands, TLI,
2806 Call);
2807 }
2808
2809 bool llvm::isMathLibCallNoop(const CallBase *Call,
2810 const TargetLibraryInfo *TLI) {
2811 // FIXME: Refactor this code; this duplicates logic in LibCallsShrinkWrap
2812 // (and to some extent ConstantFoldScalarCall).
2813 if (Call->isNoBuiltin() || Call->isStrictFP())
2814 return false;
2815 Function *F = Call->getCalledFunction();
2816 if (!F)
2817 return false;
2818
2819 LibFunc Func;
2820 if (!TLI || !TLI->getLibFunc(*F, Func))
2821 return false;
2822
2823 if (Call->getNumArgOperands() == 1) {
2824 if (ConstantFP *OpC = dyn_cast<ConstantFP>(Call->getArgOperand(0))) {
2825 const APFloat &Op = OpC->getValueAPF();
2826 switch (Func) {
2827 case LibFunc_logl:
2828 case LibFunc_log:
2829 case LibFunc_logf:
2830 case LibFunc_log2l:
2831 case LibFunc_log2:
2832 case LibFunc_log2f:
2833 case LibFunc_log10l:
2834 case LibFunc_log10:
2835 case LibFunc_log10f:
2836 return Op.isNaN() || (!Op.isZero() && !Op.isNegative());
2837
2838 case LibFunc_expl:
2839 case LibFunc_exp:
2840 case LibFunc_expf:
2841 // FIXME: These boundaries are slightly conservative.
2842 if (OpC->getType()->isDoubleTy())
2843 return !(Op < APFloat(-745.0) || Op > APFloat(709.0));
2844 if (OpC->getType()->isFloatTy())
2845 return !(Op < APFloat(-103.0f) || Op > APFloat(88.0f));
2846 break;
2847
2848 case LibFunc_exp2l:
2849 case LibFunc_exp2:
2850 case LibFunc_exp2f:
2851 // FIXME: These boundaries are slightly conservative.
2852 if (OpC->getType()->isDoubleTy())
2853 return !(Op < APFloat(-1074.0) || Op > APFloat(1023.0));
2854 if (OpC->getType()->isFloatTy())
2855 return !(Op < APFloat(-149.0f) || Op > APFloat(127.0f));
2856 break;
2857
2858 case LibFunc_sinl:
2859 case LibFunc_sin:
2860 case LibFunc_sinf:
2861 case LibFunc_cosl:
2862 case LibFunc_cos:
2863 case LibFunc_cosf:
2864 return !Op.isInfinity();
2865
2866 case LibFunc_tanl:
2867 case LibFunc_tan:
2868 case LibFunc_tanf: {
2869 // FIXME: Stop using the host math library.
2870 // FIXME: The computation isn't done in the right precision.
2871 Type *Ty = OpC->getType();
2872 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2873 double OpV = getValueAsDouble(OpC);
2874 return ConstantFoldFP(tan, OpV, Ty) != nullptr;
2875 }
2876 break;
2877 }
2878
2879 case LibFunc_asinl:
2880 case LibFunc_asin:
2881 case LibFunc_asinf:
2882 case LibFunc_acosl:
2883 case LibFunc_acos:
2884 case LibFunc_acosf:
2885 return !(Op < APFloat(Op.getSemantics(), "-1") ||
2886 Op > APFloat(Op.getSemantics(), "1"));
2887
2888 case LibFunc_sinh:
2889 case LibFunc_cosh:
2890 case LibFunc_sinhf:
2891 case LibFunc_coshf:
2892 case LibFunc_sinhl:
2893 case LibFunc_coshl:
2894 // FIXME: These boundaries are slightly conservative.
2895 if (OpC->getType()->isDoubleTy())
2896 return !(Op < APFloat(-710.0) || Op > APFloat(710.0));
2897 if (OpC->getType()->isFloatTy())
2898 return !(Op < APFloat(-89.0f) || Op > APFloat(89.0f));
2899 break;
2900
2901 case LibFunc_sqrtl:
2902 case LibFunc_sqrt:
2903 case LibFunc_sqrtf:
2904 return Op.isNaN() || Op.isZero() || !Op.isNegative();
2905
2906 // FIXME: Add more functions: sqrt_finite, atanh, expm1, log1p,
2907 // maybe others?
2908 default:
2909 break;
2910 }
2911 }
2912 }
2913
2914 if (Call->getNumArgOperands() == 2) {
2915 ConstantFP *Op0C = dyn_cast<ConstantFP>(Call->getArgOperand(0));
2916 ConstantFP *Op1C = dyn_cast<ConstantFP>(Call->getArgOperand(1));
2917 if (Op0C && Op1C) {
2918 const APFloat &Op0 = Op0C->getValueAPF();
2919 const APFloat &Op1 = Op1C->getValueAPF();
2920
2921 switch (Func) {
2922 case LibFunc_powl:
2923 case LibFunc_pow:
2924 case LibFunc_powf: {
2925 // FIXME: Stop using the host math library.
2926 // FIXME: The computation isn't done in the right precision.
2927 Type *Ty = Op0C->getType();
2928 if (Ty->isDoubleTy() || Ty->isFloatTy() || Ty->isHalfTy()) {
2929 if (Ty == Op1C->getType()) {
2930 double Op0V = getValueAsDouble(Op0C);
2931 double Op1V = getValueAsDouble(Op1C);
2932 return ConstantFoldBinaryFP(pow, Op0V, Op1V, Ty) != nullptr;
2933 }
2934 }
2935 break;
2936 }
2937
2938 case LibFunc_fmodl:
2939 case LibFunc_fmod:
2940 case LibFunc_fmodf:
2941 case LibFunc_remainderl:
2942 case LibFunc_remainder:
2943 case LibFunc_remainderf:
2944 return Op0.isNaN() || Op1.isNaN() ||
2945 (!Op0.isInfinity() && !Op1.isZero());
2946
2947 default:
2948 break;
2949 }
2950 }
2951 }
2952
2953 return false;
2954 }
2955
2956 void TargetFolder::anchor() {}
2957