1 //===- ConstantFold.cpp - LLVM constant folder ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements folding of constants for LLVM.  This implements the
10 // (internal) ConstantFold.h interface, which is used by the
11 // ConstantExpr::get* methods to automatically fold constants when possible.
12 //
// Constant folding is currently implemented in two pieces: the pieces that
// don't need DataLayout, and the pieces that do. This is to avoid a
// dependence in IR on Target.
16 //
17 //===----------------------------------------------------------------------===//
18 
19 #include "ConstantFold.h"
20 #include "llvm/ADT/APSInt.h"
21 #include "llvm/ADT/SmallVector.h"
22 #include "llvm/IR/Constants.h"
23 #include "llvm/IR/DerivedTypes.h"
24 #include "llvm/IR/Function.h"
25 #include "llvm/IR/GetElementPtrTypeIterator.h"
26 #include "llvm/IR/GlobalAlias.h"
27 #include "llvm/IR/GlobalVariable.h"
28 #include "llvm/IR/Instructions.h"
29 #include "llvm/IR/Module.h"
30 #include "llvm/IR/Operator.h"
31 #include "llvm/IR/PatternMatch.h"
32 #include "llvm/Support/ErrorHandling.h"
33 #include "llvm/Support/ManagedStatic.h"
34 #include "llvm/Support/MathExtras.h"
35 using namespace llvm;
36 using namespace llvm::PatternMatch;
37 
38 //===----------------------------------------------------------------------===//
39 //                ConstantFold*Instruction Implementations
40 //===----------------------------------------------------------------------===//
41 
42 /// Convert the specified vector Constant node to the specified vector type.
43 /// At this point, we know that the elements of the input vector constant are
44 /// all simple integer or FP values.
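/// For example, a bitcast of <4 x i32> to <4 x float> is folded by bitcasting
/// each i32 element to float; a <4 x i32> to <2 x i64> bitcast is rejected
/// here because the element count changes.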
45 static Constant *BitCastConstantVector(Constant *CV, VectorType *DstTy) {
46 
47   if (CV->isAllOnesValue()) return Constant::getAllOnesValue(DstTy);
48   if (CV->isNullValue()) return Constant::getNullValue(DstTy);
49 
50   // If this cast changes element count then we can't handle it here:
51   // doing so requires endianness information.  This should be handled by
52   // Analysis/ConstantFolding.cpp
53   unsigned NumElts = DstTy->getNumElements();
54   if (NumElts != CV->getType()->getVectorNumElements())
55     return nullptr;
56 
57   Type *DstEltTy = DstTy->getElementType();
58 
59   SmallVector<Constant*, 16> Result;
60   Type *Ty = IntegerType::get(CV->getContext(), 32);
61   for (unsigned i = 0; i != NumElts; ++i) {
62     Constant *C =
63       ConstantExpr::getExtractElement(CV, ConstantInt::get(Ty, i));
64     C = ConstantExpr::getBitCast(C, DstEltTy);
65     Result.push_back(C);
66   }
67 
68   return ConstantVector::get(Result);
69 }
70 
/// This function determines which opcode to use to fold two constant cast
/// expressions together. It uses CastInst::isEliminableCastPair to determine
/// the opcode; consequently it is just a wrapper around that function.
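/// For example, a first cast 'zext i8 to i16' followed by a second cast
/// 'zext i16 to i32' can be folded into a single 'zext i8 to i32', so this
/// returns Instruction::ZExt; a return value of 0 means the pair cannot be
/// folded into a single cast.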
75 static unsigned
76 foldConstantCastPair(
77   unsigned opc,          ///< opcode of the second cast constant expression
78   ConstantExpr *Op,      ///< the first cast constant expression
  Type *DstTy            ///< destination type of the second cast
80 ) {
81   assert(Op && Op->isCast() && "Can't fold cast of cast without a cast!");
82   assert(DstTy && DstTy->isFirstClassType() && "Invalid cast destination type");
83   assert(CastInst::isCast(opc) && "Invalid cast opcode");
84 
85   // The types and opcodes for the two Cast constant expressions
86   Type *SrcTy = Op->getOperand(0)->getType();
87   Type *MidTy = Op->getType();
88   Instruction::CastOps firstOp = Instruction::CastOps(Op->getOpcode());
89   Instruction::CastOps secondOp = Instruction::CastOps(opc);
90 
91   // Assume that pointers are never more than 64 bits wide, and only use this
92   // for the middle type. Otherwise we could end up folding away illegal
93   // bitcasts between address spaces with different sizes.
94   IntegerType *FakeIntPtrTy = Type::getInt64Ty(DstTy->getContext());
95 
96   // Let CastInst::isEliminableCastPair do the heavy lifting.
97   return CastInst::isEliminableCastPair(firstOp, secondOp, SrcTy, MidTy, DstTy,
98                                         nullptr, FakeIntPtrTy, nullptr);
99 }
100 
101 static Constant *FoldBitCast(Constant *V, Type *DestTy) {
102   Type *SrcTy = V->getType();
103   if (SrcTy == DestTy)
104     return V; // no-op cast
105 
106   // Check to see if we are casting a pointer to an aggregate to a pointer to
107   // the first element.  If so, return the appropriate GEP instruction.
108   if (PointerType *PTy = dyn_cast<PointerType>(V->getType()))
109     if (PointerType *DPTy = dyn_cast<PointerType>(DestTy))
110       if (PTy->getAddressSpace() == DPTy->getAddressSpace()
111           && PTy->getElementType()->isSized()) {
112         SmallVector<Value*, 8> IdxList;
113         Value *Zero =
114           Constant::getNullValue(Type::getInt32Ty(DPTy->getContext()));
115         IdxList.push_back(Zero);
116         Type *ElTy = PTy->getElementType();
117         while (ElTy != DPTy->getElementType()) {
118           if (StructType *STy = dyn_cast<StructType>(ElTy)) {
119             if (STy->getNumElements() == 0) break;
120             ElTy = STy->getElementType(0);
121             IdxList.push_back(Zero);
122           } else if (SequentialType *STy =
123                      dyn_cast<SequentialType>(ElTy)) {
124             ElTy = STy->getElementType();
125             IdxList.push_back(Zero);
126           } else {
127             break;
128           }
129         }
130 
131         if (ElTy == DPTy->getElementType())
132           // This GEP is inbounds because all indices are zero.
133           return ConstantExpr::getInBoundsGetElementPtr(PTy->getElementType(),
134                                                         V, IdxList);
135       }
136 
137   // Handle casts from one vector constant to another.  We know that the src
  // and dest type have the same size (otherwise it's an illegal cast).
139   if (VectorType *DestPTy = dyn_cast<VectorType>(DestTy)) {
140     if (VectorType *SrcTy = dyn_cast<VectorType>(V->getType())) {
141       assert(DestPTy->getBitWidth() == SrcTy->getBitWidth() &&
142              "Not cast between same sized vectors!");
143       SrcTy = nullptr;
144       // First, check for null.  Undef is already handled.
145       if (isa<ConstantAggregateZero>(V))
146         return Constant::getNullValue(DestTy);
147 
      // Handle ConstantVector and ConstantDataVector.
149       return BitCastConstantVector(V, DestPTy);
150     }
151 
152     // Canonicalize scalar-to-vector bitcasts into vector-to-vector bitcasts
153     // This allows for other simplifications (although some of them
154     // can only be handled by Analysis/ConstantFolding.cpp).
155     if (isa<ConstantInt>(V) || isa<ConstantFP>(V))
156       return ConstantExpr::getBitCast(ConstantVector::get(V), DestPTy);
157   }
158 
  // Finally, implement bitcast folding.  The code below only handles the
  // remaining scalar cases.
161   if (isa<ConstantPointerNull>(V))  // ptr->ptr cast.
162     return ConstantPointerNull::get(cast<PointerType>(DestTy));
163 
164   // Handle integral constant input.
165   if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
166     if (DestTy->isIntegerTy())
167       // Integral -> Integral. This is a no-op because the bit widths must
168       // be the same. Consequently, we just fold to V.
169       return V;
170 
171     // See note below regarding the PPC_FP128 restriction.
172     if (DestTy->isFloatingPointTy() && !DestTy->isPPC_FP128Ty())
173       return ConstantFP::get(DestTy->getContext(),
174                              APFloat(DestTy->getFltSemantics(),
175                                      CI->getValue()));
176 
177     // Otherwise, can't fold this (vector?)
178     return nullptr;
179   }
180 
181   // Handle ConstantFP input: FP -> Integral.
182   if (ConstantFP *FP = dyn_cast<ConstantFP>(V)) {
183     // PPC_FP128 is really the sum of two consecutive doubles, where the first
184     // double is always stored first in memory, regardless of the target
185     // endianness. The memory layout of i128, however, depends on the target
186     // endianness, and so we can't fold this without target endianness
187     // information. This should instead be handled by
188     // Analysis/ConstantFolding.cpp
189     if (FP->getType()->isPPC_FP128Ty())
190       return nullptr;
191 
192     // Make sure dest type is compatible with the folded integer constant.
193     if (!DestTy->isIntegerTy())
194       return nullptr;
195 
196     return ConstantInt::get(FP->getContext(),
197                             FP->getValueAPF().bitcastToAPInt());
198   }
199 
200   return nullptr;
201 }
202 
203 
/// C is an integer constant which only has a subset of its bytes used.
205 /// The bytes used are indicated by ByteStart (which is the first byte used,
206 /// counting from the least significant byte) and ByteSize, which is the number
207 /// of bytes used.
208 ///
209 /// This function analyzes the specified constant to see if the specified byte
210 /// range can be returned as a simplified constant.  If so, the constant is
211 /// returned, otherwise null is returned.
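/// For example, extracting ByteStart=1, ByteSize=2 from the i32 constant
/// 0x11223344 yields the i16 constant 0x2233.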
212 static Constant *ExtractConstantBytes(Constant *C, unsigned ByteStart,
213                                       unsigned ByteSize) {
214   assert(C->getType()->isIntegerTy() &&
215          (cast<IntegerType>(C->getType())->getBitWidth() & 7) == 0 &&
216          "Non-byte sized integer input");
217   unsigned CSize = cast<IntegerType>(C->getType())->getBitWidth()/8;
218   assert(ByteSize && "Must be accessing some piece");
219   assert(ByteStart+ByteSize <= CSize && "Extracting invalid piece from input");
220   assert(ByteSize != CSize && "Should not extract everything");
221 
222   // Constant Integers are simple.
223   if (ConstantInt *CI = dyn_cast<ConstantInt>(C)) {
224     APInt V = CI->getValue();
225     if (ByteStart)
226       V.lshrInPlace(ByteStart*8);
227     V = V.trunc(ByteSize*8);
228     return ConstantInt::get(CI->getContext(), V);
229   }
230 
  // If the input is a constant expr, we might be able to recursively simplify.
232   // If not, we definitely can't do anything.
233   ConstantExpr *CE = dyn_cast<ConstantExpr>(C);
234   if (!CE) return nullptr;
235 
236   switch (CE->getOpcode()) {
237   default: return nullptr;
238   case Instruction::Or: {
239     Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
240     if (!RHS)
241       return nullptr;
242 
243     // X | -1 -> -1.
244     if (ConstantInt *RHSC = dyn_cast<ConstantInt>(RHS))
245       if (RHSC->isMinusOne())
246         return RHSC;
247 
248     Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
249     if (!LHS)
250       return nullptr;
251     return ConstantExpr::getOr(LHS, RHS);
252   }
253   case Instruction::And: {
254     Constant *RHS = ExtractConstantBytes(CE->getOperand(1), ByteStart,ByteSize);
255     if (!RHS)
256       return nullptr;
257 
258     // X & 0 -> 0.
259     if (RHS->isNullValue())
260       return RHS;
261 
262     Constant *LHS = ExtractConstantBytes(CE->getOperand(0), ByteStart,ByteSize);
263     if (!LHS)
264       return nullptr;
265     return ConstantExpr::getAnd(LHS, RHS);
266   }
267   case Instruction::LShr: {
268     ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
269     if (!Amt)
270       return nullptr;
271     APInt ShAmt = Amt->getValue();
272     // Cannot analyze non-byte shifts.
273     if ((ShAmt & 7) != 0)
274       return nullptr;
275     ShAmt.lshrInPlace(3);
276 
277     // If the extract is known to be all zeros, return zero.
278     if (ShAmt.uge(CSize - ByteStart))
279       return Constant::getNullValue(
280           IntegerType::get(CE->getContext(), ByteSize * 8));
281     // If the extract is known to be fully in the input, extract it.
282     if (ShAmt.ule(CSize - (ByteStart + ByteSize)))
283       return ExtractConstantBytes(CE->getOperand(0),
284                                   ByteStart + ShAmt.getZExtValue(), ByteSize);
285 
286     // TODO: Handle the 'partially zero' case.
287     return nullptr;
288   }
289 
290   case Instruction::Shl: {
291     ConstantInt *Amt = dyn_cast<ConstantInt>(CE->getOperand(1));
292     if (!Amt)
293       return nullptr;
294     APInt ShAmt = Amt->getValue();
295     // Cannot analyze non-byte shifts.
296     if ((ShAmt & 7) != 0)
297       return nullptr;
298     ShAmt.lshrInPlace(3);
299 
300     // If the extract is known to be all zeros, return zero.
301     if (ShAmt.uge(ByteStart + ByteSize))
302       return Constant::getNullValue(
303           IntegerType::get(CE->getContext(), ByteSize * 8));
304     // If the extract is known to be fully in the input, extract it.
305     if (ShAmt.ule(ByteStart))
306       return ExtractConstantBytes(CE->getOperand(0),
307                                   ByteStart - ShAmt.getZExtValue(), ByteSize);
308 
309     // TODO: Handle the 'partially zero' case.
310     return nullptr;
311   }
312 
313   case Instruction::ZExt: {
314     unsigned SrcBitSize =
315       cast<IntegerType>(CE->getOperand(0)->getType())->getBitWidth();
316 
317     // If extracting something that is completely zero, return 0.
318     if (ByteStart*8 >= SrcBitSize)
319       return Constant::getNullValue(IntegerType::get(CE->getContext(),
320                                                      ByteSize*8));
321 
322     // If exactly extracting the input, return it.
323     if (ByteStart == 0 && ByteSize*8 == SrcBitSize)
324       return CE->getOperand(0);
325 
326     // If extracting something completely in the input, if the input is a
327     // multiple of 8 bits, recurse.
328     if ((SrcBitSize&7) == 0 && (ByteStart+ByteSize)*8 <= SrcBitSize)
329       return ExtractConstantBytes(CE->getOperand(0), ByteStart, ByteSize);
330 
    // Otherwise, if extracting a subset of an input whose width is not a
    // multiple of 8 bits, do a shift and trunc to get the bits.
333     if ((ByteStart+ByteSize)*8 < SrcBitSize) {
334       assert((SrcBitSize&7) && "Shouldn't get byte sized case here");
335       Constant *Res = CE->getOperand(0);
336       if (ByteStart)
337         Res = ConstantExpr::getLShr(Res,
338                                  ConstantInt::get(Res->getType(), ByteStart*8));
339       return ConstantExpr::getTrunc(Res, IntegerType::get(C->getContext(),
340                                                           ByteSize*8));
341     }
342 
343     // TODO: Handle the 'partially zero' case.
344     return nullptr;
345   }
346   }
347 }
348 
349 /// Return a ConstantExpr with type DestTy for sizeof on Ty, with any known
350 /// factors factored out. If Folded is false, return null if no factoring was
351 /// possible, to avoid endlessly bouncing an unfoldable expression back into the
352 /// top-level folder.
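/// For example, sizeof([8 x %T]) is folded to 8 * sizeof(%T) (using a
/// no-unsigned-wrap multiply), exposing the known factor of 8.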
353 static Constant *getFoldedSizeOf(Type *Ty, Type *DestTy, bool Folded) {
354   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
355     Constant *N = ConstantInt::get(DestTy, ATy->getNumElements());
356     Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
357     return ConstantExpr::getNUWMul(E, N);
358   }
359 
360   if (StructType *STy = dyn_cast<StructType>(Ty))
361     if (!STy->isPacked()) {
362       unsigned NumElems = STy->getNumElements();
363       // An empty struct has size zero.
364       if (NumElems == 0)
365         return ConstantExpr::getNullValue(DestTy);
366       // Check for a struct with all members having the same size.
367       Constant *MemberSize =
368         getFoldedSizeOf(STy->getElementType(0), DestTy, true);
369       bool AllSame = true;
370       for (unsigned i = 1; i != NumElems; ++i)
371         if (MemberSize !=
372             getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
373           AllSame = false;
374           break;
375         }
376       if (AllSame) {
377         Constant *N = ConstantInt::get(DestTy, NumElems);
378         return ConstantExpr::getNUWMul(MemberSize, N);
379       }
380     }
381 
  // Pointer size doesn't depend on the pointee type, so canonicalize to an
  // arbitrary pointee type.
384   if (PointerType *PTy = dyn_cast<PointerType>(Ty))
385     if (!PTy->getElementType()->isIntegerTy(1))
386       return
387         getFoldedSizeOf(PointerType::get(IntegerType::get(PTy->getContext(), 1),
388                                          PTy->getAddressSpace()),
389                         DestTy, true);
390 
391   // If there's no interesting folding happening, bail so that we don't create
392   // a constant that looks like it needs folding but really doesn't.
393   if (!Folded)
394     return nullptr;
395 
396   // Base case: Get a regular sizeof expression.
397   Constant *C = ConstantExpr::getSizeOf(Ty);
398   C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
399                                                     DestTy, false),
400                             C, DestTy);
401   return C;
402 }
403 
404 /// Return a ConstantExpr with type DestTy for alignof on Ty, with any known
405 /// factors factored out. If Folded is false, return null if no factoring was
406 /// possible, to avoid endlessly bouncing an unfoldable expression back into the
407 /// top-level folder.
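/// For example, alignof([4 x double]) is folded to alignof(double), and
/// alignof of a packed struct is folded to 1.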
408 static Constant *getFoldedAlignOf(Type *Ty, Type *DestTy, bool Folded) {
409   // The alignment of an array is equal to the alignment of the
410   // array element. Note that this is not always true for vectors.
411   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
412     Constant *C = ConstantExpr::getAlignOf(ATy->getElementType());
413     C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
414                                                       DestTy,
415                                                       false),
416                               C, DestTy);
417     return C;
418   }
419 
420   if (StructType *STy = dyn_cast<StructType>(Ty)) {
421     // Packed structs always have an alignment of 1.
422     if (STy->isPacked())
423       return ConstantInt::get(DestTy, 1);
424 
425     // Otherwise, struct alignment is the maximum alignment of any member.
426     // Without target data, we can't compare much, but we can check to see
427     // if all the members have the same alignment.
428     unsigned NumElems = STy->getNumElements();
429     // An empty struct has minimal alignment.
430     if (NumElems == 0)
431       return ConstantInt::get(DestTy, 1);
432     // Check for a struct with all members having the same alignment.
433     Constant *MemberAlign =
434       getFoldedAlignOf(STy->getElementType(0), DestTy, true);
435     bool AllSame = true;
436     for (unsigned i = 1; i != NumElems; ++i)
437       if (MemberAlign != getFoldedAlignOf(STy->getElementType(i), DestTy, true)) {
438         AllSame = false;
439         break;
440       }
441     if (AllSame)
442       return MemberAlign;
443   }
444 
  // Pointer alignment doesn't depend on the pointee type, so canonicalize to
  // an arbitrary pointee type.
447   if (PointerType *PTy = dyn_cast<PointerType>(Ty))
448     if (!PTy->getElementType()->isIntegerTy(1))
449       return
450         getFoldedAlignOf(PointerType::get(IntegerType::get(PTy->getContext(),
451                                                            1),
452                                           PTy->getAddressSpace()),
453                          DestTy, true);
454 
455   // If there's no interesting folding happening, bail so that we don't create
456   // a constant that looks like it needs folding but really doesn't.
457   if (!Folded)
458     return nullptr;
459 
460   // Base case: Get a regular alignof expression.
461   Constant *C = ConstantExpr::getAlignOf(Ty);
462   C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
463                                                     DestTy, false),
464                             C, DestTy);
465   return C;
466 }
467 
468 /// Return a ConstantExpr with type DestTy for offsetof on Ty and FieldNo, with
469 /// any known factors factored out. If Folded is false, return null if no
470 /// factoring was possible, to avoid endlessly bouncing an unfoldable expression
471 /// back into the top-level folder.
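/// For example, offsetof([8 x %T], i) is folded to i * sizeof(%T), and
/// offsetof of a non-packed struct whose members all have the same size is
/// folded to FieldNo * that member size.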
472 static Constant *getFoldedOffsetOf(Type *Ty, Constant *FieldNo, Type *DestTy,
473                                    bool Folded) {
474   if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
475     Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo, false,
476                                                                 DestTy, false),
477                                         FieldNo, DestTy);
478     Constant *E = getFoldedSizeOf(ATy->getElementType(), DestTy, true);
479     return ConstantExpr::getNUWMul(E, N);
480   }
481 
482   if (StructType *STy = dyn_cast<StructType>(Ty))
483     if (!STy->isPacked()) {
484       unsigned NumElems = STy->getNumElements();
485       // An empty struct has no members.
486       if (NumElems == 0)
487         return nullptr;
488       // Check for a struct with all members having the same size.
489       Constant *MemberSize =
490         getFoldedSizeOf(STy->getElementType(0), DestTy, true);
491       bool AllSame = true;
492       for (unsigned i = 1; i != NumElems; ++i)
493         if (MemberSize !=
494             getFoldedSizeOf(STy->getElementType(i), DestTy, true)) {
495           AllSame = false;
496           break;
497         }
498       if (AllSame) {
499         Constant *N = ConstantExpr::getCast(CastInst::getCastOpcode(FieldNo,
500                                                                     false,
501                                                                     DestTy,
502                                                                     false),
503                                             FieldNo, DestTy);
504         return ConstantExpr::getNUWMul(MemberSize, N);
505       }
506     }
507 
508   // If there's no interesting folding happening, bail so that we don't create
509   // a constant that looks like it needs folding but really doesn't.
510   if (!Folded)
511     return nullptr;
512 
513   // Base case: Get a regular offsetof expression.
514   Constant *C = ConstantExpr::getOffsetOf(Ty, FieldNo);
515   C = ConstantExpr::getCast(CastInst::getCastOpcode(C, false,
516                                                     DestTy, false),
517                             C, DestTy);
518   return C;
519 }
520 
521 Constant *llvm::ConstantFoldCastInstruction(unsigned opc, Constant *V,
522                                             Type *DestTy) {
523   if (isa<UndefValue>(V)) {
524     // zext(undef) = 0, because the top bits will be zero.
525     // sext(undef) = 0, because the top bits will all be the same.
526     // [us]itofp(undef) = 0, because the result value is bounded.
527     if (opc == Instruction::ZExt || opc == Instruction::SExt ||
528         opc == Instruction::UIToFP || opc == Instruction::SIToFP)
529       return Constant::getNullValue(DestTy);
530     return UndefValue::get(DestTy);
531   }
532 
533   if (V->isNullValue() && !DestTy->isX86_MMXTy() &&
534       opc != Instruction::AddrSpaceCast)
535     return Constant::getNullValue(DestTy);
536 
  // If the cast operand is a constant expression, there are a few things we
  // can do to try to simplify it.
539   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V)) {
540     if (CE->isCast()) {
541       // Try hard to fold cast of cast because they are often eliminable.
542       if (unsigned newOpc = foldConstantCastPair(opc, CE, DestTy))
543         return ConstantExpr::getCast(newOpc, CE->getOperand(0), DestTy);
544     } else if (CE->getOpcode() == Instruction::GetElementPtr &&
545                // Do not fold addrspacecast (gep 0, .., 0). It might make the
546                // addrspacecast uncanonicalized.
547                opc != Instruction::AddrSpaceCast &&
548                // Do not fold bitcast (gep) with inrange index, as this loses
549                // information.
550                !cast<GEPOperator>(CE)->getInRangeIndex().hasValue() &&
551                // Do not fold if the gep type is a vector, as bitcasting
552                // operand 0 of a vector gep will result in a bitcast between
553                // different sizes.
554                !CE->getType()->isVectorTy()) {
555       // If all of the indexes in the GEP are null values, there is no pointer
556       // adjustment going on.  We might as well cast the source pointer.
557       bool isAllNull = true;
558       for (unsigned i = 1, e = CE->getNumOperands(); i != e; ++i)
559         if (!CE->getOperand(i)->isNullValue()) {
560           isAllNull = false;
561           break;
562         }
563       if (isAllNull)
564         // This is casting one pointer type to another, always BitCast
565         return ConstantExpr::getPointerCast(CE->getOperand(0), DestTy);
566     }
567   }
568 
569   // If the cast operand is a constant vector, perform the cast by
  // operating on each element. In the case of bitcasts, the element
571   // count may be mismatched; don't attempt to handle that here.
572   if ((isa<ConstantVector>(V) || isa<ConstantDataVector>(V)) &&
573       DestTy->isVectorTy() &&
574       DestTy->getVectorNumElements() == V->getType()->getVectorNumElements()) {
575     SmallVector<Constant*, 16> res;
576     VectorType *DestVecTy = cast<VectorType>(DestTy);
577     Type *DstEltTy = DestVecTy->getElementType();
578     Type *Ty = IntegerType::get(V->getContext(), 32);
579     for (unsigned i = 0, e = V->getType()->getVectorNumElements(); i != e; ++i) {
580       Constant *C =
581         ConstantExpr::getExtractElement(V, ConstantInt::get(Ty, i));
582       res.push_back(ConstantExpr::getCast(opc, C, DstEltTy));
583     }
584     return ConstantVector::get(res);
585   }
586 
587   // We actually have to do a cast now. Perform the cast according to the
588   // opcode specified.
589   switch (opc) {
590   default:
591     llvm_unreachable("Failed to cast constant expression");
592   case Instruction::FPTrunc:
593   case Instruction::FPExt:
594     if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
595       bool ignored;
596       APFloat Val = FPC->getValueAPF();
597       Val.convert(DestTy->isHalfTy() ? APFloat::IEEEhalf() :
598                   DestTy->isFloatTy() ? APFloat::IEEEsingle() :
599                   DestTy->isDoubleTy() ? APFloat::IEEEdouble() :
600                   DestTy->isX86_FP80Ty() ? APFloat::x87DoubleExtended() :
601                   DestTy->isFP128Ty() ? APFloat::IEEEquad() :
602                   DestTy->isPPC_FP128Ty() ? APFloat::PPCDoubleDouble() :
603                   APFloat::Bogus(),
604                   APFloat::rmNearestTiesToEven, &ignored);
605       return ConstantFP::get(V->getContext(), Val);
606     }
607     return nullptr; // Can't fold.
608   case Instruction::FPToUI:
609   case Instruction::FPToSI:
610     if (ConstantFP *FPC = dyn_cast<ConstantFP>(V)) {
611       const APFloat &V = FPC->getValueAPF();
612       bool ignored;
613       uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
614       APSInt IntVal(DestBitWidth, opc == Instruction::FPToUI);
615       if (APFloat::opInvalidOp ==
616           V.convertToInteger(IntVal, APFloat::rmTowardZero, &ignored)) {
617         // Undefined behavior invoked - the destination type can't represent
618         // the input constant.
619         return UndefValue::get(DestTy);
620       }
621       return ConstantInt::get(FPC->getContext(), IntVal);
622     }
623     return nullptr; // Can't fold.
  case Instruction::IntToPtr:   // always treated as unsigned
625     if (V->isNullValue())       // Is it an integral null value?
626       return ConstantPointerNull::get(cast<PointerType>(DestTy));
    return nullptr;                   // Other pointer types cannot be cast
628   case Instruction::PtrToInt:   // always treated as unsigned
629     // Is it a null pointer value?
630     if (V->isNullValue())
631       return ConstantInt::get(DestTy, 0);
632     // If this is a sizeof-like expression, pull out multiplications by
633     // known factors to expose them to subsequent folding. If it's an
634     // alignof-like expression, factor out known factors.
635     if (ConstantExpr *CE = dyn_cast<ConstantExpr>(V))
636       if (CE->getOpcode() == Instruction::GetElementPtr &&
637           CE->getOperand(0)->isNullValue()) {
638         // FIXME: Looks like getFoldedSizeOf(), getFoldedOffsetOf() and
639         // getFoldedAlignOf() don't handle the case when DestTy is a vector of
640         // pointers yet. We end up in asserts in CastInst::getCastOpcode (see
641         // test/Analysis/ConstantFolding/cast-vector.ll). I've only seen this
642         // happen in one "real" C-code test case, so it does not seem to be an
643         // important optimization to handle vectors here. For now, simply bail
644         // out.
645         if (DestTy->isVectorTy())
646           return nullptr;
647         GEPOperator *GEPO = cast<GEPOperator>(CE);
648         Type *Ty = GEPO->getSourceElementType();
649         if (CE->getNumOperands() == 2) {
650           // Handle a sizeof-like expression.
651           Constant *Idx = CE->getOperand(1);
652           bool isOne = isa<ConstantInt>(Idx) && cast<ConstantInt>(Idx)->isOne();
653           if (Constant *C = getFoldedSizeOf(Ty, DestTy, !isOne)) {
654             Idx = ConstantExpr::getCast(CastInst::getCastOpcode(Idx, true,
655                                                                 DestTy, false),
656                                         Idx, DestTy);
657             return ConstantExpr::getMul(C, Idx);
658           }
659         } else if (CE->getNumOperands() == 3 &&
660                    CE->getOperand(1)->isNullValue()) {
661           // Handle an alignof-like expression.
662           if (StructType *STy = dyn_cast<StructType>(Ty))
663             if (!STy->isPacked()) {
664               ConstantInt *CI = cast<ConstantInt>(CE->getOperand(2));
665               if (CI->isOne() &&
666                   STy->getNumElements() == 2 &&
667                   STy->getElementType(0)->isIntegerTy(1)) {
668                 return getFoldedAlignOf(STy->getElementType(1), DestTy, false);
669               }
670             }
671           // Handle an offsetof-like expression.
672           if (Ty->isStructTy() || Ty->isArrayTy()) {
673             if (Constant *C = getFoldedOffsetOf(Ty, CE->getOperand(2),
674                                                 DestTy, false))
675               return C;
676           }
677         }
678       }
    // Other pointer types cannot be cast
680     return nullptr;
681   case Instruction::UIToFP:
682   case Instruction::SIToFP:
683     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
684       const APInt &api = CI->getValue();
685       APFloat apf(DestTy->getFltSemantics(),
686                   APInt::getNullValue(DestTy->getPrimitiveSizeInBits()));
687       apf.convertFromAPInt(api, opc==Instruction::SIToFP,
688                            APFloat::rmNearestTiesToEven);
689       return ConstantFP::get(V->getContext(), apf);
690     }
691     return nullptr;
692   case Instruction::ZExt:
693     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
694       uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
695       return ConstantInt::get(V->getContext(),
696                               CI->getValue().zext(BitWidth));
697     }
698     return nullptr;
699   case Instruction::SExt:
700     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
701       uint32_t BitWidth = cast<IntegerType>(DestTy)->getBitWidth();
702       return ConstantInt::get(V->getContext(),
703                               CI->getValue().sext(BitWidth));
704     }
705     return nullptr;
706   case Instruction::Trunc: {
707     if (V->getType()->isVectorTy())
708       return nullptr;
709 
710     uint32_t DestBitWidth = cast<IntegerType>(DestTy)->getBitWidth();
711     if (ConstantInt *CI = dyn_cast<ConstantInt>(V)) {
712       return ConstantInt::get(V->getContext(),
713                               CI->getValue().trunc(DestBitWidth));
714     }
715 
716     // The input must be a constantexpr.  See if we can simplify this based on
717     // the bytes we are demanding.  Only do this if the source and dest are an
718     // even multiple of a byte.
719     if ((DestBitWidth & 7) == 0 &&
720         (cast<IntegerType>(V->getType())->getBitWidth() & 7) == 0)
721       if (Constant *Res = ExtractConstantBytes(V, 0, DestBitWidth / 8))
722         return Res;
723 
724     return nullptr;
725   }
726   case Instruction::BitCast:
727     return FoldBitCast(V, DestTy);
728   case Instruction::AddrSpaceCast:
729     return nullptr;
730   }
731 }
732 
733 Constant *llvm::ConstantFoldSelectInstruction(Constant *Cond,
734                                               Constant *V1, Constant *V2) {
735   // Check for i1 and vector true/false conditions.
736   if (Cond->isNullValue()) return V2;
737   if (Cond->isAllOnesValue()) return V1;
738 
739   // If the condition is a vector constant, fold the result elementwise.
740   if (ConstantVector *CondV = dyn_cast<ConstantVector>(Cond)) {
741     SmallVector<Constant*, 16> Result;
742     Type *Ty = IntegerType::get(CondV->getContext(), 32);
743     for (unsigned i = 0, e = V1->getType()->getVectorNumElements(); i != e;++i){
744       Constant *V;
745       Constant *V1Element = ConstantExpr::getExtractElement(V1,
746                                                     ConstantInt::get(Ty, i));
747       Constant *V2Element = ConstantExpr::getExtractElement(V2,
748                                                     ConstantInt::get(Ty, i));
749       auto *Cond = cast<Constant>(CondV->getOperand(i));
750       if (V1Element == V2Element) {
751         V = V1Element;
752       } else if (isa<UndefValue>(Cond)) {
753         V = isa<UndefValue>(V1Element) ? V1Element : V2Element;
754       } else {
755         if (!isa<ConstantInt>(Cond)) break;
756         V = Cond->isNullValue() ? V2Element : V1Element;
757       }
758       Result.push_back(V);
759     }
760 
761     // If we were able to build the vector, return it.
762     if (Result.size() == V1->getType()->getVectorNumElements())
763       return ConstantVector::get(Result);
764   }
765 
766   if (isa<UndefValue>(Cond)) {
767     if (isa<UndefValue>(V1)) return V1;
768     return V2;
769   }
770   if (isa<UndefValue>(V1)) return V2;
771   if (isa<UndefValue>(V2)) return V1;
772   if (V1 == V2) return V1;
773 
774   if (ConstantExpr *TrueVal = dyn_cast<ConstantExpr>(V1)) {
775     if (TrueVal->getOpcode() == Instruction::Select)
776       if (TrueVal->getOperand(0) == Cond)
777         return ConstantExpr::getSelect(Cond, TrueVal->getOperand(1), V2);
778   }
779   if (ConstantExpr *FalseVal = dyn_cast<ConstantExpr>(V2)) {
780     if (FalseVal->getOpcode() == Instruction::Select)
781       if (FalseVal->getOperand(0) == Cond)
782         return ConstantExpr::getSelect(Cond, V1, FalseVal->getOperand(2));
783   }
784 
785   return nullptr;
786 }
787 
788 Constant *llvm::ConstantFoldExtractElementInstruction(Constant *Val,
789                                                       Constant *Idx) {
790   // extractelt undef, C -> undef
791   // extractelt C, undef -> undef
792   if (isa<UndefValue>(Val) || isa<UndefValue>(Idx))
793     return UndefValue::get(Val->getType()->getVectorElementType());
794 
795   auto *CIdx = dyn_cast<ConstantInt>(Idx);
796   if (!CIdx)
797     return nullptr;
798 
799   // ee({w,x,y,z}, wrong_value) -> undef
800   if (CIdx->uge(Val->getType()->getVectorNumElements()))
801     return UndefValue::get(Val->getType()->getVectorElementType());
802 
803   // ee (gep (ptr, idx0, ...), idx) -> gep (ee (ptr, idx), ee (idx0, idx), ...)
804   if (auto *CE = dyn_cast<ConstantExpr>(Val)) {
805     if (CE->getOpcode() == Instruction::GetElementPtr) {
806       SmallVector<Constant *, 8> Ops;
807       Ops.reserve(CE->getNumOperands());
808       for (unsigned i = 0, e = CE->getNumOperands(); i != e; ++i) {
809         Constant *Op = CE->getOperand(i);
810         if (Op->getType()->isVectorTy()) {
811           Constant *ScalarOp = ConstantExpr::getExtractElement(Op, Idx);
812           if (!ScalarOp)
813             return  nullptr;
814           Ops.push_back(ScalarOp);
815         } else
816           Ops.push_back(Op);
817       }
818       return CE->getWithOperands(Ops, CE->getType()->getVectorElementType(),
819                                  false,
820                                  Ops[0]->getType()->getPointerElementType());
821     }
822   }
823 
824   return Val->getAggregateElement(CIdx);
825 }
826 
827 Constant *llvm::ConstantFoldInsertElementInstruction(Constant *Val,
828                                                      Constant *Elt,
829                                                      Constant *Idx) {
830   if (isa<UndefValue>(Idx))
831     return UndefValue::get(Val->getType());
832 
833   ConstantInt *CIdx = dyn_cast<ConstantInt>(Idx);
834   if (!CIdx) return nullptr;
835 
  // Do not iterate on a scalable vector; the number of elements is unknown at
  // compile time.
838   VectorType *ValTy = cast<VectorType>(Val->getType());
839   if (ValTy->isScalable())
840     return nullptr;
841 
842   unsigned NumElts = Val->getType()->getVectorNumElements();
843   if (CIdx->uge(NumElts))
844     return UndefValue::get(Val->getType());
845 
846   SmallVector<Constant*, 16> Result;
847   Result.reserve(NumElts);
848   auto *Ty = Type::getInt32Ty(Val->getContext());
849   uint64_t IdxVal = CIdx->getZExtValue();
850   for (unsigned i = 0; i != NumElts; ++i) {
851     if (i == IdxVal) {
852       Result.push_back(Elt);
853       continue;
854     }
855 
856     Constant *C = ConstantExpr::getExtractElement(Val, ConstantInt::get(Ty, i));
857     Result.push_back(C);
858   }
859 
860   return ConstantVector::get(Result);
861 }
862 
863 Constant *llvm::ConstantFoldShuffleVectorInstruction(Constant *V1,
864                                                      Constant *V2,
865                                                      Constant *Mask) {
866   unsigned MaskNumElts = Mask->getType()->getVectorNumElements();
867   Type *EltTy = V1->getType()->getVectorElementType();
868 
869   // Undefined shuffle mask -> undefined value.
870   if (isa<UndefValue>(Mask))
871     return UndefValue::get(VectorType::get(EltTy, MaskNumElts));
872 
873   // Don't break the bitcode reader hack.
874   if (isa<ConstantExpr>(Mask)) return nullptr;
875 
  // Do not iterate on a scalable vector; the number of elements is unknown at
  // compile time.
878   VectorType *ValTy = cast<VectorType>(V1->getType());
879   if (ValTy->isScalable())
880     return nullptr;
881 
882   unsigned SrcNumElts = V1->getType()->getVectorNumElements();
883 
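  // For example, with V1 = <4 x i32> <1, 2, 3, 4>,
  // V2 = <4 x i32> <5, 6, 7, 8> and Mask = <2 x i32> <i32 0, i32 5>, the
  // result is <2 x i32> <i32 1, i32 6>; a mask element of -1 produces an
  // undef result element.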
884   // Loop over the shuffle mask, evaluating each element.
885   SmallVector<Constant*, 32> Result;
886   for (unsigned i = 0; i != MaskNumElts; ++i) {
887     int Elt = ShuffleVectorInst::getMaskValue(Mask, i);
888     if (Elt == -1) {
889       Result.push_back(UndefValue::get(EltTy));
890       continue;
891     }
892     Constant *InElt;
893     if (unsigned(Elt) >= SrcNumElts*2)
894       InElt = UndefValue::get(EltTy);
895     else if (unsigned(Elt) >= SrcNumElts) {
896       Type *Ty = IntegerType::get(V2->getContext(), 32);
897       InElt =
898         ConstantExpr::getExtractElement(V2,
899                                         ConstantInt::get(Ty, Elt - SrcNumElts));
900     } else {
901       Type *Ty = IntegerType::get(V1->getContext(), 32);
902       InElt = ConstantExpr::getExtractElement(V1, ConstantInt::get(Ty, Elt));
903     }
904     Result.push_back(InElt);
905   }
906 
907   return ConstantVector::get(Result);
908 }
909 
910 Constant *llvm::ConstantFoldExtractValueInstruction(Constant *Agg,
911                                                     ArrayRef<unsigned> Idxs) {
912   // Base case: no indices, so return the entire value.
913   if (Idxs.empty())
914     return Agg;
915 
916   if (Constant *C = Agg->getAggregateElement(Idxs[0]))
917     return ConstantFoldExtractValueInstruction(C, Idxs.slice(1));
918 
919   return nullptr;
920 }
921 
922 Constant *llvm::ConstantFoldInsertValueInstruction(Constant *Agg,
923                                                    Constant *Val,
924                                                    ArrayRef<unsigned> Idxs) {
925   // Base case: no indices, so replace the entire value.
926   if (Idxs.empty())
927     return Val;
928 
929   unsigned NumElts;
930   if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
931     NumElts = ST->getNumElements();
932   else
933     NumElts = cast<SequentialType>(Agg->getType())->getNumElements();
934 
935   SmallVector<Constant*, 32> Result;
936   for (unsigned i = 0; i != NumElts; ++i) {
937     Constant *C = Agg->getAggregateElement(i);
938     if (!C) return nullptr;
939 
940     if (Idxs[0] == i)
941       C = ConstantFoldInsertValueInstruction(C, Val, Idxs.slice(1));
942 
943     Result.push_back(C);
944   }
945 
946   if (StructType *ST = dyn_cast<StructType>(Agg->getType()))
947     return ConstantStruct::get(ST, Result);
948   if (ArrayType *AT = dyn_cast<ArrayType>(Agg->getType()))
949     return ConstantArray::get(AT, Result);
950   return ConstantVector::get(Result);
951 }
952 
953 Constant *llvm::ConstantFoldUnaryInstruction(unsigned Opcode, Constant *C) {
954   assert(Instruction::isUnaryOp(Opcode) && "Non-unary instruction detected");
955 
956   // Handle scalar UndefValue. Vectors are always evaluated per element.
957   bool HasScalarUndef = !C->getType()->isVectorTy() && isa<UndefValue>(C);
958 
959   if (HasScalarUndef) {
960     switch (static_cast<Instruction::UnaryOps>(Opcode)) {
961     case Instruction::FNeg:
962       return C; // -undef -> undef
963     case Instruction::UnaryOpsEnd:
964       llvm_unreachable("Invalid UnaryOp");
965     }
966   }
967 
968   // Constant should not be UndefValue, unless these are vector constants.
969   assert(!HasScalarUndef && "Unexpected UndefValue");
970   // We only have FP UnaryOps right now.
971   assert(!isa<ConstantInt>(C) && "Unexpected Integer UnaryOp");
972 
973   if (ConstantFP *CFP = dyn_cast<ConstantFP>(C)) {
974     const APFloat &CV = CFP->getValueAPF();
975     switch (Opcode) {
976     default:
977       break;
978     case Instruction::FNeg:
979       return ConstantFP::get(C->getContext(), neg(CV));
980     }
981   } else if (VectorType *VTy = dyn_cast<VectorType>(C->getType())) {
982     // Fold each element and create a vector constant from those constants.
983     SmallVector<Constant*, 16> Result;
984     Type *Ty = IntegerType::get(VTy->getContext(), 32);
985     for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
986       Constant *ExtractIdx = ConstantInt::get(Ty, i);
987       Constant *Elt = ConstantExpr::getExtractElement(C, ExtractIdx);
988 
989       Result.push_back(ConstantExpr::get(Opcode, Elt));
990     }
991 
992     return ConstantVector::get(Result);
993   }
994 
995   // We don't know how to fold this.
996   return nullptr;
997 }
998 
999 Constant *llvm::ConstantFoldBinaryInstruction(unsigned Opcode, Constant *C1,
1000                                               Constant *C2) {
1001   assert(Instruction::isBinaryOp(Opcode) && "Non-binary instruction detected");
1002 
1003   // Simplify BinOps with their identity values first. They are no-ops and we
1004   // can always return the other value, including undef or poison values.
1005   // FIXME: remove unnecessary duplicated identity patterns below.
1006   // FIXME: Use AllowRHSConstant with getBinOpIdentity to handle additional ops,
1007   //        like X << 0 = X.
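  // For example, 'add X, 0', 'mul X, 1', and 'xor X, 0' are all folded to X
  // here without inspecting X further.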
1008   Constant *Identity = ConstantExpr::getBinOpIdentity(Opcode, C1->getType());
1009   if (Identity) {
1010     if (C1 == Identity)
1011       return C2;
1012     if (C2 == Identity)
1013       return C1;
1014   }
1015 
1016   // Handle scalar UndefValue. Vectors are always evaluated per element.
1017   bool HasScalarUndef = !C1->getType()->isVectorTy() &&
1018                         (isa<UndefValue>(C1) || isa<UndefValue>(C2));
1019   if (HasScalarUndef) {
1020     switch (static_cast<Instruction::BinaryOps>(Opcode)) {
1021     case Instruction::Xor:
1022       if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
1023         // Handle undef ^ undef -> 0 special case. This is a common
1024         // idiom (misuse).
1025         return Constant::getNullValue(C1->getType());
1026       LLVM_FALLTHROUGH;
1027     case Instruction::Add:
1028     case Instruction::Sub:
1029       return UndefValue::get(C1->getType());
1030     case Instruction::And:
1031       if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef & undef -> undef
1032         return C1;
1033       return Constant::getNullValue(C1->getType());   // undef & X -> 0
1034     case Instruction::Mul: {
1035       // undef * undef -> undef
1036       if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
1037         return C1;
1038       const APInt *CV;
1039       // X * undef -> undef   if X is odd
1040       if (match(C1, m_APInt(CV)) || match(C2, m_APInt(CV)))
1041         if ((*CV)[0])
1042           return UndefValue::get(C1->getType());
1043 
1044       // X * undef -> 0       otherwise
1045       return Constant::getNullValue(C1->getType());
1046     }
1047     case Instruction::SDiv:
1048     case Instruction::UDiv:
1049       // X / undef -> undef
1050       if (isa<UndefValue>(C2))
1051         return C2;
1052       // undef / 0 -> undef
1053       // undef / 1 -> undef
1054       if (match(C2, m_Zero()) || match(C2, m_One()))
1055         return C1;
1056       // undef / X -> 0       otherwise
1057       return Constant::getNullValue(C1->getType());
1058     case Instruction::URem:
1059     case Instruction::SRem:
1060       // X % undef -> undef
1061       if (match(C2, m_Undef()))
1062         return C2;
1063       // undef % 0 -> undef
1064       if (match(C2, m_Zero()))
1065         return C1;
1066       // undef % X -> 0       otherwise
1067       return Constant::getNullValue(C1->getType());
1068     case Instruction::Or:                          // X | undef -> -1
1069       if (isa<UndefValue>(C1) && isa<UndefValue>(C2)) // undef | undef -> undef
1070         return C1;
1071       return Constant::getAllOnesValue(C1->getType()); // undef | X -> ~0
1072     case Instruction::LShr:
1073       // X >>l undef -> undef
1074       if (isa<UndefValue>(C2))
1075         return C2;
1076       // undef >>l 0 -> undef
1077       if (match(C2, m_Zero()))
1078         return C1;
1079       // undef >>l X -> 0
1080       return Constant::getNullValue(C1->getType());
1081     case Instruction::AShr:
1082       // X >>a undef -> undef
1083       if (isa<UndefValue>(C2))
1084         return C2;
1085       // undef >>a 0 -> undef
1086       if (match(C2, m_Zero()))
1087         return C1;
1088       // TODO: undef >>a X -> undef if the shift is exact
1089       // undef >>a X -> 0
1090       return Constant::getNullValue(C1->getType());
1091     case Instruction::Shl:
1092       // X << undef -> undef
1093       if (isa<UndefValue>(C2))
1094         return C2;
1095       // undef << 0 -> undef
1096       if (match(C2, m_Zero()))
1097         return C1;
1098       // undef << X -> 0
1099       return Constant::getNullValue(C1->getType());
1100     case Instruction::FAdd:
1101     case Instruction::FSub:
1102     case Instruction::FMul:
1103     case Instruction::FDiv:
1104     case Instruction::FRem:
1105       // [any flop] undef, undef -> undef
1106       if (isa<UndefValue>(C1) && isa<UndefValue>(C2))
1107         return C1;
1108       // [any flop] C, undef -> NaN
1109       // [any flop] undef, C -> NaN
1110       // We could potentially specialize NaN/Inf constants vs. 'normal'
1111       // constants (possibly differently depending on opcode and operand). This
1112       // would allow returning undef sometimes. But it is always safe to fold to
1113       // NaN because we can choose the undef operand as NaN, and any FP opcode
1114       // with a NaN operand will propagate NaN.
1115       return ConstantFP::getNaN(C1->getType());
1116     case Instruction::BinaryOpsEnd:
1117       llvm_unreachable("Invalid BinaryOp");
1118     }
1119   }
1120 
1121   // Neither constant should be UndefValue, unless these are vector constants.
1122   assert(!HasScalarUndef && "Unexpected UndefValue");
1123 
1124   // Handle simplifications when the RHS is a constant int.
1125   if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
1126     switch (Opcode) {
1127     case Instruction::Add:
1128       if (CI2->isZero()) return C1;                             // X + 0 == X
1129       break;
1130     case Instruction::Sub:
1131       if (CI2->isZero()) return C1;                             // X - 0 == X
1132       break;
1133     case Instruction::Mul:
1134       if (CI2->isZero()) return C2;                             // X * 0 == 0
1135       if (CI2->isOne())
1136         return C1;                                              // X * 1 == X
1137       break;
1138     case Instruction::UDiv:
1139     case Instruction::SDiv:
1140       if (CI2->isOne())
1141         return C1;                                            // X / 1 == X
1142       if (CI2->isZero())
1143         return UndefValue::get(CI2->getType());               // X / 0 == undef
1144       break;
1145     case Instruction::URem:
1146     case Instruction::SRem:
1147       if (CI2->isOne())
1148         return Constant::getNullValue(CI2->getType());        // X % 1 == 0
1149       if (CI2->isZero())
1150         return UndefValue::get(CI2->getType());               // X % 0 == undef
1151       break;
1152     case Instruction::And:
1153       if (CI2->isZero()) return C2;                           // X & 0 == 0
1154       if (CI2->isMinusOne())
1155         return C1;                                            // X & -1 == X
1156 
1157       if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
1158         // (zext i32 to i64) & 4294967295 -> (zext i32 to i64)
1159         if (CE1->getOpcode() == Instruction::ZExt) {
1160           unsigned DstWidth = CI2->getType()->getBitWidth();
1161           unsigned SrcWidth =
1162             CE1->getOperand(0)->getType()->getPrimitiveSizeInBits();
1163           APInt PossiblySetBits(APInt::getLowBitsSet(DstWidth, SrcWidth));
1164           if ((PossiblySetBits & CI2->getValue()) == PossiblySetBits)
1165             return C1;
1166         }
1167 
1168         // If and'ing the address of a global with a constant, fold it.
1169         if (CE1->getOpcode() == Instruction::PtrToInt &&
1170             isa<GlobalValue>(CE1->getOperand(0))) {
1171           GlobalValue *GV = cast<GlobalValue>(CE1->getOperand(0));
1172 
1173           MaybeAlign GVAlign;
1174 
1175           if (Module *TheModule = GV->getParent()) {
1176             GVAlign = GV->getPointerAlignment(TheModule->getDataLayout());
1177 
1178             // If the function alignment is not specified then assume that it
1179             // is 4.
1180             // This is dangerous; on x86, the alignment of the pointer
1181             // corresponds to the alignment of the function, but might be less
1182             // than 4 if it isn't explicitly specified.
1183             // However, a fix for this behaviour was reverted because it
1184             // increased code size (see https://reviews.llvm.org/D55115)
1185             // FIXME: This code should be deleted once existing targets have
1186             // appropriate defaults
1187             if (!GVAlign && isa<Function>(GV))
1188               GVAlign = Align(4);
1189           } else if (isa<Function>(GV)) {
1190             // Without a datalayout we have to assume the worst case: that the
1191             // function pointer isn't aligned at all.
1192             GVAlign = llvm::None;
1193           } else {
1194             GVAlign = MaybeAlign(GV->getAlignment());
1195           }
1196 
1197           if (GVAlign && *GVAlign > 1) {
1198             unsigned DstWidth = CI2->getType()->getBitWidth();
1199             unsigned SrcWidth = std::min(DstWidth, Log2(*GVAlign));
1200             APInt BitsNotSet(APInt::getLowBitsSet(DstWidth, SrcWidth));
1201 
1202             // If checking bits we know are clear, return zero.
1203             if ((CI2->getValue() & BitsNotSet) == CI2->getValue())
1204               return Constant::getNullValue(CI2->getType());
1205           }
1206         }
1207       }
1208       break;
1209     case Instruction::Or:
1210       if (CI2->isZero()) return C1;        // X | 0 == X
1211       if (CI2->isMinusOne())
1212         return C2;                         // X | -1 == -1
1213       break;
1214     case Instruction::Xor:
1215       if (CI2->isZero()) return C1;        // X ^ 0 == X
1216 
1217       if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
1218         switch (CE1->getOpcode()) {
1219         default: break;
1220         case Instruction::ICmp:
1221         case Instruction::FCmp:
1222           // cmp pred ^ true -> cmp !pred
1223           assert(CI2->isOne());
1224           CmpInst::Predicate pred = (CmpInst::Predicate)CE1->getPredicate();
1225           pred = CmpInst::getInversePredicate(pred);
1226           return ConstantExpr::getCompare(pred, CE1->getOperand(0),
1227                                           CE1->getOperand(1));
1228         }
1229       }
1230       break;
1231     case Instruction::AShr:
      // ashr (zext X to Ty), C2 -> lshr (zext X to Ty), C2
1233       if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1))
1234         if (CE1->getOpcode() == Instruction::ZExt)  // Top bits known zero.
1235           return ConstantExpr::getLShr(C1, C2);
1236       break;
1237     }
1238   } else if (isa<ConstantInt>(C1)) {
1239     // If C1 is a ConstantInt and C2 is not, swap the operands.
1240     if (Instruction::isCommutative(Opcode))
1241       return ConstantExpr::get(Opcode, C2, C1);
1242   }
1243 
1244   if (ConstantInt *CI1 = dyn_cast<ConstantInt>(C1)) {
1245     if (ConstantInt *CI2 = dyn_cast<ConstantInt>(C2)) {
1246       const APInt &C1V = CI1->getValue();
1247       const APInt &C2V = CI2->getValue();
1248       switch (Opcode) {
1249       default:
1250         break;
1251       case Instruction::Add:
1252         return ConstantInt::get(CI1->getContext(), C1V + C2V);
1253       case Instruction::Sub:
1254         return ConstantInt::get(CI1->getContext(), C1V - C2V);
1255       case Instruction::Mul:
1256         return ConstantInt::get(CI1->getContext(), C1V * C2V);
1257       case Instruction::UDiv:
1258         assert(!CI2->isZero() && "Div by zero handled above");
1259         return ConstantInt::get(CI1->getContext(), C1V.udiv(C2V));
1260       case Instruction::SDiv:
1261         assert(!CI2->isZero() && "Div by zero handled above");
1262         if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
1263           return UndefValue::get(CI1->getType());   // MIN_INT / -1 -> undef
1264         return ConstantInt::get(CI1->getContext(), C1V.sdiv(C2V));
1265       case Instruction::URem:
1266         assert(!CI2->isZero() && "Div by zero handled above");
1267         return ConstantInt::get(CI1->getContext(), C1V.urem(C2V));
1268       case Instruction::SRem:
1269         assert(!CI2->isZero() && "Div by zero handled above");
1270         if (C2V.isAllOnesValue() && C1V.isMinSignedValue())
1271           return UndefValue::get(CI1->getType());   // MIN_INT % -1 -> undef
1272         return ConstantInt::get(CI1->getContext(), C1V.srem(C2V));
1273       case Instruction::And:
1274         return ConstantInt::get(CI1->getContext(), C1V & C2V);
1275       case Instruction::Or:
1276         return ConstantInt::get(CI1->getContext(), C1V | C2V);
1277       case Instruction::Xor:
1278         return ConstantInt::get(CI1->getContext(), C1V ^ C2V);
1279       case Instruction::Shl:
1280         if (C2V.ult(C1V.getBitWidth()))
1281           return ConstantInt::get(CI1->getContext(), C1V.shl(C2V));
1282         return UndefValue::get(C1->getType()); // too big shift is undef
1283       case Instruction::LShr:
1284         if (C2V.ult(C1V.getBitWidth()))
1285           return ConstantInt::get(CI1->getContext(), C1V.lshr(C2V));
1286         return UndefValue::get(C1->getType()); // too big shift is undef
1287       case Instruction::AShr:
1288         if (C2V.ult(C1V.getBitWidth()))
1289           return ConstantInt::get(CI1->getContext(), C1V.ashr(C2V));
1290         return UndefValue::get(C1->getType()); // too big shift is undef
1291       }
1292     }
1293 
1294     switch (Opcode) {
1295     case Instruction::SDiv:
1296     case Instruction::UDiv:
1297     case Instruction::URem:
1298     case Instruction::SRem:
1299     case Instruction::LShr:
1300     case Instruction::AShr:
1301     case Instruction::Shl:
1302       if (CI1->isZero()) return C1;
1303       break;
1304     default:
1305       break;
1306     }
1307   } else if (ConstantFP *CFP1 = dyn_cast<ConstantFP>(C1)) {
1308     if (ConstantFP *CFP2 = dyn_cast<ConstantFP>(C2)) {
1309       const APFloat &C1V = CFP1->getValueAPF();
1310       const APFloat &C2V = CFP2->getValueAPF();
1311       APFloat C3V = C1V;  // copy for modification
1312       switch (Opcode) {
1313       default:
1314         break;
1315       case Instruction::FAdd:
1316         (void)C3V.add(C2V, APFloat::rmNearestTiesToEven);
1317         return ConstantFP::get(C1->getContext(), C3V);
1318       case Instruction::FSub:
1319         (void)C3V.subtract(C2V, APFloat::rmNearestTiesToEven);
1320         return ConstantFP::get(C1->getContext(), C3V);
1321       case Instruction::FMul:
1322         (void)C3V.multiply(C2V, APFloat::rmNearestTiesToEven);
1323         return ConstantFP::get(C1->getContext(), C3V);
1324       case Instruction::FDiv:
1325         (void)C3V.divide(C2V, APFloat::rmNearestTiesToEven);
1326         return ConstantFP::get(C1->getContext(), C3V);
1327       case Instruction::FRem:
1328         (void)C3V.mod(C2V);
1329         return ConstantFP::get(C1->getContext(), C3V);
1330       }
1331     }
1332   } else if (VectorType *VTy = dyn_cast<VectorType>(C1->getType())) {
1333     // Fold each element and create a vector constant from those constants.
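    // Illustrative: add <2 x i32> <i32 1, i32 2>, <i32 10, i32 20> folds
    // element-wise to <i32 11, i32 22>; an integer div/rem whose divisor
    // vector contains a zero element folds to undef (checked below).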
1334     SmallVector<Constant*, 16> Result;
1335     Type *Ty = IntegerType::get(VTy->getContext(), 32);
1336     for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i) {
1337       Constant *ExtractIdx = ConstantInt::get(Ty, i);
1338       Constant *LHS = ConstantExpr::getExtractElement(C1, ExtractIdx);
1339       Constant *RHS = ConstantExpr::getExtractElement(C2, ExtractIdx);
1340 
1341       // If any element of a divisor vector is zero, the whole op is undef.
1342       if (Instruction::isIntDivRem(Opcode) && RHS->isNullValue())
1343         return UndefValue::get(VTy);
1344 
1345       Result.push_back(ConstantExpr::get(Opcode, LHS, RHS));
1346     }
1347 
1348     return ConstantVector::get(Result);
1349   }
1350 
1351   if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
1352     // There are many possible foldings we could do here.  We should probably
1353     // at least fold add of a pointer with an integer into the appropriate
1354     // getelementptr.  This will improve alias analysis a bit.
1355 
1356     // Given ((a + b) + c), if (b + c) folds to something interesting, return
1357     // (a + (b + c)).
1358     if (Instruction::isAssociative(Opcode) && CE1->getOpcode() == Opcode) {
1359       Constant *T = ConstantExpr::get(Opcode, CE1->getOperand(1), C2);
1360       if (!isa<ConstantExpr>(T) || cast<ConstantExpr>(T)->getOpcode() != Opcode)
1361         return ConstantExpr::get(Opcode, CE1->getOperand(0), T);
1362     }
1363   } else if (isa<ConstantExpr>(C2)) {
    // If C2 is a constant expr and C1 isn't, flip them around and fold the
    // other way if possible.
1366     if (Instruction::isCommutative(Opcode))
1367       return ConstantFoldBinaryInstruction(Opcode, C2, C1);
1368   }
1369 
1370   // i1 can be simplified in many cases.
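  // Illustrative: i1 arithmetic wraps modulo 2, so add/sub behave like xor and
  // mul behaves like and; an i1 shift is only defined for a shift amount of 0,
  // and an i1 division or remainder only for a divisor of 1 (handled below).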
1371   if (C1->getType()->isIntegerTy(1)) {
1372     switch (Opcode) {
1373     case Instruction::Add:
1374     case Instruction::Sub:
1375       return ConstantExpr::getXor(C1, C2);
1376     case Instruction::Mul:
1377       return ConstantExpr::getAnd(C1, C2);
1378     case Instruction::Shl:
1379     case Instruction::LShr:
1380     case Instruction::AShr:
1381       // We can assume that C2 == 0.  If it were one the result would be
1382       // undefined because the shift value is as large as the bitwidth.
1383       return C1;
1384     case Instruction::SDiv:
1385     case Instruction::UDiv:
1386       // We can assume that C2 == 1.  If it were zero the result would be
1387       // undefined through division by zero.
1388       return C1;
1389     case Instruction::URem:
1390     case Instruction::SRem:
1391       // We can assume that C2 == 1.  If it were zero the result would be
1392       // undefined through division by zero.
1393       return ConstantInt::getFalse(C1->getContext());
1394     default:
1395       break;
1396     }
1397   }
1398 
1399   // We don't know how to fold this.
1400   return nullptr;
1401 }
1402 
/// This type is zero-sized if it's an array or structure of zero-sized types.
/// The only leaf zero-sized type is an empty structure; for an opaque
/// structure we conservatively answer "maybe" (true).
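/// For example (illustrative): {} is zero-sized, and so are [4 x {}] and
/// { {}, [2 x {}] }, whereas { i8 } is not.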
1405 static bool isMaybeZeroSizedType(Type *Ty) {
1406   if (StructType *STy = dyn_cast<StructType>(Ty)) {
1407     if (STy->isOpaque()) return true;  // Can't say.
1408 
    // If all of the elements have zero size, this does too.
1410     for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i)
1411       if (!isMaybeZeroSizedType(STy->getElementType(i))) return false;
1412     return true;
1413 
1414   } else if (ArrayType *ATy = dyn_cast<ArrayType>(Ty)) {
1415     return isMaybeZeroSizedType(ATy->getElementType());
1416   }
1417   return false;
1418 }
1419 
1420 /// Compare the two constants as though they were getelementptr indices.
1421 /// This allows coercion of the types to be the same thing.
1422 ///
1423 /// If the two constants are the "same" (after coercion), return 0.  If the
1424 /// first is less than the second, return -1, if the second is less than the
1425 /// first, return 1.  If the constants are not integral, return -2.
1426 ///
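/// For example (illustrative): comparing i32 2 against i64 3 as indices over
/// a non-zero-sized element type coerces both to signed 64-bit values and
/// returns -1, since 2 < 3.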
1427 static int IdxCompare(Constant *C1, Constant *C2, Type *ElTy) {
1428   if (C1 == C2) return 0;
1429 
1430   // Ok, we found a different index.  If they are not ConstantInt, we can't do
1431   // anything with them.
1432   if (!isa<ConstantInt>(C1) || !isa<ConstantInt>(C2))
1433     return -2; // don't know!
1434 
1435   // We cannot compare the indices if they don't fit in an int64_t.
1436   if (cast<ConstantInt>(C1)->getValue().getActiveBits() > 64 ||
1437       cast<ConstantInt>(C2)->getValue().getActiveBits() > 64)
1438     return -2; // don't know!
1439 
1440   // Ok, we have two differing integer indices.  Sign extend them to be the same
1441   // type.
1442   int64_t C1Val = cast<ConstantInt>(C1)->getSExtValue();
1443   int64_t C2Val = cast<ConstantInt>(C2)->getSExtValue();
1444 
1445   if (C1Val == C2Val) return 0;  // They are equal
1446 
  // If the type being indexed over is really just a zero-sized type, there is
  // no pointer difference being made here.
  if (isMaybeZeroSizedType(ElTy))
    return -2; // don't know!
1451 
  // Now that the indices have the same type and differ, report which one is
  // smaller.
1454   if (C1Val < C2Val)
1455     return -1;
1456   else
1457     return 1;
1458 }
1459 
1460 /// This function determines if there is anything we can decide about the two
1461 /// constants provided. This doesn't need to handle simple things like
1462 /// ConstantFP comparisons, but should instead handle ConstantExprs.
1463 /// If we can determine that the two constants have a particular relation to
1464 /// each other, we should return the corresponding FCmpInst predicate,
1465 /// otherwise return FCmpInst::BAD_FCMP_PREDICATE. This is used below in
1466 /// ConstantFoldCompareInstruction.
1467 ///
1468 /// To simplify this code we canonicalize the relation so that the first
1469 /// operand is always the most "complex" of the two.  We consider ConstantFP
1470 /// to be the simplest, and ConstantExprs to be the most complex.
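/// For example (illustrative): if V1 and V2 are the very same constant
/// expression, the best we can report is FCMP_UEQ, because the expression
/// might evaluate to NaN and NaN is unordered even with itself.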
1471 static FCmpInst::Predicate evaluateFCmpRelation(Constant *V1, Constant *V2) {
1472   assert(V1->getType() == V2->getType() &&
1473          "Cannot compare values of different types!");
1474 
1475   // We do not know if a constant expression will evaluate to a number or NaN.
1476   // Therefore, we can only say that the relation is unordered or equal.
1477   if (V1 == V2) return FCmpInst::FCMP_UEQ;
1478 
1479   if (!isa<ConstantExpr>(V1)) {
1480     if (!isa<ConstantExpr>(V2)) {
1481       // Simple case, use the standard constant folder.
1482       ConstantInt *R = nullptr;
1483       R = dyn_cast<ConstantInt>(
1484                       ConstantExpr::getFCmp(FCmpInst::FCMP_OEQ, V1, V2));
1485       if (R && !R->isZero())
1486         return FCmpInst::FCMP_OEQ;
1487       R = dyn_cast<ConstantInt>(
1488                       ConstantExpr::getFCmp(FCmpInst::FCMP_OLT, V1, V2));
1489       if (R && !R->isZero())
1490         return FCmpInst::FCMP_OLT;
1491       R = dyn_cast<ConstantInt>(
1492                       ConstantExpr::getFCmp(FCmpInst::FCMP_OGT, V1, V2));
1493       if (R && !R->isZero())
1494         return FCmpInst::FCMP_OGT;
1495 
1496       // Nothing more we can do
1497       return FCmpInst::BAD_FCMP_PREDICATE;
1498     }
1499 
    // If the first operand is simple and the second is a ConstantExpr, swap
    // the operands.
1501     FCmpInst::Predicate SwappedRelation = evaluateFCmpRelation(V2, V1);
1502     if (SwappedRelation != FCmpInst::BAD_FCMP_PREDICATE)
1503       return FCmpInst::getSwappedPredicate(SwappedRelation);
1504   } else {
    // Ok, the LHS is known to be a constantexpr.  The RHS can be either a
    // constantexpr or a simple constant.
1507     ConstantExpr *CE1 = cast<ConstantExpr>(V1);
1508     switch (CE1->getOpcode()) {
1509     case Instruction::FPTrunc:
1510     case Instruction::FPExt:
1511     case Instruction::UIToFP:
1512     case Instruction::SIToFP:
1513       // We might be able to do something with these but we don't right now.
1514       break;
1515     default:
1516       break;
1517     }
1518   }
1519   // There are MANY other foldings that we could perform here.  They will
1520   // probably be added on demand, as they seem needed.
1521   return FCmpInst::BAD_FCMP_PREDICATE;
1522 }
1523 
1524 static ICmpInst::Predicate areGlobalsPotentiallyEqual(const GlobalValue *GV1,
1525                                                       const GlobalValue *GV2) {
1526   auto isGlobalUnsafeForEquality = [](const GlobalValue *GV) {
1527     if (GV->hasExternalWeakLinkage() || GV->hasWeakAnyLinkage())
1528       return true;
1529     if (const auto *GVar = dyn_cast<GlobalVariable>(GV)) {
1530       Type *Ty = GVar->getValueType();
      // A global with an opaque type might end up being zero-sized.
1532       if (!Ty->isSized())
1533         return true;
1534       // A global with an empty type might lie at the address of any other
1535       // global.
1536       if (Ty->isEmptyTy())
1537         return true;
1538     }
1539     return false;
1540   };
1541   // Don't try to decide equality of aliases.
1542   if (!isa<GlobalAlias>(GV1) && !isa<GlobalAlias>(GV2))
1543     if (!isGlobalUnsafeForEquality(GV1) && !isGlobalUnsafeForEquality(GV2))
1544       return ICmpInst::ICMP_NE;
1545   return ICmpInst::BAD_ICMP_PREDICATE;
1546 }
1547 
1548 /// This function determines if there is anything we can decide about the two
1549 /// constants provided. This doesn't need to handle simple things like integer
1550 /// comparisons, but should instead handle ConstantExprs and GlobalValues.
1551 /// If we can determine that the two constants have a particular relation to
1552 /// each other, we should return the corresponding ICmp predicate, otherwise
1553 /// return ICmpInst::BAD_ICMP_PREDICATE.
1554 ///
1555 /// To simplify this code we canonicalize the relation so that the first
1556 /// operand is always the most "complex" of the two.  We consider simple
1557 /// constants (like ConstantInt) to be the simplest, followed by
1558 /// GlobalValues, followed by ConstantExpr's (the most complex).
1559 ///
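/// For example (illustrative): a non-weak, non-alias global compared with a
/// null pointer (in an address space where null is not a defined address)
/// yields ICMP_NE, while two distinct weak globals yield BAD_ICMP_PREDICATE.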
1560 static ICmpInst::Predicate evaluateICmpRelation(Constant *V1, Constant *V2,
1561                                                 bool isSigned) {
1562   assert(V1->getType() == V2->getType() &&
1563          "Cannot compare different types of values!");
1564   if (V1 == V2) return ICmpInst::ICMP_EQ;
1565 
1566   if (!isa<ConstantExpr>(V1) && !isa<GlobalValue>(V1) &&
1567       !isa<BlockAddress>(V1)) {
1568     if (!isa<GlobalValue>(V2) && !isa<ConstantExpr>(V2) &&
1569         !isa<BlockAddress>(V2)) {
1570       // We distilled this down to a simple case, use the standard constant
1571       // folder.
1572       ConstantInt *R = nullptr;
1573       ICmpInst::Predicate pred = ICmpInst::ICMP_EQ;
1574       R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
1575       if (R && !R->isZero())
1576         return pred;
1577       pred = isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1578       R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
1579       if (R && !R->isZero())
1580         return pred;
1581       pred = isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1582       R = dyn_cast<ConstantInt>(ConstantExpr::getICmp(pred, V1, V2));
1583       if (R && !R->isZero())
1584         return pred;
1585 
1586       // If we couldn't figure it out, bail.
1587       return ICmpInst::BAD_ICMP_PREDICATE;
1588     }
1589 
1590     // If the first operand is simple, swap operands.
1591     ICmpInst::Predicate SwappedRelation =
1592       evaluateICmpRelation(V2, V1, isSigned);
1593     if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
1594       return ICmpInst::getSwappedPredicate(SwappedRelation);
1595 
1596   } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(V1)) {
1597     if (isa<ConstantExpr>(V2)) {  // Swap as necessary.
1598       ICmpInst::Predicate SwappedRelation =
1599         evaluateICmpRelation(V2, V1, isSigned);
1600       if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
1601         return ICmpInst::getSwappedPredicate(SwappedRelation);
1602       return ICmpInst::BAD_ICMP_PREDICATE;
1603     }
1604 
1605     // Now we know that the RHS is a GlobalValue, BlockAddress or simple
1606     // constant (which, since the types must match, means that it's a
1607     // ConstantPointerNull).
1608     if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
1609       return areGlobalsPotentiallyEqual(GV, GV2);
1610     } else if (isa<BlockAddress>(V2)) {
1611       return ICmpInst::ICMP_NE; // Globals never equal labels.
1612     } else {
1613       assert(isa<ConstantPointerNull>(V2) && "Canonicalization guarantee!");
1614       // GlobalVals can never be null unless they have external weak linkage.
1615       // We don't try to evaluate aliases here.
1616       // NOTE: We should not be doing this constant folding if null pointer
1617       // is considered valid for the function. But currently there is no way to
1618       // query it from the Constant type.
1619       if (!GV->hasExternalWeakLinkage() && !isa<GlobalAlias>(GV) &&
1620           !NullPointerIsDefined(nullptr /* F */,
1621                                 GV->getType()->getAddressSpace()))
1622         return ICmpInst::ICMP_NE;
1623     }
1624   } else if (const BlockAddress *BA = dyn_cast<BlockAddress>(V1)) {
1625     if (isa<ConstantExpr>(V2)) {  // Swap as necessary.
1626       ICmpInst::Predicate SwappedRelation =
1627         evaluateICmpRelation(V2, V1, isSigned);
1628       if (SwappedRelation != ICmpInst::BAD_ICMP_PREDICATE)
1629         return ICmpInst::getSwappedPredicate(SwappedRelation);
1630       return ICmpInst::BAD_ICMP_PREDICATE;
1631     }
1632 
1633     // Now we know that the RHS is a GlobalValue, BlockAddress or simple
1634     // constant (which, since the types must match, means that it is a
1635     // ConstantPointerNull).
1636     if (const BlockAddress *BA2 = dyn_cast<BlockAddress>(V2)) {
      // A block address in another function can't equal this one, but block
      // addresses in the current function might be the same if their blocks
      // are empty.
1640       if (BA2->getFunction() != BA->getFunction())
1641         return ICmpInst::ICMP_NE;
1642     } else {
      // Block addresses aren't null and don't equal the address of globals.
1644       assert((isa<ConstantPointerNull>(V2) || isa<GlobalValue>(V2)) &&
1645              "Canonicalization guarantee!");
1646       return ICmpInst::ICMP_NE;
1647     }
1648   } else {
    // Ok, the LHS is known to be a constantexpr.  The RHS can be a
    // constantexpr, a global, a block address, or a simple constant.
1651     ConstantExpr *CE1 = cast<ConstantExpr>(V1);
1652     Constant *CE1Op0 = CE1->getOperand(0);
1653 
1654     switch (CE1->getOpcode()) {
1655     case Instruction::Trunc:
1656     case Instruction::FPTrunc:
1657     case Instruction::FPExt:
1658     case Instruction::FPToUI:
1659     case Instruction::FPToSI:
1660       break; // We can't evaluate floating point casts or truncations.
1661 
1662     case Instruction::UIToFP:
1663     case Instruction::SIToFP:
1664     case Instruction::BitCast:
1665     case Instruction::ZExt:
1666     case Instruction::SExt:
1667       // We can't evaluate floating point casts or truncations.
1668       if (CE1Op0->getType()->isFPOrFPVectorTy())
1669         break;
1670 
1671       // If the cast is not actually changing bits, and the second operand is a
1672       // null pointer, do the comparison with the pre-casted value.
1673       if (V2->isNullValue() && CE1->getType()->isIntOrPtrTy()) {
1674         if (CE1->getOpcode() == Instruction::ZExt) isSigned = false;
1675         if (CE1->getOpcode() == Instruction::SExt) isSigned = true;
1676         return evaluateICmpRelation(CE1Op0,
1677                                     Constant::getNullValue(CE1Op0->getType()),
1678                                     isSigned);
1679       }
1680       break;
1681 
1682     case Instruction::GetElementPtr: {
1683       GEPOperator *CE1GEP = cast<GEPOperator>(CE1);
1684       // Ok, since this is a getelementptr, we know that the constant has a
1685       // pointer type.  Check the various cases.
1686       if (isa<ConstantPointerNull>(V2)) {
1687         // If we are comparing a GEP to a null pointer, check to see if the base
1688         // of the GEP equals the null pointer.
1689         if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
1690           if (GV->hasExternalWeakLinkage())
            // A weak-linkage GVal could be null or not. We're comparing it
            // to a null pointer, so the result is greater-or-equal.
1693             return isSigned ? ICmpInst::ICMP_SGE : ICmpInst::ICMP_UGE;
1694           else
            // If it's not weak linkage, the GVal must have a non-zero address,
            // so the result is greater-than.
1697             return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1698         } else if (isa<ConstantPointerNull>(CE1Op0)) {
1699           // If we are indexing from a null pointer, check to see if we have any
1700           // non-zero indices.
1701           for (unsigned i = 1, e = CE1->getNumOperands(); i != e; ++i)
1702             if (!CE1->getOperand(i)->isNullValue())
1703               // Offsetting from null, must not be equal.
1704               return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
          // All indices from null are zero, so the result is still null and
          // therefore equal.
1706           return ICmpInst::ICMP_EQ;
1707         }
1708         // Otherwise, we can't really say if the first operand is null or not.
1709       } else if (const GlobalValue *GV2 = dyn_cast<GlobalValue>(V2)) {
1710         if (isa<ConstantPointerNull>(CE1Op0)) {
1711           if (GV2->hasExternalWeakLinkage())
            // A weak-linkage GVal could be null or not. We're comparing it to
            // a null pointer, so the result is less-or-equal.
1714             return isSigned ? ICmpInst::ICMP_SLE : ICmpInst::ICMP_ULE;
1715           else
            // If it's not weak linkage, the GVal must have a non-zero address,
            // so the result is less-than.
1718             return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1719         } else if (const GlobalValue *GV = dyn_cast<GlobalValue>(CE1Op0)) {
1720           if (GV == GV2) {
            // If this is a getelementptr of the same global, then the result
            // must differ from the global itself.  Because the types must
            // match, the getelementptr can have at most one index, and because
            // we fold getelementptr's with a single zero index, that index
            // must be nonzero.
1725             assert(CE1->getNumOperands() == 2 &&
1726                    !CE1->getOperand(1)->isNullValue() &&
1727                    "Surprising getelementptr!");
1728             return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1729           } else {
1730             if (CE1GEP->hasAllZeroIndices())
1731               return areGlobalsPotentiallyEqual(GV, GV2);
1732             return ICmpInst::BAD_ICMP_PREDICATE;
1733           }
1734         }
1735       } else {
1736         ConstantExpr *CE2 = cast<ConstantExpr>(V2);
1737         Constant *CE2Op0 = CE2->getOperand(0);
1738 
1739         // There are MANY other foldings that we could perform here.  They will
1740         // probably be added on demand, as they seem needed.
1741         switch (CE2->getOpcode()) {
1742         default: break;
1743         case Instruction::GetElementPtr:
1744           // By far the most common case to handle is when the base pointers are
1745           // obviously to the same global.
1746           if (isa<GlobalValue>(CE1Op0) && isa<GlobalValue>(CE2Op0)) {
1747             // Don't know relative ordering, but check for inequality.
1748             if (CE1Op0 != CE2Op0) {
1749               GEPOperator *CE2GEP = cast<GEPOperator>(CE2);
1750               if (CE1GEP->hasAllZeroIndices() && CE2GEP->hasAllZeroIndices())
1751                 return areGlobalsPotentiallyEqual(cast<GlobalValue>(CE1Op0),
1752                                                   cast<GlobalValue>(CE2Op0));
1753               return ICmpInst::BAD_ICMP_PREDICATE;
1754             }
1755             // Ok, we know that both getelementptr instructions are based on the
1756             // same global.  From this, we can precisely determine the relative
1757             // ordering of the resultant pointers.
1758             unsigned i = 1;
1759 
1760             // The logic below assumes that the result of the comparison
1761             // can be determined by finding the first index that differs.
1762             // This doesn't work if there is over-indexing in any
1763             // subsequent indices, so check for that case first.
1764             if (!CE1->isGEPWithNoNotionalOverIndexing() ||
1765                 !CE2->isGEPWithNoNotionalOverIndexing())
1766                return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
1767 
1768             // Compare all of the operands the GEP's have in common.
1769             gep_type_iterator GTI = gep_type_begin(CE1);
1770             for (;i != CE1->getNumOperands() && i != CE2->getNumOperands();
1771                  ++i, ++GTI)
1772               switch (IdxCompare(CE1->getOperand(i),
1773                                  CE2->getOperand(i), GTI.getIndexedType())) {
1774               case -1: return isSigned ? ICmpInst::ICMP_SLT:ICmpInst::ICMP_ULT;
1775               case 1:  return isSigned ? ICmpInst::ICMP_SGT:ICmpInst::ICMP_UGT;
1776               case -2: return ICmpInst::BAD_ICMP_PREDICATE;
1777               }
1778 
1779             // Ok, we ran out of things they have in common.  If any leftovers
1780             // are non-zero then we have a difference, otherwise we are equal.
1781             for (; i < CE1->getNumOperands(); ++i)
1782               if (!CE1->getOperand(i)->isNullValue()) {
1783                 if (isa<ConstantInt>(CE1->getOperand(i)))
1784                   return isSigned ? ICmpInst::ICMP_SGT : ICmpInst::ICMP_UGT;
1785                 else
1786                   return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
1787               }
1788 
1789             for (; i < CE2->getNumOperands(); ++i)
1790               if (!CE2->getOperand(i)->isNullValue()) {
1791                 if (isa<ConstantInt>(CE2->getOperand(i)))
1792                   return isSigned ? ICmpInst::ICMP_SLT : ICmpInst::ICMP_ULT;
1793                 else
1794                   return ICmpInst::BAD_ICMP_PREDICATE; // Might be equal.
1795               }
1796             return ICmpInst::ICMP_EQ;
1797           }
1798         }
1799       }
1800       break;
1801     }
1802     default:
1803       break;
1804     }
1805   }
1806 
1807   return ICmpInst::BAD_ICMP_PREDICATE;
1808 }
1809 
1810 Constant *llvm::ConstantFoldCompareInstruction(unsigned short pred,
1811                                                Constant *C1, Constant *C2) {
1812   Type *ResultTy;
1813   if (VectorType *VT = dyn_cast<VectorType>(C1->getType()))
1814     ResultTy = VectorType::get(Type::getInt1Ty(C1->getContext()),
1815                                VT->getNumElements());
1816   else
1817     ResultTy = Type::getInt1Ty(C1->getContext());
1818 
1819   // Fold FCMP_FALSE/FCMP_TRUE unconditionally.
1820   if (pred == FCmpInst::FCMP_FALSE)
1821     return Constant::getNullValue(ResultTy);
1822 
1823   if (pred == FCmpInst::FCMP_TRUE)
1824     return Constant::getAllOnesValue(ResultTy);
1825 
1826   // Handle some degenerate cases first
1827   if (isa<UndefValue>(C1) || isa<UndefValue>(C2)) {
1828     CmpInst::Predicate Predicate = CmpInst::Predicate(pred);
1829     bool isIntegerPredicate = ICmpInst::isIntPredicate(Predicate);
1830     // For EQ and NE, we can always pick a value for the undef to make the
1831     // predicate pass or fail, so we can return undef.
1832     // Also, if both operands are undef, we can return undef for int comparison.
1833     if (ICmpInst::isEquality(Predicate) || (isIntegerPredicate && C1 == C2))
1834       return UndefValue::get(ResultTy);
1835 
1836     // Otherwise, for integer compare, pick the same value as the non-undef
1837     // operand, and fold it to true or false.
1838     if (isIntegerPredicate)
1839       return ConstantInt::get(ResultTy, CmpInst::isTrueWhenEqual(Predicate));
1840 
    // Choosing NaN for the undef will always make unordered comparisons
    // succeed and ordered comparisons fail.
1843     return ConstantInt::get(ResultTy, CmpInst::isUnordered(Predicate));
1844   }
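  // Illustrative examples of the undef handling above: icmp ult i32 undef, 7
  // folds to false, icmp eq i32 undef, 7 folds to undef, and
  // fcmp uno double undef, 1.0 folds to true (the undef is free to be NaN).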
1845 
1846   // icmp eq/ne(null,GV) -> false/true
1847   if (C1->isNullValue()) {
1848     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C2))
1849       // Don't try to evaluate aliases.  External weak GV can be null.
1850       if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
1851           !NullPointerIsDefined(nullptr /* F */,
1852                                 GV->getType()->getAddressSpace())) {
1853         if (pred == ICmpInst::ICMP_EQ)
1854           return ConstantInt::getFalse(C1->getContext());
1855         else if (pred == ICmpInst::ICMP_NE)
1856           return ConstantInt::getTrue(C1->getContext());
1857       }
1858   // icmp eq/ne(GV,null) -> false/true
1859   } else if (C2->isNullValue()) {
1860     if (const GlobalValue *GV = dyn_cast<GlobalValue>(C1))
1861       // Don't try to evaluate aliases.  External weak GV can be null.
1862       if (!isa<GlobalAlias>(GV) && !GV->hasExternalWeakLinkage() &&
1863           !NullPointerIsDefined(nullptr /* F */,
1864                                 GV->getType()->getAddressSpace())) {
1865         if (pred == ICmpInst::ICMP_EQ)
1866           return ConstantInt::getFalse(C1->getContext());
1867         else if (pred == ICmpInst::ICMP_NE)
1868           return ConstantInt::getTrue(C1->getContext());
1869       }
1870   }
1871 
  // If this is a comparison between two i1's, simplify it.
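  // Illustrative: icmp ne i1 %a, %b is just xor %a, %b, and icmp eq i1 %a, %b
  // becomes xor %a, (xor %b, true), i.e. the negation of the xor.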
1873   if (C1->getType()->isIntegerTy(1)) {
1874     switch(pred) {
1875     case ICmpInst::ICMP_EQ:
1876       if (isa<ConstantInt>(C2))
1877         return ConstantExpr::getXor(C1, ConstantExpr::getNot(C2));
1878       return ConstantExpr::getXor(ConstantExpr::getNot(C1), C2);
1879     case ICmpInst::ICMP_NE:
1880       return ConstantExpr::getXor(C1, C2);
1881     default:
1882       break;
1883     }
1884   }
1885 
1886   if (isa<ConstantInt>(C1) && isa<ConstantInt>(C2)) {
1887     const APInt &V1 = cast<ConstantInt>(C1)->getValue();
1888     const APInt &V2 = cast<ConstantInt>(C2)->getValue();
1889     switch (pred) {
1890     default: llvm_unreachable("Invalid ICmp Predicate");
1891     case ICmpInst::ICMP_EQ:  return ConstantInt::get(ResultTy, V1 == V2);
1892     case ICmpInst::ICMP_NE:  return ConstantInt::get(ResultTy, V1 != V2);
1893     case ICmpInst::ICMP_SLT: return ConstantInt::get(ResultTy, V1.slt(V2));
1894     case ICmpInst::ICMP_SGT: return ConstantInt::get(ResultTy, V1.sgt(V2));
1895     case ICmpInst::ICMP_SLE: return ConstantInt::get(ResultTy, V1.sle(V2));
1896     case ICmpInst::ICMP_SGE: return ConstantInt::get(ResultTy, V1.sge(V2));
1897     case ICmpInst::ICMP_ULT: return ConstantInt::get(ResultTy, V1.ult(V2));
1898     case ICmpInst::ICMP_UGT: return ConstantInt::get(ResultTy, V1.ugt(V2));
1899     case ICmpInst::ICMP_ULE: return ConstantInt::get(ResultTy, V1.ule(V2));
1900     case ICmpInst::ICMP_UGE: return ConstantInt::get(ResultTy, V1.uge(V2));
1901     }
1902   } else if (isa<ConstantFP>(C1) && isa<ConstantFP>(C2)) {
1903     const APFloat &C1V = cast<ConstantFP>(C1)->getValueAPF();
1904     const APFloat &C2V = cast<ConstantFP>(C2)->getValueAPF();
1905     APFloat::cmpResult R = C1V.compare(C2V);
1906     switch (pred) {
1907     default: llvm_unreachable("Invalid FCmp Predicate");
1908     case FCmpInst::FCMP_FALSE: return Constant::getNullValue(ResultTy);
1909     case FCmpInst::FCMP_TRUE:  return Constant::getAllOnesValue(ResultTy);
1910     case FCmpInst::FCMP_UNO:
1911       return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered);
1912     case FCmpInst::FCMP_ORD:
1913       return ConstantInt::get(ResultTy, R!=APFloat::cmpUnordered);
1914     case FCmpInst::FCMP_UEQ:
1915       return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
1916                                         R==APFloat::cmpEqual);
1917     case FCmpInst::FCMP_OEQ:
1918       return ConstantInt::get(ResultTy, R==APFloat::cmpEqual);
1919     case FCmpInst::FCMP_UNE:
1920       return ConstantInt::get(ResultTy, R!=APFloat::cmpEqual);
1921     case FCmpInst::FCMP_ONE:
1922       return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
1923                                         R==APFloat::cmpGreaterThan);
1924     case FCmpInst::FCMP_ULT:
1925       return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
1926                                         R==APFloat::cmpLessThan);
1927     case FCmpInst::FCMP_OLT:
1928       return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan);
1929     case FCmpInst::FCMP_UGT:
1930       return ConstantInt::get(ResultTy, R==APFloat::cmpUnordered ||
1931                                         R==APFloat::cmpGreaterThan);
1932     case FCmpInst::FCMP_OGT:
1933       return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan);
1934     case FCmpInst::FCMP_ULE:
1935       return ConstantInt::get(ResultTy, R!=APFloat::cmpGreaterThan);
1936     case FCmpInst::FCMP_OLE:
1937       return ConstantInt::get(ResultTy, R==APFloat::cmpLessThan ||
1938                                         R==APFloat::cmpEqual);
1939     case FCmpInst::FCMP_UGE:
1940       return ConstantInt::get(ResultTy, R!=APFloat::cmpLessThan);
1941     case FCmpInst::FCMP_OGE:
1942       return ConstantInt::get(ResultTy, R==APFloat::cmpGreaterThan ||
1943                                         R==APFloat::cmpEqual);
1944     }
1945   } else if (C1->getType()->isVectorTy()) {
1946     // If we can constant fold the comparison of each element, constant fold
1947     // the whole vector comparison.
1948     SmallVector<Constant*, 4> ResElts;
1949     Type *Ty = IntegerType::get(C1->getContext(), 32);
1950     // Compare the elements, producing an i1 result or constant expr.
    for (unsigned i = 0, e = C1->getType()->getVectorNumElements(); i != e;
         ++i) {
1952       Constant *C1E =
1953         ConstantExpr::getExtractElement(C1, ConstantInt::get(Ty, i));
1954       Constant *C2E =
1955         ConstantExpr::getExtractElement(C2, ConstantInt::get(Ty, i));
1956 
1957       ResElts.push_back(ConstantExpr::getCompare(pred, C1E, C2E));
1958     }
1959 
1960     return ConstantVector::get(ResElts);
1961   }
1962 
1963   if (C1->getType()->isFloatingPointTy() &&
1964       // Only call evaluateFCmpRelation if we have a constant expr to avoid
1965       // infinite recursive loop
1966       (isa<ConstantExpr>(C1) || isa<ConstantExpr>(C2))) {
1967     int Result = -1;  // -1 = unknown, 0 = known false, 1 = known true.
1968     switch (evaluateFCmpRelation(C1, C2)) {
1969     default: llvm_unreachable("Unknown relation!");
1970     case FCmpInst::FCMP_UNO:
1971     case FCmpInst::FCMP_ORD:
1972     case FCmpInst::FCMP_UNE:
1973     case FCmpInst::FCMP_ULT:
1974     case FCmpInst::FCMP_UGT:
1975     case FCmpInst::FCMP_ULE:
1976     case FCmpInst::FCMP_UGE:
1977     case FCmpInst::FCMP_TRUE:
1978     case FCmpInst::FCMP_FALSE:
1979     case FCmpInst::BAD_FCMP_PREDICATE:
1980       break; // Couldn't determine anything about these constants.
1981     case FCmpInst::FCMP_OEQ: // We know that C1 == C2
1982       Result = (pred == FCmpInst::FCMP_UEQ || pred == FCmpInst::FCMP_OEQ ||
1983                 pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE ||
1984                 pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
1985       break;
1986     case FCmpInst::FCMP_OLT: // We know that C1 < C2
1987       Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
1988                 pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT ||
1989                 pred == FCmpInst::FCMP_ULE || pred == FCmpInst::FCMP_OLE);
1990       break;
1991     case FCmpInst::FCMP_OGT: // We know that C1 > C2
1992       Result = (pred == FCmpInst::FCMP_UNE || pred == FCmpInst::FCMP_ONE ||
1993                 pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT ||
1994                 pred == FCmpInst::FCMP_UGE || pred == FCmpInst::FCMP_OGE);
1995       break;
1996     case FCmpInst::FCMP_OLE: // We know that C1 <= C2
1997       // We can only partially decide this relation.
1998       if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
1999         Result = 0;
2000       else if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
2001         Result = 1;
2002       break;
    case FCmpInst::FCMP_OGE: // We know that C1 >= C2
2004       // We can only partially decide this relation.
2005       if (pred == FCmpInst::FCMP_ULT || pred == FCmpInst::FCMP_OLT)
2006         Result = 0;
2007       else if (pred == FCmpInst::FCMP_UGT || pred == FCmpInst::FCMP_OGT)
2008         Result = 1;
2009       break;
2010     case FCmpInst::FCMP_ONE: // We know that C1 != C2
2011       // We can only partially decide this relation.
2012       if (pred == FCmpInst::FCMP_OEQ || pred == FCmpInst::FCMP_UEQ)
2013         Result = 0;
2014       else if (pred == FCmpInst::FCMP_ONE || pred == FCmpInst::FCMP_UNE)
2015         Result = 1;
2016       break;
2017     case FCmpInst::FCMP_UEQ: // We know that C1 == C2 || isUnordered(C1, C2).
2018       // We can only partially decide this relation.
2019       if (pred == FCmpInst::FCMP_ONE)
2020         Result = 0;
2021       else if (pred == FCmpInst::FCMP_UEQ)
2022         Result = 1;
2023       break;
2024     }
2025 
2026     // If we evaluated the result, return it now.
2027     if (Result != -1)
2028       return ConstantInt::get(ResultTy, Result);
2029 
2030   } else {
2031     // Evaluate the relation between the two constants, per the predicate.
2032     int Result = -1;  // -1 = unknown, 0 = known false, 1 = known true.
2033     switch (evaluateICmpRelation(C1, C2,
2034                                  CmpInst::isSigned((CmpInst::Predicate)pred))) {
2035     default: llvm_unreachable("Unknown relational!");
2036     case ICmpInst::BAD_ICMP_PREDICATE:
2037       break;  // Couldn't determine anything about these constants.
2038     case ICmpInst::ICMP_EQ:   // We know the constants are equal!
2039       // If we know the constants are equal, we can decide the result of this
2040       // computation precisely.
2041       Result = ICmpInst::isTrueWhenEqual((ICmpInst::Predicate)pred);
2042       break;
2043     case ICmpInst::ICMP_ULT:
2044       switch (pred) {
2045       case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_ULE:
2046         Result = 1; break;
2047       case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_UGE:
2048         Result = 0; break;
2049       }
2050       break;
2051     case ICmpInst::ICMP_SLT:
2052       switch (pred) {
2053       case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SLE:
2054         Result = 1; break;
2055       case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SGE:
2056         Result = 0; break;
2057       }
2058       break;
2059     case ICmpInst::ICMP_UGT:
2060       switch (pred) {
2061       case ICmpInst::ICMP_UGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_UGE:
2062         Result = 1; break;
2063       case ICmpInst::ICMP_ULT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_ULE:
2064         Result = 0; break;
2065       }
2066       break;
2067     case ICmpInst::ICMP_SGT:
2068       switch (pred) {
2069       case ICmpInst::ICMP_SGT: case ICmpInst::ICMP_NE: case ICmpInst::ICMP_SGE:
2070         Result = 1; break;
2071       case ICmpInst::ICMP_SLT: case ICmpInst::ICMP_EQ: case ICmpInst::ICMP_SLE:
2072         Result = 0; break;
2073       }
2074       break;
2075     case ICmpInst::ICMP_ULE:
2076       if (pred == ICmpInst::ICMP_UGT) Result = 0;
2077       if (pred == ICmpInst::ICMP_ULT || pred == ICmpInst::ICMP_ULE) Result = 1;
2078       break;
2079     case ICmpInst::ICMP_SLE:
2080       if (pred == ICmpInst::ICMP_SGT) Result = 0;
2081       if (pred == ICmpInst::ICMP_SLT || pred == ICmpInst::ICMP_SLE) Result = 1;
2082       break;
2083     case ICmpInst::ICMP_UGE:
2084       if (pred == ICmpInst::ICMP_ULT) Result = 0;
2085       if (pred == ICmpInst::ICMP_UGT || pred == ICmpInst::ICMP_UGE) Result = 1;
2086       break;
2087     case ICmpInst::ICMP_SGE:
2088       if (pred == ICmpInst::ICMP_SLT) Result = 0;
2089       if (pred == ICmpInst::ICMP_SGT || pred == ICmpInst::ICMP_SGE) Result = 1;
2090       break;
2091     case ICmpInst::ICMP_NE:
2092       if (pred == ICmpInst::ICMP_EQ) Result = 0;
2093       if (pred == ICmpInst::ICMP_NE) Result = 1;
2094       break;
2095     }
2096 
2097     // If we evaluated the result, return it now.
2098     if (Result != -1)
2099       return ConstantInt::get(ResultTy, Result);
2100 
2101     // If the right hand side is a bitcast, try using its inverse to simplify
2102     // it by moving it to the left hand side.  We can't do this if it would turn
    // a vector compare into a scalar compare or vice versa, or if it would turn
2104     // the operands into FP values.
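    // Illustrative: comparing C1 against bitcast(X) becomes a comparison of
    // bitcast(C1) against X in X's original type, provided that type is not
    // floating point and the scalar/vector shape stays the same.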
2105     if (ConstantExpr *CE2 = dyn_cast<ConstantExpr>(C2)) {
2106       Constant *CE2Op0 = CE2->getOperand(0);
2107       if (CE2->getOpcode() == Instruction::BitCast &&
2108           CE2->getType()->isVectorTy() == CE2Op0->getType()->isVectorTy() &&
2109           !CE2Op0->getType()->isFPOrFPVectorTy()) {
2110         Constant *Inverse = ConstantExpr::getBitCast(C1, CE2Op0->getType());
2111         return ConstantExpr::getICmp(pred, Inverse, CE2Op0);
2112       }
2113     }
2114 
2115     // If the left hand side is an extension, try eliminating it.
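    // Illustrative: icmp slt (sext i8 X to i32), 100 can become
    // icmp slt i8 X, 100, because truncating 100 to i8 and sign-extending it
    // back reproduces 100.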
2116     if (ConstantExpr *CE1 = dyn_cast<ConstantExpr>(C1)) {
2117       if ((CE1->getOpcode() == Instruction::SExt &&
2118            ICmpInst::isSigned((ICmpInst::Predicate)pred)) ||
2119           (CE1->getOpcode() == Instruction::ZExt &&
2120            !ICmpInst::isSigned((ICmpInst::Predicate)pred))){
2121         Constant *CE1Op0 = CE1->getOperand(0);
2122         Constant *CE1Inverse = ConstantExpr::getTrunc(CE1, CE1Op0->getType());
2123         if (CE1Inverse == CE1Op0) {
2124           // Check whether we can safely truncate the right hand side.
2125           Constant *C2Inverse = ConstantExpr::getTrunc(C2, CE1Op0->getType());
2126           if (ConstantExpr::getCast(CE1->getOpcode(), C2Inverse,
2127                                     C2->getType()) == C2)
2128             return ConstantExpr::getICmp(pred, CE1Inverse, C2Inverse);
2129         }
2130       }
2131     }
2132 
2133     if ((!isa<ConstantExpr>(C1) && isa<ConstantExpr>(C2)) ||
2134         (C1->isNullValue() && !C2->isNullValue())) {
2135       // If C2 is a constant expr and C1 isn't, flip them around and fold the
2136       // other way if possible.
2137       // Also, if C1 is null and C2 isn't, flip them around.
2138       pred = ICmpInst::getSwappedPredicate((ICmpInst::Predicate)pred);
2139       return ConstantExpr::getICmp(pred, C2, C1);
2140     }
2141   }
2142   return nullptr;
2143 }
2144 
2145 /// Test whether the given sequence of *normalized* indices is "inbounds".
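/// For example (illustrative): a leading index of 0 is always inbounds, and
/// (1, 0, ..., 0) is inbounds by the one-past-the-end rule, but (1, 1) or a
/// leading index of 2 cannot be proven inbounds here.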
2146 template<typename IndexTy>
2147 static bool isInBoundsIndices(ArrayRef<IndexTy> Idxs) {
2148   // No indices means nothing that could be out of bounds.
2149   if (Idxs.empty()) return true;
2150 
2151   // If the first index is zero, it's in bounds.
2152   if (cast<Constant>(Idxs[0])->isNullValue()) return true;
2153 
2154   // If the first index is one and all the rest are zero, it's in bounds,
2155   // by the one-past-the-end rule.
2156   if (auto *CI = dyn_cast<ConstantInt>(Idxs[0])) {
2157     if (!CI->isOne())
2158       return false;
2159   } else {
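    // CI (declared in the if condition above) is still in scope here but is
    // null; look for a splat-of-one vector index instead.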
2160     auto *CV = cast<ConstantDataVector>(Idxs[0]);
2161     CI = dyn_cast_or_null<ConstantInt>(CV->getSplatValue());
2162     if (!CI || !CI->isOne())
2163       return false;
2164   }
2165 
2166   for (unsigned i = 1, e = Idxs.size(); i != e; ++i)
2167     if (!cast<Constant>(Idxs[i])->isNullValue())
2168       return false;
2169   return true;
2170 }
2171 
/// Test whether a given ConstantInt is an in-range index for a sequential
/// (array or vector) type with the given number of elements.
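/// For example (illustrative): with NumElements == 4, indices 0 through 3 are
/// in range, while -1 and 4 are not; NumElements == 0 means the bound is
/// unknown, so only negative indices are rejected.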
2173 static bool isIndexInRangeOfArrayType(uint64_t NumElements,
2174                                       const ConstantInt *CI) {
2175   // We cannot bounds check the index if it doesn't fit in an int64_t.
2176   if (CI->getValue().getMinSignedBits() > 64)
2177     return false;
2178 
2179   // A negative index or an index past the end of our sequential type is
2180   // considered out-of-range.
2181   int64_t IndexVal = CI->getSExtValue();
2182   if (IndexVal < 0 || (NumElements > 0 && (uint64_t)IndexVal >= NumElements))
2183     return false;
2184 
2185   // Otherwise, it is in-range.
2186   return true;
2187 }
2188 
2189 Constant *llvm::ConstantFoldGetElementPtr(Type *PointeeTy, Constant *C,
2190                                           bool InBounds,
2191                                           Optional<unsigned> InRangeIndex,
2192                                           ArrayRef<Value *> Idxs) {
2193   if (Idxs.empty()) return C;
2194 
2195   Type *GEPTy = GetElementPtrInst::getGEPReturnType(
2196       PointeeTy, C, makeArrayRef((Value *const *)Idxs.data(), Idxs.size()));
2197 
2198   if (isa<UndefValue>(C))
2199     return UndefValue::get(GEPTy);
2200 
2201   Constant *Idx0 = cast<Constant>(Idxs[0]);
2202   if (Idxs.size() == 1 && (Idx0->isNullValue() || isa<UndefValue>(Idx0)))
2203     return GEPTy->isVectorTy() && !C->getType()->isVectorTy()
2204                ? ConstantVector::getSplat(
2205                      cast<VectorType>(GEPTy)->getNumElements(), C)
2206                : C;
2207 
2208   if (C->isNullValue()) {
2209     bool isNull = true;
2210     for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
2211       if (!isa<UndefValue>(Idxs[i]) &&
2212           !cast<Constant>(Idxs[i])->isNullValue()) {
2213         isNull = false;
2214         break;
2215       }
2216     if (isNull) {
2217       PointerType *PtrTy = cast<PointerType>(C->getType()->getScalarType());
2218       Type *Ty = GetElementPtrInst::getIndexedType(PointeeTy, Idxs);
2219 
2220       assert(Ty && "Invalid indices for GEP!");
2221       Type *OrigGEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
2222       Type *GEPTy = PointerType::get(Ty, PtrTy->getAddressSpace());
2223       if (VectorType *VT = dyn_cast<VectorType>(C->getType()))
2224         GEPTy = VectorType::get(OrigGEPTy, VT->getNumElements());
2225 
      // The GEP returns a vector of pointers when one or more of
      // its arguments is a vector.
2228       for (unsigned i = 0, e = Idxs.size(); i != e; ++i) {
2229         if (auto *VT = dyn_cast<VectorType>(Idxs[i]->getType())) {
2230           GEPTy = VectorType::get(OrigGEPTy, VT->getNumElements());
2231           break;
2232         }
2233       }
2234 
2235       return Constant::getNullValue(GEPTy);
2236     }
2237   }
2238 
2239   if (ConstantExpr *CE = dyn_cast<ConstantExpr>(C)) {
2240     // Combine Indices - If the source pointer to this getelementptr instruction
2241     // is a getelementptr instruction, combine the indices of the two
2242     // getelementptr instructions into a single instruction.
2243     //
2244     if (CE->getOpcode() == Instruction::GetElementPtr) {
2245       gep_type_iterator LastI = gep_type_end(CE);
2246       for (gep_type_iterator I = gep_type_begin(CE), E = gep_type_end(CE);
2247            I != E; ++I)
2248         LastI = I;
2249 
2250       // We cannot combine indices if doing so would take us outside of an
2251       // array or vector.  Doing otherwise could trick us if we evaluated such a
2252       // GEP as part of a load.
2253       //
2254       // e.g. Consider if the original GEP was:
2255       // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
2256       //                    i32 0, i32 0, i64 0)
2257       //
2258       // If we then tried to offset it by '8' to get to the third element,
2259       // an i8, we should *not* get:
2260       // i8* getelementptr ({ [2 x i8], i32, i8, [3 x i8] }* @main.c,
2261       //                    i32 0, i32 0, i64 8)
2262       //
      // This GEP tries to index array element '8', which runs out-of-bounds.
2264       // Subsequent evaluation would get confused and produce erroneous results.
2265       //
2266       // The following prohibits such a GEP from being formed by checking to see
2267       // if the index is in-range with respect to an array.
2268       // TODO: This code may be extended to handle vectors as well.
2269       bool PerformFold = false;
2270       if (Idx0->isNullValue())
2271         PerformFold = true;
2272       else if (LastI.isSequential())
2273         if (ConstantInt *CI = dyn_cast<ConstantInt>(Idx0))
2274           PerformFold = (!LastI.isBoundedSequential() ||
2275                          isIndexInRangeOfArrayType(
2276                              LastI.getSequentialNumElements(), CI)) &&
2277                         !CE->getOperand(CE->getNumOperands() - 1)
2278                              ->getType()
2279                              ->isVectorTy();
2280 
2281       if (PerformFold) {
2282         SmallVector<Value*, 16> NewIndices;
2283         NewIndices.reserve(Idxs.size() + CE->getNumOperands());
2284         NewIndices.append(CE->op_begin() + 1, CE->op_end() - 1);
2285 
2286         // Add the last index of the source with the first index of the new GEP.
2287         // Make sure to handle the case when they are actually different types.
2288         Constant *Combined = CE->getOperand(CE->getNumOperands()-1);
        // Only need to combine them if the outer GEP's leading index is
        // non-zero.
2290         if (!Idx0->isNullValue()) {
2291           Type *IdxTy = Combined->getType();
2292           if (IdxTy != Idx0->getType()) {
2293             unsigned CommonExtendedWidth =
2294                 std::max(IdxTy->getIntegerBitWidth(),
2295                          Idx0->getType()->getIntegerBitWidth());
2296             CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
2297 
2298             Type *CommonTy =
2299                 Type::getIntNTy(IdxTy->getContext(), CommonExtendedWidth);
2300             Constant *C1 = ConstantExpr::getSExtOrBitCast(Idx0, CommonTy);
2301             Constant *C2 = ConstantExpr::getSExtOrBitCast(Combined, CommonTy);
2302             Combined = ConstantExpr::get(Instruction::Add, C1, C2);
2303           } else {
2304             Combined =
2305               ConstantExpr::get(Instruction::Add, Idx0, Combined);
2306           }
2307         }
2308 
2309         NewIndices.push_back(Combined);
2310         NewIndices.append(Idxs.begin() + 1, Idxs.end());
2311 
2312         // The combined GEP normally inherits its index inrange attribute from
2313         // the inner GEP, but if the inner GEP's last index was adjusted by the
        // outer GEP, any inrange attribute on that index is invalidated.
2315         Optional<unsigned> IRIndex = cast<GEPOperator>(CE)->getInRangeIndex();
        if (IRIndex && *IRIndex == CE->getNumOperands() - 2 &&
            !Idx0->isNullValue())
2317           IRIndex = None;
2318 
2319         return ConstantExpr::getGetElementPtr(
2320             cast<GEPOperator>(CE)->getSourceElementType(), CE->getOperand(0),
2321             NewIndices, InBounds && cast<GEPOperator>(CE)->isInBounds(),
2322             IRIndex);
2323       }
2324     }
2325 
2326     // Attempt to fold casts to the same type away.  For example, folding:
2327     //
2328     //   i32* getelementptr ([2 x i32]* bitcast ([3 x i32]* %X to [2 x i32]*),
2329     //                       i64 0, i64 0)
2330     // into:
2331     //
2332     //   i32* getelementptr ([3 x i32]* %X, i64 0, i64 0)
2333     //
2334     // Don't fold if the cast is changing address spaces.
2335     if (CE->isCast() && Idxs.size() > 1 && Idx0->isNullValue()) {
2336       PointerType *SrcPtrTy =
2337         dyn_cast<PointerType>(CE->getOperand(0)->getType());
2338       PointerType *DstPtrTy = dyn_cast<PointerType>(CE->getType());
2339       if (SrcPtrTy && DstPtrTy) {
2340         ArrayType *SrcArrayTy =
2341           dyn_cast<ArrayType>(SrcPtrTy->getElementType());
2342         ArrayType *DstArrayTy =
2343           dyn_cast<ArrayType>(DstPtrTy->getElementType());
2344         if (SrcArrayTy && DstArrayTy
2345             && SrcArrayTy->getElementType() == DstArrayTy->getElementType()
2346             && SrcPtrTy->getAddressSpace() == DstPtrTy->getAddressSpace())
2347           return ConstantExpr::getGetElementPtr(SrcArrayTy,
2348                                                 (Constant *)CE->getOperand(0),
2349                                                 Idxs, InBounds, InRangeIndex);
2350       }
2351     }
2352   }
2353 
2354   // Check to see if any array indices are not within the corresponding
2355   // notional array or vector bounds. If so, try to determine if they can be
2356   // factored out into preceding dimensions.
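  // Illustrative: getelementptr [3 x i32], [3 x i32]* @A, i64 0, i64 5 can be
  // rewritten with indices (0 + sdiv(5, 3), srem(5, 3)) = (1, 2), folding the
  // out-of-range index 5 into the preceding dimension.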
2357   SmallVector<Constant *, 8> NewIdxs;
2358   Type *Ty = PointeeTy;
2359   Type *Prev = C->getType();
2360   bool Unknown =
2361       !isa<ConstantInt>(Idxs[0]) && !isa<ConstantDataVector>(Idxs[0]);
2362   for (unsigned i = 1, e = Idxs.size(); i != e;
2363        Prev = Ty, Ty = cast<CompositeType>(Ty)->getTypeAtIndex(Idxs[i]), ++i) {
2364     if (!isa<ConstantInt>(Idxs[i]) && !isa<ConstantDataVector>(Idxs[i])) {
2365       // We don't know if it's in range or not.
2366       Unknown = true;
2367       continue;
2368     }
2369     if (!isa<ConstantInt>(Idxs[i - 1]) && !isa<ConstantDataVector>(Idxs[i - 1]))
2370       // Skip if the type of the previous index is not supported.
2371       continue;
2372     if (InRangeIndex && i == *InRangeIndex + 1) {
2373       // If an index is marked inrange, we cannot apply this canonicalization to
2374       // the following index, as that will cause the inrange index to point to
2375       // the wrong element.
2376       continue;
2377     }
2378     if (isa<StructType>(Ty)) {
      // The verifier makes sure that GEPs into a struct are in range.
2380       continue;
2381     }
2382     auto *STy = cast<SequentialType>(Ty);
2383     if (isa<VectorType>(STy)) {
      // There can be awkward padding after a non-power-of-two vector.
2385       Unknown = true;
2386       continue;
2387     }
2388     if (ConstantInt *CI = dyn_cast<ConstantInt>(Idxs[i])) {
2389       if (isIndexInRangeOfArrayType(STy->getNumElements(), CI))
2390         // It's in range, skip to the next index.
2391         continue;
2392       if (CI->getSExtValue() < 0) {
2393         // It's out of range and negative, don't try to factor it.
2394         Unknown = true;
2395         continue;
2396       }
2397     } else {
2398       auto *CV = cast<ConstantDataVector>(Idxs[i]);
2399       bool InRange = true;
2400       for (unsigned I = 0, E = CV->getNumElements(); I != E; ++I) {
2401         auto *CI = cast<ConstantInt>(CV->getElementAsConstant(I));
2402         InRange &= isIndexInRangeOfArrayType(STy->getNumElements(), CI);
2403         if (CI->getSExtValue() < 0) {
2404           Unknown = true;
2405           break;
2406         }
2407       }
      if (InRange || Unknown)
        // Either every element is in range, or some element was negative (in
        // which case Unknown was set above); either way, don't try to factor
        // this index and just move on to the next one.
        continue;
2412     }
2413     if (isa<StructType>(Prev)) {
2414       // It's out of range, but the prior dimension is a struct
2415       // so we can't do anything about it.
2416       Unknown = true;
2417       continue;
2418     }
2419     // It's out of range, but we can factor it into the prior
2420     // dimension.
2421     NewIdxs.resize(Idxs.size());
2422     // Determine the number of elements in our sequential type.
2423     uint64_t NumElements = STy->getArrayNumElements();
2424 
2425     // Expand the current index or the previous index to a vector from a scalar
2426     // if necessary.
2427     Constant *CurrIdx = cast<Constant>(Idxs[i]);
2428     auto *PrevIdx =
2429         NewIdxs[i - 1] ? NewIdxs[i - 1] : cast<Constant>(Idxs[i - 1]);
2430     bool IsCurrIdxVector = CurrIdx->getType()->isVectorTy();
2431     bool IsPrevIdxVector = PrevIdx->getType()->isVectorTy();
2432     bool UseVector = IsCurrIdxVector || IsPrevIdxVector;
2433 
2434     if (!IsCurrIdxVector && IsPrevIdxVector)
2435       CurrIdx = ConstantDataVector::getSplat(
2436           PrevIdx->getType()->getVectorNumElements(), CurrIdx);
2437 
2438     if (!IsPrevIdxVector && IsCurrIdxVector)
2439       PrevIdx = ConstantDataVector::getSplat(
2440           CurrIdx->getType()->getVectorNumElements(), PrevIdx);
2441 
2442     Constant *Factor =
2443         ConstantInt::get(CurrIdx->getType()->getScalarType(), NumElements);
2444     if (UseVector)
2445       Factor = ConstantDataVector::getSplat(
2446           IsPrevIdxVector ? PrevIdx->getType()->getVectorNumElements()
2447                           : CurrIdx->getType()->getVectorNumElements(),
2448           Factor);
2449 
2450     NewIdxs[i] = ConstantExpr::getSRem(CurrIdx, Factor);
2451 
2452     Constant *Div = ConstantExpr::getSDiv(CurrIdx, Factor);
2453 
2454     unsigned CommonExtendedWidth =
2455         std::max(PrevIdx->getType()->getScalarSizeInBits(),
2456                  Div->getType()->getScalarSizeInBits());
2457     CommonExtendedWidth = std::max(CommonExtendedWidth, 64U);
2458 
    // Before adding, extend both operands to the common width (at least
    // 64 bits) to avoid overflow trouble.
2461     Type *ExtendedTy = Type::getIntNTy(Div->getContext(), CommonExtendedWidth);
2462     if (UseVector)
2463       ExtendedTy = VectorType::get(
2464           ExtendedTy, IsPrevIdxVector
2465                           ? PrevIdx->getType()->getVectorNumElements()
2466                           : CurrIdx->getType()->getVectorNumElements());
2467 
2468     if (!PrevIdx->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
2469       PrevIdx = ConstantExpr::getSExt(PrevIdx, ExtendedTy);
2470 
2471     if (!Div->getType()->isIntOrIntVectorTy(CommonExtendedWidth))
2472       Div = ConstantExpr::getSExt(Div, ExtendedTy);
2473 
2474     NewIdxs[i - 1] = ConstantExpr::getAdd(PrevIdx, Div);
2475   }
2476 
2477   // If we did any factoring, start over with the adjusted indices.
2478   if (!NewIdxs.empty()) {
2479     for (unsigned i = 0, e = Idxs.size(); i != e; ++i)
2480       if (!NewIdxs[i]) NewIdxs[i] = cast<Constant>(Idxs[i]);
2481     return ConstantExpr::getGetElementPtr(PointeeTy, C, NewIdxs, InBounds,
2482                                           InRangeIndex);
2483   }
2484 
2485   // If all indices are known integers and normalized, we can do a simple
2486   // check for the "inbounds" property.
2487   if (!Unknown && !InBounds)
2488     if (auto *GV = dyn_cast<GlobalVariable>(C))
2489       if (!GV->hasExternalWeakLinkage() && isInBoundsIndices(Idxs))
2490         return ConstantExpr::getGetElementPtr(PointeeTy, C, Idxs,
2491                                               /*InBounds=*/true, InRangeIndex);
2492 
2493   return nullptr;
2494 }
2495