1 //===- InstCombineCalls.cpp -----------------------------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the visitCall, visitInvoke, and visitCallBr functions.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "InstCombineInternal.h"
14 #include "llvm/ADT/APFloat.h"
15 #include "llvm/ADT/APInt.h"
16 #include "llvm/ADT/APSInt.h"
17 #include "llvm/ADT/ArrayRef.h"
18 #include "llvm/ADT/None.h"
19 #include "llvm/ADT/Optional.h"
20 #include "llvm/ADT/STLFunctionalExtras.h"
21 #include "llvm/ADT/SmallBitVector.h"
22 #include "llvm/ADT/SmallVector.h"
23 #include "llvm/ADT/Statistic.h"
24 #include "llvm/Analysis/AliasAnalysis.h"
25 #include "llvm/Analysis/AssumeBundleQueries.h"
26 #include "llvm/Analysis/AssumptionCache.h"
27 #include "llvm/Analysis/InstructionSimplify.h"
28 #include "llvm/Analysis/Loads.h"
29 #include "llvm/Analysis/MemoryBuiltins.h"
30 #include "llvm/Analysis/ValueTracking.h"
31 #include "llvm/Analysis/VectorUtils.h"
32 #include "llvm/IR/Attributes.h"
33 #include "llvm/IR/BasicBlock.h"
34 #include "llvm/IR/Constant.h"
35 #include "llvm/IR/Constants.h"
36 #include "llvm/IR/DataLayout.h"
37 #include "llvm/IR/DerivedTypes.h"
38 #include "llvm/IR/Function.h"
39 #include "llvm/IR/GlobalVariable.h"
40 #include "llvm/IR/InlineAsm.h"
41 #include "llvm/IR/InstrTypes.h"
42 #include "llvm/IR/Instruction.h"
43 #include "llvm/IR/Instructions.h"
44 #include "llvm/IR/IntrinsicInst.h"
45 #include "llvm/IR/Intrinsics.h"
46 #include "llvm/IR/IntrinsicsAArch64.h"
47 #include "llvm/IR/IntrinsicsAMDGPU.h"
48 #include "llvm/IR/IntrinsicsARM.h"
49 #include "llvm/IR/IntrinsicsHexagon.h"
50 #include "llvm/IR/LLVMContext.h"
51 #include "llvm/IR/Metadata.h"
52 #include "llvm/IR/PatternMatch.h"
53 #include "llvm/IR/Statepoint.h"
54 #include "llvm/IR/Type.h"
55 #include "llvm/IR/User.h"
56 #include "llvm/IR/Value.h"
57 #include "llvm/IR/ValueHandle.h"
58 #include "llvm/Support/AtomicOrdering.h"
59 #include "llvm/Support/Casting.h"
60 #include "llvm/Support/CommandLine.h"
61 #include "llvm/Support/Compiler.h"
62 #include "llvm/Support/Debug.h"
63 #include "llvm/Support/ErrorHandling.h"
64 #include "llvm/Support/KnownBits.h"
65 #include "llvm/Support/MathExtras.h"
66 #include "llvm/Support/raw_ostream.h"
67 #include "llvm/Transforms/InstCombine/InstCombiner.h"
68 #include "llvm/Transforms/Utils/AssumeBundleBuilder.h"
69 #include "llvm/Transforms/Utils/Local.h"
70 #include "llvm/Transforms/Utils/SimplifyLibCalls.h"
71 #include <algorithm>
72 #include <cassert>
73 #include <cstdint>
74 #include <utility>
75 #include <vector>
76 
77 #define DEBUG_TYPE "instcombine"
78 #include "llvm/Transforms/Utils/InstructionWorklist.h"
79 
80 using namespace llvm;
81 using namespace PatternMatch;
82 
83 STATISTIC(NumSimplified, "Number of library calls simplified");
84 
85 static cl::opt<unsigned> GuardWideningWindow(
86     "instcombine-guard-widening-window",
87     cl::init(3),
88     cl::desc("How wide an instruction window to bypass looking for "
89              "another guard"));
90 
91 namespace llvm {
92 /// enable preservation of attributes in assume like:
93 /// call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
94 extern cl::opt<bool> EnableKnowledgeRetention;
95 } // namespace llvm
96 
97 /// Return the specified type promoted as it would be to pass though a va_arg
98 /// area.
99 static Type *getPromotedType(Type *Ty) {
100   if (IntegerType* ITy = dyn_cast<IntegerType>(Ty)) {
101     if (ITy->getBitWidth() < 32)
102       return Type::getInt32Ty(Ty->getContext());
103   }
104   return Ty;
105 }
106 
107 /// Recognize a memcpy/memmove from a trivially otherwise unused alloca.
108 /// TODO: This should probably be integrated with visitAllocSites, but that
109 /// requires a deeper change to allow either unread or unwritten objects.
110 static bool hasUndefSource(AnyMemTransferInst *MI) {
111   auto *Src = MI->getRawSource();
112   while (isa<GetElementPtrInst>(Src) || isa<BitCastInst>(Src)) {
113     if (!Src->hasOneUse())
114       return false;
115     Src = cast<Instruction>(Src)->getOperand(0);
116   }
117   return isa<AllocaInst>(Src) && Src->hasOneUse();
118 }
119 
120 Instruction *InstCombinerImpl::SimplifyAnyMemTransfer(AnyMemTransferInst *MI) {
121   Align DstAlign = getKnownAlignment(MI->getRawDest(), DL, MI, &AC, &DT);
122   MaybeAlign CopyDstAlign = MI->getDestAlign();
123   if (!CopyDstAlign || *CopyDstAlign < DstAlign) {
124     MI->setDestAlignment(DstAlign);
125     return MI;
126   }
127 
128   Align SrcAlign = getKnownAlignment(MI->getRawSource(), DL, MI, &AC, &DT);
129   MaybeAlign CopySrcAlign = MI->getSourceAlign();
130   if (!CopySrcAlign || *CopySrcAlign < SrcAlign) {
131     MI->setSourceAlignment(SrcAlign);
132     return MI;
133   }
134 
135   // If we have a store to a location which is known constant, we can conclude
136   // that the store must be storing the constant value (else the memory
137   // wouldn't be constant), and this must be a noop.
138   if (AA->pointsToConstantMemory(MI->getDest())) {
139     // Set the size of the copy to 0, it will be deleted on the next iteration.
140     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
141     return MI;
142   }
143 
144   // If the source is provably undef, the memcpy/memmove doesn't do anything
145   // (unless the transfer is volatile).
146   if (hasUndefSource(MI) && !MI->isVolatile()) {
147     // Set the size of the copy to 0, it will be deleted on the next iteration.
148     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
149     return MI;
150   }
151 
152   // If MemCpyInst length is 1/2/4/8 bytes then replace memcpy with
153   // load/store.
154   ConstantInt *MemOpLength = dyn_cast<ConstantInt>(MI->getLength());
155   if (!MemOpLength) return nullptr;
156 
157   // Source and destination pointer types are always "i8*" for intrinsic.  See
158   // if the size is something we can handle with a single primitive load/store.
159   // A single load+store correctly handles overlapping memory in the memmove
160   // case.
161   uint64_t Size = MemOpLength->getLimitedValue();
162   assert(Size && "0-sized memory transferring should be removed already.");
163 
164   if (Size > 8 || (Size&(Size-1)))
165     return nullptr;  // If not 1/2/4/8 bytes, exit.
166 
167   // If it is an atomic and alignment is less than the size then we will
168   // introduce the unaligned memory access which will be later transformed
169   // into libcall in CodeGen. This is not evident performance gain so disable
170   // it now.
171   if (isa<AtomicMemTransferInst>(MI))
172     if (*CopyDstAlign < Size || *CopySrcAlign < Size)
173       return nullptr;
174 
175   // Use an integer load+store unless we can find something better.
176   unsigned SrcAddrSp =
177     cast<PointerType>(MI->getArgOperand(1)->getType())->getAddressSpace();
178   unsigned DstAddrSp =
179     cast<PointerType>(MI->getArgOperand(0)->getType())->getAddressSpace();
180 
181   IntegerType* IntType = IntegerType::get(MI->getContext(), Size<<3);
182   Type *NewSrcPtrTy = PointerType::get(IntType, SrcAddrSp);
183   Type *NewDstPtrTy = PointerType::get(IntType, DstAddrSp);
184 
185   // If the memcpy has metadata describing the members, see if we can get the
186   // TBAA tag describing our copy.
187   MDNode *CopyMD = nullptr;
188   if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa)) {
189     CopyMD = M;
190   } else if (MDNode *M = MI->getMetadata(LLVMContext::MD_tbaa_struct)) {
191     if (M->getNumOperands() == 3 && M->getOperand(0) &&
192         mdconst::hasa<ConstantInt>(M->getOperand(0)) &&
193         mdconst::extract<ConstantInt>(M->getOperand(0))->isZero() &&
194         M->getOperand(1) &&
195         mdconst::hasa<ConstantInt>(M->getOperand(1)) &&
196         mdconst::extract<ConstantInt>(M->getOperand(1))->getValue() ==
197         Size &&
198         M->getOperand(2) && isa<MDNode>(M->getOperand(2)))
199       CopyMD = cast<MDNode>(M->getOperand(2));
200   }
201 
202   Value *Src = Builder.CreateBitCast(MI->getArgOperand(1), NewSrcPtrTy);
203   Value *Dest = Builder.CreateBitCast(MI->getArgOperand(0), NewDstPtrTy);
204   LoadInst *L = Builder.CreateLoad(IntType, Src);
205   // Alignment from the mem intrinsic will be better, so use it.
206   L->setAlignment(*CopySrcAlign);
207   if (CopyMD)
208     L->setMetadata(LLVMContext::MD_tbaa, CopyMD);
209   MDNode *LoopMemParallelMD =
210     MI->getMetadata(LLVMContext::MD_mem_parallel_loop_access);
211   if (LoopMemParallelMD)
212     L->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
213   MDNode *AccessGroupMD = MI->getMetadata(LLVMContext::MD_access_group);
214   if (AccessGroupMD)
215     L->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
216 
217   StoreInst *S = Builder.CreateStore(L, Dest);
218   // Alignment from the mem intrinsic will be better, so use it.
219   S->setAlignment(*CopyDstAlign);
220   if (CopyMD)
221     S->setMetadata(LLVMContext::MD_tbaa, CopyMD);
222   if (LoopMemParallelMD)
223     S->setMetadata(LLVMContext::MD_mem_parallel_loop_access, LoopMemParallelMD);
224   if (AccessGroupMD)
225     S->setMetadata(LLVMContext::MD_access_group, AccessGroupMD);
226 
227   if (auto *MT = dyn_cast<MemTransferInst>(MI)) {
228     // non-atomics can be volatile
229     L->setVolatile(MT->isVolatile());
230     S->setVolatile(MT->isVolatile());
231   }
232   if (isa<AtomicMemTransferInst>(MI)) {
233     // atomics have to be unordered
234     L->setOrdering(AtomicOrdering::Unordered);
235     S->setOrdering(AtomicOrdering::Unordered);
236   }
237 
238   // Set the size of the copy to 0, it will be deleted on the next iteration.
239   MI->setLength(Constant::getNullValue(MemOpLength->getType()));
240   return MI;
241 }
242 
243 Instruction *InstCombinerImpl::SimplifyAnyMemSet(AnyMemSetInst *MI) {
244   const Align KnownAlignment =
245       getKnownAlignment(MI->getDest(), DL, MI, &AC, &DT);
246   MaybeAlign MemSetAlign = MI->getDestAlign();
247   if (!MemSetAlign || *MemSetAlign < KnownAlignment) {
248     MI->setDestAlignment(KnownAlignment);
249     return MI;
250   }
251 
252   // If we have a store to a location which is known constant, we can conclude
253   // that the store must be storing the constant value (else the memory
254   // wouldn't be constant), and this must be a noop.
255   if (AA->pointsToConstantMemory(MI->getDest())) {
256     // Set the size of the copy to 0, it will be deleted on the next iteration.
257     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
258     return MI;
259   }
260 
261   // Remove memset with an undef value.
262   // FIXME: This is technically incorrect because it might overwrite a poison
263   // value. Change to PoisonValue once #52930 is resolved.
264   if (isa<UndefValue>(MI->getValue())) {
265     // Set the size of the copy to 0, it will be deleted on the next iteration.
266     MI->setLength(Constant::getNullValue(MI->getLength()->getType()));
267     return MI;
268   }
269 
270   // Extract the length and alignment and fill if they are constant.
271   ConstantInt *LenC = dyn_cast<ConstantInt>(MI->getLength());
272   ConstantInt *FillC = dyn_cast<ConstantInt>(MI->getValue());
273   if (!LenC || !FillC || !FillC->getType()->isIntegerTy(8))
274     return nullptr;
275   const uint64_t Len = LenC->getLimitedValue();
276   assert(Len && "0-sized memory setting should be removed already.");
277   const Align Alignment = MI->getDestAlign().valueOrOne();
278 
279   // If it is an atomic and alignment is less than the size then we will
280   // introduce the unaligned memory access which will be later transformed
281   // into libcall in CodeGen. This is not evident performance gain so disable
282   // it now.
283   if (isa<AtomicMemSetInst>(MI))
284     if (Alignment < Len)
285       return nullptr;
286 
287   // memset(s,c,n) -> store s, c (for n=1,2,4,8)
288   if (Len <= 8 && isPowerOf2_32((uint32_t)Len)) {
289     Type *ITy = IntegerType::get(MI->getContext(), Len*8);  // n=1 -> i8.
290 
291     Value *Dest = MI->getDest();
292     unsigned DstAddrSp = cast<PointerType>(Dest->getType())->getAddressSpace();
293     Type *NewDstPtrTy = PointerType::get(ITy, DstAddrSp);
294     Dest = Builder.CreateBitCast(Dest, NewDstPtrTy);
295 
296     // Extract the fill value and store.
297     uint64_t Fill = FillC->getZExtValue()*0x0101010101010101ULL;
298     StoreInst *S = Builder.CreateStore(ConstantInt::get(ITy, Fill), Dest,
299                                        MI->isVolatile());
300     S->setAlignment(Alignment);
301     if (isa<AtomicMemSetInst>(MI))
302       S->setOrdering(AtomicOrdering::Unordered);
303 
304     // Set the size of the copy to 0, it will be deleted on the next iteration.
305     MI->setLength(Constant::getNullValue(LenC->getType()));
306     return MI;
307   }
308 
309   return nullptr;
310 }
311 
312 // TODO, Obvious Missing Transforms:
313 // * Narrow width by halfs excluding zero/undef lanes
314 Value *InstCombinerImpl::simplifyMaskedLoad(IntrinsicInst &II) {
315   Value *LoadPtr = II.getArgOperand(0);
316   const Align Alignment =
317       cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
318 
319   // If the mask is all ones or undefs, this is a plain vector load of the 1st
320   // argument.
321   if (maskIsAllOneOrUndef(II.getArgOperand(2))) {
322     LoadInst *L = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
323                                             "unmaskedload");
324     L->copyMetadata(II);
325     return L;
326   }
327 
328   // If we can unconditionally load from this address, replace with a
329   // load/select idiom. TODO: use DT for context sensitive query
330   if (isDereferenceablePointer(LoadPtr, II.getType(),
331                                II.getModule()->getDataLayout(), &II, nullptr)) {
332     LoadInst *LI = Builder.CreateAlignedLoad(II.getType(), LoadPtr, Alignment,
333                                              "unmaskedload");
334     LI->copyMetadata(II);
335     return Builder.CreateSelect(II.getArgOperand(2), LI, II.getArgOperand(3));
336   }
337 
338   return nullptr;
339 }
340 
341 // TODO, Obvious Missing Transforms:
342 // * Single constant active lane -> store
343 // * Narrow width by halfs excluding zero/undef lanes
344 Instruction *InstCombinerImpl::simplifyMaskedStore(IntrinsicInst &II) {
345   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
346   if (!ConstMask)
347     return nullptr;
348 
349   // If the mask is all zeros, this instruction does nothing.
350   if (ConstMask->isNullValue())
351     return eraseInstFromFunction(II);
352 
353   // If the mask is all ones, this is a plain vector store of the 1st argument.
354   if (ConstMask->isAllOnesValue()) {
355     Value *StorePtr = II.getArgOperand(1);
356     Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
357     StoreInst *S =
358         new StoreInst(II.getArgOperand(0), StorePtr, false, Alignment);
359     S->copyMetadata(II);
360     return S;
361   }
362 
363   if (isa<ScalableVectorType>(ConstMask->getType()))
364     return nullptr;
365 
366   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
367   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
368   APInt UndefElts(DemandedElts.getBitWidth(), 0);
369   if (Value *V =
370           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
371     return replaceOperand(II, 0, V);
372 
373   return nullptr;
374 }
375 
376 // TODO, Obvious Missing Transforms:
377 // * Single constant active lane load -> load
378 // * Dereferenceable address & few lanes -> scalarize speculative load/selects
379 // * Adjacent vector addresses -> masked.load
380 // * Narrow width by halfs excluding zero/undef lanes
381 // * Vector incrementing address -> vector masked load
382 Instruction *InstCombinerImpl::simplifyMaskedGather(IntrinsicInst &II) {
383   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(2));
384   if (!ConstMask)
385     return nullptr;
386 
387   // Vector splat address w/known mask -> scalar load
388   // Fold the gather to load the source vector first lane
389   // because it is reloading the same value each time
390   if (ConstMask->isAllOnesValue())
391     if (auto *SplatPtr = getSplatValue(II.getArgOperand(0))) {
392       auto *VecTy = cast<VectorType>(II.getType());
393       const Align Alignment =
394           cast<ConstantInt>(II.getArgOperand(1))->getAlignValue();
395       LoadInst *L = Builder.CreateAlignedLoad(VecTy->getElementType(), SplatPtr,
396                                               Alignment, "load.scalar");
397       Value *Shuf =
398           Builder.CreateVectorSplat(VecTy->getElementCount(), L, "broadcast");
399       return replaceInstUsesWith(II, cast<Instruction>(Shuf));
400     }
401 
402   return nullptr;
403 }
404 
405 // TODO, Obvious Missing Transforms:
406 // * Single constant active lane -> store
407 // * Adjacent vector addresses -> masked.store
408 // * Narrow store width by halfs excluding zero/undef lanes
409 // * Vector incrementing address -> vector masked store
410 Instruction *InstCombinerImpl::simplifyMaskedScatter(IntrinsicInst &II) {
411   auto *ConstMask = dyn_cast<Constant>(II.getArgOperand(3));
412   if (!ConstMask)
413     return nullptr;
414 
415   // If the mask is all zeros, a scatter does nothing.
416   if (ConstMask->isNullValue())
417     return eraseInstFromFunction(II);
418 
419   // Vector splat address -> scalar store
420   if (auto *SplatPtr = getSplatValue(II.getArgOperand(1))) {
421     // scatter(splat(value), splat(ptr), non-zero-mask) -> store value, ptr
422     if (auto *SplatValue = getSplatValue(II.getArgOperand(0))) {
423       Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
424       StoreInst *S =
425           new StoreInst(SplatValue, SplatPtr, /*IsVolatile=*/false, Alignment);
426       S->copyMetadata(II);
427       return S;
428     }
429     // scatter(vector, splat(ptr), splat(true)) -> store extract(vector,
430     // lastlane), ptr
431     if (ConstMask->isAllOnesValue()) {
432       Align Alignment = cast<ConstantInt>(II.getArgOperand(2))->getAlignValue();
433       VectorType *WideLoadTy = cast<VectorType>(II.getArgOperand(1)->getType());
434       ElementCount VF = WideLoadTy->getElementCount();
435       Constant *EC =
436           ConstantInt::get(Builder.getInt32Ty(), VF.getKnownMinValue());
437       Value *RunTimeVF = VF.isScalable() ? Builder.CreateVScale(EC) : EC;
438       Value *LastLane = Builder.CreateSub(RunTimeVF, Builder.getInt32(1));
439       Value *Extract =
440           Builder.CreateExtractElement(II.getArgOperand(0), LastLane);
441       StoreInst *S =
442           new StoreInst(Extract, SplatPtr, /*IsVolatile=*/false, Alignment);
443       S->copyMetadata(II);
444       return S;
445     }
446   }
447   if (isa<ScalableVectorType>(ConstMask->getType()))
448     return nullptr;
449 
450   // Use masked off lanes to simplify operands via SimplifyDemandedVectorElts
451   APInt DemandedElts = possiblyDemandedEltsInMask(ConstMask);
452   APInt UndefElts(DemandedElts.getBitWidth(), 0);
453   if (Value *V =
454           SimplifyDemandedVectorElts(II.getOperand(0), DemandedElts, UndefElts))
455     return replaceOperand(II, 0, V);
456   if (Value *V =
457           SimplifyDemandedVectorElts(II.getOperand(1), DemandedElts, UndefElts))
458     return replaceOperand(II, 1, V);
459 
460   return nullptr;
461 }
462 
463 /// This function transforms launder.invariant.group and strip.invariant.group
464 /// like:
465 /// launder(launder(%x)) -> launder(%x)       (the result is not the argument)
466 /// launder(strip(%x)) -> launder(%x)
467 /// strip(strip(%x)) -> strip(%x)             (the result is not the argument)
468 /// strip(launder(%x)) -> strip(%x)
469 /// This is legal because it preserves the most recent information about
470 /// the presence or absence of invariant.group.
471 static Instruction *simplifyInvariantGroupIntrinsic(IntrinsicInst &II,
472                                                     InstCombinerImpl &IC) {
473   auto *Arg = II.getArgOperand(0);
474   auto *StrippedArg = Arg->stripPointerCasts();
475   auto *StrippedInvariantGroupsArg = StrippedArg;
476   while (auto *Intr = dyn_cast<IntrinsicInst>(StrippedInvariantGroupsArg)) {
477     if (Intr->getIntrinsicID() != Intrinsic::launder_invariant_group &&
478         Intr->getIntrinsicID() != Intrinsic::strip_invariant_group)
479       break;
480     StrippedInvariantGroupsArg = Intr->getArgOperand(0)->stripPointerCasts();
481   }
482   if (StrippedArg == StrippedInvariantGroupsArg)
483     return nullptr; // No launders/strips to remove.
484 
485   Value *Result = nullptr;
486 
487   if (II.getIntrinsicID() == Intrinsic::launder_invariant_group)
488     Result = IC.Builder.CreateLaunderInvariantGroup(StrippedInvariantGroupsArg);
489   else if (II.getIntrinsicID() == Intrinsic::strip_invariant_group)
490     Result = IC.Builder.CreateStripInvariantGroup(StrippedInvariantGroupsArg);
491   else
492     llvm_unreachable(
493         "simplifyInvariantGroupIntrinsic only handles launder and strip");
494   if (Result->getType()->getPointerAddressSpace() !=
495       II.getType()->getPointerAddressSpace())
496     Result = IC.Builder.CreateAddrSpaceCast(Result, II.getType());
497   if (Result->getType() != II.getType())
498     Result = IC.Builder.CreateBitCast(Result, II.getType());
499 
500   return cast<Instruction>(Result);
501 }
502 
503 static Instruction *foldCttzCtlz(IntrinsicInst &II, InstCombinerImpl &IC) {
504   assert((II.getIntrinsicID() == Intrinsic::cttz ||
505           II.getIntrinsicID() == Intrinsic::ctlz) &&
506          "Expected cttz or ctlz intrinsic");
507   bool IsTZ = II.getIntrinsicID() == Intrinsic::cttz;
508   Value *Op0 = II.getArgOperand(0);
509   Value *Op1 = II.getArgOperand(1);
510   Value *X;
511   // ctlz(bitreverse(x)) -> cttz(x)
512   // cttz(bitreverse(x)) -> ctlz(x)
513   if (match(Op0, m_BitReverse(m_Value(X)))) {
514     Intrinsic::ID ID = IsTZ ? Intrinsic::ctlz : Intrinsic::cttz;
515     Function *F = Intrinsic::getDeclaration(II.getModule(), ID, II.getType());
516     return CallInst::Create(F, {X, II.getArgOperand(1)});
517   }
518 
519   if (II.getType()->isIntOrIntVectorTy(1)) {
520     // ctlz/cttz i1 Op0 --> not Op0
521     if (match(Op1, m_Zero()))
522       return BinaryOperator::CreateNot(Op0);
523     // If zero is poison, then the input can be assumed to be "true", so the
524     // instruction simplifies to "false".
525     assert(match(Op1, m_One()) && "Expected ctlz/cttz operand to be 0 or 1");
526     return IC.replaceInstUsesWith(II, ConstantInt::getNullValue(II.getType()));
527   }
528 
529   // If the operand is a select with constant arm(s), try to hoist ctlz/cttz.
530   if (auto *Sel = dyn_cast<SelectInst>(Op0))
531     if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
532       return R;
533 
534   if (IsTZ) {
535     // cttz(-x) -> cttz(x)
536     if (match(Op0, m_Neg(m_Value(X))))
537       return IC.replaceOperand(II, 0, X);
538 
539     // cttz(sext(x)) -> cttz(zext(x))
540     if (match(Op0, m_OneUse(m_SExt(m_Value(X))))) {
541       auto *Zext = IC.Builder.CreateZExt(X, II.getType());
542       auto *CttzZext =
543           IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, Zext, Op1);
544       return IC.replaceInstUsesWith(II, CttzZext);
545     }
546 
547     // Zext doesn't change the number of trailing zeros, so narrow:
548     // cttz(zext(x)) -> zext(cttz(x)) if the 'ZeroIsPoison' parameter is 'true'.
549     if (match(Op0, m_OneUse(m_ZExt(m_Value(X)))) && match(Op1, m_One())) {
550       auto *Cttz = IC.Builder.CreateBinaryIntrinsic(Intrinsic::cttz, X,
551                                                     IC.Builder.getTrue());
552       auto *ZextCttz = IC.Builder.CreateZExt(Cttz, II.getType());
553       return IC.replaceInstUsesWith(II, ZextCttz);
554     }
555 
556     // cttz(abs(x)) -> cttz(x)
557     // cttz(nabs(x)) -> cttz(x)
558     Value *Y;
559     SelectPatternFlavor SPF = matchSelectPattern(Op0, X, Y).Flavor;
560     if (SPF == SPF_ABS || SPF == SPF_NABS)
561       return IC.replaceOperand(II, 0, X);
562 
563     if (match(Op0, m_Intrinsic<Intrinsic::abs>(m_Value(X))))
564       return IC.replaceOperand(II, 0, X);
565   }
566 
567   KnownBits Known = IC.computeKnownBits(Op0, 0, &II);
568 
569   // Create a mask for bits above (ctlz) or below (cttz) the first known one.
570   unsigned PossibleZeros = IsTZ ? Known.countMaxTrailingZeros()
571                                 : Known.countMaxLeadingZeros();
572   unsigned DefiniteZeros = IsTZ ? Known.countMinTrailingZeros()
573                                 : Known.countMinLeadingZeros();
574 
575   // If all bits above (ctlz) or below (cttz) the first known one are known
576   // zero, this value is constant.
577   // FIXME: This should be in InstSimplify because we're replacing an
578   // instruction with a constant.
579   if (PossibleZeros == DefiniteZeros) {
580     auto *C = ConstantInt::get(Op0->getType(), DefiniteZeros);
581     return IC.replaceInstUsesWith(II, C);
582   }
583 
584   // If the input to cttz/ctlz is known to be non-zero,
585   // then change the 'ZeroIsPoison' parameter to 'true'
586   // because we know the zero behavior can't affect the result.
587   if (!Known.One.isZero() ||
588       isKnownNonZero(Op0, IC.getDataLayout(), 0, &IC.getAssumptionCache(), &II,
589                      &IC.getDominatorTree())) {
590     if (!match(II.getArgOperand(1), m_One()))
591       return IC.replaceOperand(II, 1, IC.Builder.getTrue());
592   }
593 
594   // Add range metadata since known bits can't completely reflect what we know.
595   // TODO: Handle splat vectors.
596   auto *IT = dyn_cast<IntegerType>(Op0->getType());
597   if (IT && IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
598     Metadata *LowAndHigh[] = {
599         ConstantAsMetadata::get(ConstantInt::get(IT, DefiniteZeros)),
600         ConstantAsMetadata::get(ConstantInt::get(IT, PossibleZeros + 1))};
601     II.setMetadata(LLVMContext::MD_range,
602                    MDNode::get(II.getContext(), LowAndHigh));
603     return &II;
604   }
605 
606   return nullptr;
607 }
608 
609 static Instruction *foldCtpop(IntrinsicInst &II, InstCombinerImpl &IC) {
610   assert(II.getIntrinsicID() == Intrinsic::ctpop &&
611          "Expected ctpop intrinsic");
612   Type *Ty = II.getType();
613   unsigned BitWidth = Ty->getScalarSizeInBits();
614   Value *Op0 = II.getArgOperand(0);
615   Value *X, *Y;
616 
617   // ctpop(bitreverse(x)) -> ctpop(x)
618   // ctpop(bswap(x)) -> ctpop(x)
619   if (match(Op0, m_BitReverse(m_Value(X))) || match(Op0, m_BSwap(m_Value(X))))
620     return IC.replaceOperand(II, 0, X);
621 
622   // ctpop(rot(x)) -> ctpop(x)
623   if ((match(Op0, m_FShl(m_Value(X), m_Value(Y), m_Value())) ||
624        match(Op0, m_FShr(m_Value(X), m_Value(Y), m_Value()))) &&
625       X == Y)
626     return IC.replaceOperand(II, 0, X);
627 
628   // ctpop(x | -x) -> bitwidth - cttz(x, false)
629   if (Op0->hasOneUse() &&
630       match(Op0, m_c_Or(m_Value(X), m_Neg(m_Deferred(X))))) {
631     Function *F =
632         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
633     auto *Cttz = IC.Builder.CreateCall(F, {X, IC.Builder.getFalse()});
634     auto *Bw = ConstantInt::get(Ty, APInt(BitWidth, BitWidth));
635     return IC.replaceInstUsesWith(II, IC.Builder.CreateSub(Bw, Cttz));
636   }
637 
638   // ctpop(~x & (x - 1)) -> cttz(x, false)
639   if (match(Op0,
640             m_c_And(m_Not(m_Value(X)), m_Add(m_Deferred(X), m_AllOnes())))) {
641     Function *F =
642         Intrinsic::getDeclaration(II.getModule(), Intrinsic::cttz, Ty);
643     return CallInst::Create(F, {X, IC.Builder.getFalse()});
644   }
645 
646   // Zext doesn't change the number of set bits, so narrow:
647   // ctpop (zext X) --> zext (ctpop X)
648   if (match(Op0, m_OneUse(m_ZExt(m_Value(X))))) {
649     Value *NarrowPop = IC.Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, X);
650     return CastInst::Create(Instruction::ZExt, NarrowPop, Ty);
651   }
652 
653   // If the operand is a select with constant arm(s), try to hoist ctpop.
654   if (auto *Sel = dyn_cast<SelectInst>(Op0))
655     if (Instruction *R = IC.FoldOpIntoSelect(II, Sel))
656       return R;
657 
658   KnownBits Known(BitWidth);
659   IC.computeKnownBits(Op0, Known, 0, &II);
660 
661   // If all bits are zero except for exactly one fixed bit, then the result
662   // must be 0 or 1, and we can get that answer by shifting to LSB:
663   // ctpop (X & 32) --> (X & 32) >> 5
664   if ((~Known.Zero).isPowerOf2())
665     return BinaryOperator::CreateLShr(
666         Op0, ConstantInt::get(Ty, (~Known.Zero).exactLogBase2()));
667 
668   // FIXME: Try to simplify vectors of integers.
669   auto *IT = dyn_cast<IntegerType>(Ty);
670   if (!IT)
671     return nullptr;
672 
673   // Add range metadata since known bits can't completely reflect what we know.
674   unsigned MinCount = Known.countMinPopulation();
675   unsigned MaxCount = Known.countMaxPopulation();
676   if (IT->getBitWidth() != 1 && !II.getMetadata(LLVMContext::MD_range)) {
677     Metadata *LowAndHigh[] = {
678         ConstantAsMetadata::get(ConstantInt::get(IT, MinCount)),
679         ConstantAsMetadata::get(ConstantInt::get(IT, MaxCount + 1))};
680     II.setMetadata(LLVMContext::MD_range,
681                    MDNode::get(II.getContext(), LowAndHigh));
682     return &II;
683   }
684 
685   return nullptr;
686 }
687 
688 /// Convert a table lookup to shufflevector if the mask is constant.
689 /// This could benefit tbl1 if the mask is { 7,6,5,4,3,2,1,0 }, in
690 /// which case we could lower the shufflevector with rev64 instructions
691 /// as it's actually a byte reverse.
692 static Value *simplifyNeonTbl1(const IntrinsicInst &II,
693                                InstCombiner::BuilderTy &Builder) {
694   // Bail out if the mask is not a constant.
695   auto *C = dyn_cast<Constant>(II.getArgOperand(1));
696   if (!C)
697     return nullptr;
698 
699   auto *VecTy = cast<FixedVectorType>(II.getType());
700   unsigned NumElts = VecTy->getNumElements();
701 
702   // Only perform this transformation for <8 x i8> vector types.
703   if (!VecTy->getElementType()->isIntegerTy(8) || NumElts != 8)
704     return nullptr;
705 
706   int Indexes[8];
707 
708   for (unsigned I = 0; I < NumElts; ++I) {
709     Constant *COp = C->getAggregateElement(I);
710 
711     if (!COp || !isa<ConstantInt>(COp))
712       return nullptr;
713 
714     Indexes[I] = cast<ConstantInt>(COp)->getLimitedValue();
715 
716     // Make sure the mask indices are in range.
717     if ((unsigned)Indexes[I] >= NumElts)
718       return nullptr;
719   }
720 
721   auto *V1 = II.getArgOperand(0);
722   auto *V2 = Constant::getNullValue(V1->getType());
723   return Builder.CreateShuffleVector(V1, V2, makeArrayRef(Indexes));
724 }
725 
726 // Returns true iff the 2 intrinsics have the same operands, limiting the
727 // comparison to the first NumOperands.
728 static bool haveSameOperands(const IntrinsicInst &I, const IntrinsicInst &E,
729                              unsigned NumOperands) {
730   assert(I.arg_size() >= NumOperands && "Not enough operands");
731   assert(E.arg_size() >= NumOperands && "Not enough operands");
732   for (unsigned i = 0; i < NumOperands; i++)
733     if (I.getArgOperand(i) != E.getArgOperand(i))
734       return false;
735   return true;
736 }
737 
738 // Remove trivially empty start/end intrinsic ranges, i.e. a start
739 // immediately followed by an end (ignoring debuginfo or other
740 // start/end intrinsics in between). As this handles only the most trivial
741 // cases, tracking the nesting level is not needed:
742 //
743 //   call @llvm.foo.start(i1 0)
744 //   call @llvm.foo.start(i1 0) ; This one won't be skipped: it will be removed
745 //   call @llvm.foo.end(i1 0)
746 //   call @llvm.foo.end(i1 0) ; &I
747 static bool
748 removeTriviallyEmptyRange(IntrinsicInst &EndI, InstCombinerImpl &IC,
749                           std::function<bool(const IntrinsicInst &)> IsStart) {
750   // We start from the end intrinsic and scan backwards, so that InstCombine
751   // has already processed (and potentially removed) all the instructions
752   // before the end intrinsic.
753   BasicBlock::reverse_iterator BI(EndI), BE(EndI.getParent()->rend());
754   for (; BI != BE; ++BI) {
755     if (auto *I = dyn_cast<IntrinsicInst>(&*BI)) {
756       if (I->isDebugOrPseudoInst() ||
757           I->getIntrinsicID() == EndI.getIntrinsicID())
758         continue;
759       if (IsStart(*I)) {
760         if (haveSameOperands(EndI, *I, EndI.arg_size())) {
761           IC.eraseInstFromFunction(*I);
762           IC.eraseInstFromFunction(EndI);
763           return true;
764         }
765         // Skip start intrinsics that don't pair with this end intrinsic.
766         continue;
767       }
768     }
769     break;
770   }
771 
772   return false;
773 }
774 
775 Instruction *InstCombinerImpl::visitVAEndInst(VAEndInst &I) {
776   removeTriviallyEmptyRange(I, *this, [](const IntrinsicInst &I) {
777     return I.getIntrinsicID() == Intrinsic::vastart ||
778            I.getIntrinsicID() == Intrinsic::vacopy;
779   });
780   return nullptr;
781 }
782 
783 static CallInst *canonicalizeConstantArg0ToArg1(CallInst &Call) {
784   assert(Call.arg_size() > 1 && "Need at least 2 args to swap");
785   Value *Arg0 = Call.getArgOperand(0), *Arg1 = Call.getArgOperand(1);
786   if (isa<Constant>(Arg0) && !isa<Constant>(Arg1)) {
787     Call.setArgOperand(0, Arg1);
788     Call.setArgOperand(1, Arg0);
789     return &Call;
790   }
791   return nullptr;
792 }
793 
794 /// Creates a result tuple for an overflow intrinsic \p II with a given
795 /// \p Result and a constant \p Overflow value.
796 static Instruction *createOverflowTuple(IntrinsicInst *II, Value *Result,
797                                         Constant *Overflow) {
798   Constant *V[] = {PoisonValue::get(Result->getType()), Overflow};
799   StructType *ST = cast<StructType>(II->getType());
800   Constant *Struct = ConstantStruct::get(ST, V);
801   return InsertValueInst::Create(Struct, Result, 0);
802 }
803 
804 Instruction *
805 InstCombinerImpl::foldIntrinsicWithOverflowCommon(IntrinsicInst *II) {
806   WithOverflowInst *WO = cast<WithOverflowInst>(II);
807   Value *OperationResult = nullptr;
808   Constant *OverflowResult = nullptr;
809   if (OptimizeOverflowCheck(WO->getBinaryOp(), WO->isSigned(), WO->getLHS(),
810                             WO->getRHS(), *WO, OperationResult, OverflowResult))
811     return createOverflowTuple(WO, OperationResult, OverflowResult);
812   return nullptr;
813 }
814 
815 static Optional<bool> getKnownSign(Value *Op, Instruction *CxtI,
816                                    const DataLayout &DL, AssumptionCache *AC,
817                                    DominatorTree *DT) {
818   KnownBits Known = computeKnownBits(Op, DL, 0, AC, CxtI, DT);
819   if (Known.isNonNegative())
820     return false;
821   if (Known.isNegative())
822     return true;
823 
824   Value *X, *Y;
825   if (match(Op, m_NSWSub(m_Value(X), m_Value(Y))))
826     return isImpliedByDomCondition(ICmpInst::ICMP_SLT, X, Y, CxtI, DL);
827 
828   return isImpliedByDomCondition(
829       ICmpInst::ICMP_SLT, Op, Constant::getNullValue(Op->getType()), CxtI, DL);
830 }
831 
832 /// Try to canonicalize min/max(X + C0, C1) as min/max(X, C1 - C0) + C0. This
833 /// can trigger other combines.
834 static Instruction *moveAddAfterMinMax(IntrinsicInst *II,
835                                        InstCombiner::BuilderTy &Builder) {
836   Intrinsic::ID MinMaxID = II->getIntrinsicID();
837   assert((MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin ||
838           MinMaxID == Intrinsic::umax || MinMaxID == Intrinsic::umin) &&
839          "Expected a min or max intrinsic");
840 
841   // TODO: Match vectors with undef elements, but undef may not propagate.
842   Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
843   Value *X;
844   const APInt *C0, *C1;
845   if (!match(Op0, m_OneUse(m_Add(m_Value(X), m_APInt(C0)))) ||
846       !match(Op1, m_APInt(C1)))
847     return nullptr;
848 
849   // Check for necessary no-wrap and overflow constraints.
850   bool IsSigned = MinMaxID == Intrinsic::smax || MinMaxID == Intrinsic::smin;
851   auto *Add = cast<BinaryOperator>(Op0);
852   if ((IsSigned && !Add->hasNoSignedWrap()) ||
853       (!IsSigned && !Add->hasNoUnsignedWrap()))
854     return nullptr;
855 
856   // If the constant difference overflows, then instsimplify should reduce the
857   // min/max to the add or C1.
858   bool Overflow;
859   APInt CDiff =
860       IsSigned ? C1->ssub_ov(*C0, Overflow) : C1->usub_ov(*C0, Overflow);
861   assert(!Overflow && "Expected simplify of min/max");
862 
863   // min/max (add X, C0), C1 --> add (min/max X, C1 - C0), C0
864   // Note: the "mismatched" no-overflow setting does not propagate.
865   Constant *NewMinMaxC = ConstantInt::get(II->getType(), CDiff);
866   Value *NewMinMax = Builder.CreateBinaryIntrinsic(MinMaxID, X, NewMinMaxC);
867   return IsSigned ? BinaryOperator::CreateNSWAdd(NewMinMax, Add->getOperand(1))
868                   : BinaryOperator::CreateNUWAdd(NewMinMax, Add->getOperand(1));
869 }
870 /// Match a sadd_sat or ssub_sat which is using min/max to clamp the value.
871 Instruction *InstCombinerImpl::matchSAddSubSat(IntrinsicInst &MinMax1) {
872   Type *Ty = MinMax1.getType();
873 
874   // We are looking for a tree of:
875   // max(INT_MIN, min(INT_MAX, add(sext(A), sext(B))))
876   // Where the min and max could be reversed
877   Instruction *MinMax2;
878   BinaryOperator *AddSub;
879   const APInt *MinValue, *MaxValue;
880   if (match(&MinMax1, m_SMin(m_Instruction(MinMax2), m_APInt(MaxValue)))) {
881     if (!match(MinMax2, m_SMax(m_BinOp(AddSub), m_APInt(MinValue))))
882       return nullptr;
883   } else if (match(&MinMax1,
884                    m_SMax(m_Instruction(MinMax2), m_APInt(MinValue)))) {
885     if (!match(MinMax2, m_SMin(m_BinOp(AddSub), m_APInt(MaxValue))))
886       return nullptr;
887   } else
888     return nullptr;
889 
890   // Check that the constants clamp a saturate, and that the new type would be
891   // sensible to convert to.
892   if (!(*MaxValue + 1).isPowerOf2() || -*MinValue != *MaxValue + 1)
893     return nullptr;
894   // In what bitwidth can this be treated as saturating arithmetics?
895   unsigned NewBitWidth = (*MaxValue + 1).logBase2() + 1;
896   // FIXME: This isn't quite right for vectors, but using the scalar type is a
897   // good first approximation for what should be done there.
898   if (!shouldChangeType(Ty->getScalarType()->getIntegerBitWidth(), NewBitWidth))
899     return nullptr;
900 
901   // Also make sure that the inner min/max and the add/sub have one use.
902   if (!MinMax2->hasOneUse() || !AddSub->hasOneUse())
903     return nullptr;
904 
905   // Create the new type (which can be a vector type)
906   Type *NewTy = Ty->getWithNewBitWidth(NewBitWidth);
907 
908   Intrinsic::ID IntrinsicID;
909   if (AddSub->getOpcode() == Instruction::Add)
910     IntrinsicID = Intrinsic::sadd_sat;
911   else if (AddSub->getOpcode() == Instruction::Sub)
912     IntrinsicID = Intrinsic::ssub_sat;
913   else
914     return nullptr;
915 
916   // The two operands of the add/sub must be nsw-truncatable to the NewTy. This
917   // is usually achieved via a sext from a smaller type.
918   if (ComputeMaxSignificantBits(AddSub->getOperand(0), 0, AddSub) >
919           NewBitWidth ||
920       ComputeMaxSignificantBits(AddSub->getOperand(1), 0, AddSub) > NewBitWidth)
921     return nullptr;
922 
923   // Finally create and return the sat intrinsic, truncated to the new type
924   Function *F = Intrinsic::getDeclaration(MinMax1.getModule(), IntrinsicID, NewTy);
925   Value *AT = Builder.CreateTrunc(AddSub->getOperand(0), NewTy);
926   Value *BT = Builder.CreateTrunc(AddSub->getOperand(1), NewTy);
927   Value *Sat = Builder.CreateCall(F, {AT, BT});
928   return CastInst::Create(Instruction::SExt, Sat, Ty);
929 }
930 
931 
932 /// If we have a clamp pattern like max (min X, 42), 41 -- where the output
933 /// can only be one of two possible constant values -- turn that into a select
934 /// of constants.
935 static Instruction *foldClampRangeOfTwo(IntrinsicInst *II,
936                                         InstCombiner::BuilderTy &Builder) {
937   Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
938   Value *X;
939   const APInt *C0, *C1;
940   if (!match(I1, m_APInt(C1)) || !I0->hasOneUse())
941     return nullptr;
942 
943   CmpInst::Predicate Pred = CmpInst::BAD_ICMP_PREDICATE;
944   switch (II->getIntrinsicID()) {
945   case Intrinsic::smax:
946     if (match(I0, m_SMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
947       Pred = ICmpInst::ICMP_SGT;
948     break;
949   case Intrinsic::smin:
950     if (match(I0, m_SMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
951       Pred = ICmpInst::ICMP_SLT;
952     break;
953   case Intrinsic::umax:
954     if (match(I0, m_UMin(m_Value(X), m_APInt(C0))) && *C0 == *C1 + 1)
955       Pred = ICmpInst::ICMP_UGT;
956     break;
957   case Intrinsic::umin:
958     if (match(I0, m_UMax(m_Value(X), m_APInt(C0))) && *C1 == *C0 + 1)
959       Pred = ICmpInst::ICMP_ULT;
960     break;
961   default:
962     llvm_unreachable("Expected min/max intrinsic");
963   }
964   if (Pred == CmpInst::BAD_ICMP_PREDICATE)
965     return nullptr;
966 
967   // max (min X, 42), 41 --> X > 41 ? 42 : 41
968   // min (max X, 42), 43 --> X < 43 ? 42 : 43
969   Value *Cmp = Builder.CreateICmp(Pred, X, I1);
970   return SelectInst::Create(Cmp, ConstantInt::get(II->getType(), *C0), I1);
971 }
972 
973 /// If this min/max has a constant operand and an operand that is a matching
974 /// min/max with a constant operand, constant-fold the 2 constant operands.
975 static Instruction *reassociateMinMaxWithConstants(IntrinsicInst *II) {
976   Intrinsic::ID MinMaxID = II->getIntrinsicID();
977   auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
978   if (!LHS || LHS->getIntrinsicID() != MinMaxID)
979     return nullptr;
980 
981   Constant *C0, *C1;
982   if (!match(LHS->getArgOperand(1), m_ImmConstant(C0)) ||
983       !match(II->getArgOperand(1), m_ImmConstant(C1)))
984     return nullptr;
985 
986   // max (max X, C0), C1 --> max X, (max C0, C1) --> max X, NewC
987   ICmpInst::Predicate Pred = MinMaxIntrinsic::getPredicate(MinMaxID);
988   Constant *CondC = ConstantExpr::getICmp(Pred, C0, C1);
989   Constant *NewC = ConstantExpr::getSelect(CondC, C0, C1);
990 
991   Module *Mod = II->getModule();
992   Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
993   return CallInst::Create(MinMax, {LHS->getArgOperand(0), NewC});
994 }
995 
996 /// If this min/max has a matching min/max operand with a constant, try to push
997 /// the constant operand into this instruction. This can enable more folds.
998 static Instruction *
999 reassociateMinMaxWithConstantInOperand(IntrinsicInst *II,
1000                                        InstCombiner::BuilderTy &Builder) {
1001   // Match and capture a min/max operand candidate.
1002   Value *X, *Y;
1003   Constant *C;
1004   Instruction *Inner;
1005   if (!match(II, m_c_MaxOrMin(m_OneUse(m_CombineAnd(
1006                                   m_Instruction(Inner),
1007                                   m_MaxOrMin(m_Value(X), m_ImmConstant(C)))),
1008                               m_Value(Y))))
1009     return nullptr;
1010 
1011   // The inner op must match. Check for constants to avoid infinite loops.
1012   Intrinsic::ID MinMaxID = II->getIntrinsicID();
1013   auto *InnerMM = dyn_cast<IntrinsicInst>(Inner);
1014   if (!InnerMM || InnerMM->getIntrinsicID() != MinMaxID ||
1015       match(X, m_ImmConstant()) || match(Y, m_ImmConstant()))
1016     return nullptr;
1017 
1018   // max (max X, C), Y --> max (max X, Y), C
1019   Function *MinMax =
1020       Intrinsic::getDeclaration(II->getModule(), MinMaxID, II->getType());
1021   Value *NewInner = Builder.CreateBinaryIntrinsic(MinMaxID, X, Y);
1022   NewInner->takeName(Inner);
1023   return CallInst::Create(MinMax, {NewInner, C});
1024 }
1025 
1026 /// Reduce a sequence of min/max intrinsics with a common operand.
1027 static Instruction *factorizeMinMaxTree(IntrinsicInst *II) {
1028   // Match 3 of the same min/max ops. Example: umin(umin(), umin()).
1029   auto *LHS = dyn_cast<IntrinsicInst>(II->getArgOperand(0));
1030   auto *RHS = dyn_cast<IntrinsicInst>(II->getArgOperand(1));
1031   Intrinsic::ID MinMaxID = II->getIntrinsicID();
1032   if (!LHS || !RHS || LHS->getIntrinsicID() != MinMaxID ||
1033       RHS->getIntrinsicID() != MinMaxID ||
1034       (!LHS->hasOneUse() && !RHS->hasOneUse()))
1035     return nullptr;
1036 
1037   Value *A = LHS->getArgOperand(0);
1038   Value *B = LHS->getArgOperand(1);
1039   Value *C = RHS->getArgOperand(0);
1040   Value *D = RHS->getArgOperand(1);
1041 
1042   // Look for a common operand.
1043   Value *MinMaxOp = nullptr;
1044   Value *ThirdOp = nullptr;
1045   if (LHS->hasOneUse()) {
1046     // If the LHS is only used in this chain and the RHS is used outside of it,
1047     // reuse the RHS min/max because that will eliminate the LHS.
1048     if (D == A || C == A) {
1049       // min(min(a, b), min(c, a)) --> min(min(c, a), b)
1050       // min(min(a, b), min(a, d)) --> min(min(a, d), b)
1051       MinMaxOp = RHS;
1052       ThirdOp = B;
1053     } else if (D == B || C == B) {
1054       // min(min(a, b), min(c, b)) --> min(min(c, b), a)
1055       // min(min(a, b), min(b, d)) --> min(min(b, d), a)
1056       MinMaxOp = RHS;
1057       ThirdOp = A;
1058     }
1059   } else {
1060     assert(RHS->hasOneUse() && "Expected one-use operand");
1061     // Reuse the LHS. This will eliminate the RHS.
1062     if (D == A || D == B) {
1063       // min(min(a, b), min(c, a)) --> min(min(a, b), c)
1064       // min(min(a, b), min(c, b)) --> min(min(a, b), c)
1065       MinMaxOp = LHS;
1066       ThirdOp = C;
1067     } else if (C == A || C == B) {
1068       // min(min(a, b), min(b, d)) --> min(min(a, b), d)
1069       // min(min(a, b), min(c, b)) --> min(min(a, b), d)
1070       MinMaxOp = LHS;
1071       ThirdOp = D;
1072     }
1073   }
1074 
1075   if (!MinMaxOp || !ThirdOp)
1076     return nullptr;
1077 
1078   Module *Mod = II->getModule();
1079   Function *MinMax = Intrinsic::getDeclaration(Mod, MinMaxID, II->getType());
1080   return CallInst::Create(MinMax, { MinMaxOp, ThirdOp });
1081 }
1082 
1083 /// If all arguments of the intrinsic are unary shuffles with the same mask,
1084 /// try to shuffle after the intrinsic.
1085 static Instruction *
1086 foldShuffledIntrinsicOperands(IntrinsicInst *II,
1087                               InstCombiner::BuilderTy &Builder) {
1088   // TODO: This should be extended to handle other intrinsics like fshl, ctpop,
1089   //       etc. Use llvm::isTriviallyVectorizable() and related to determine
1090   //       which intrinsics are safe to shuffle?
1091   switch (II->getIntrinsicID()) {
1092   case Intrinsic::smax:
1093   case Intrinsic::smin:
1094   case Intrinsic::umax:
1095   case Intrinsic::umin:
1096   case Intrinsic::fma:
1097   case Intrinsic::fshl:
1098   case Intrinsic::fshr:
1099     break;
1100   default:
1101     return nullptr;
1102   }
1103 
1104   Value *X;
1105   ArrayRef<int> Mask;
1106   if (!match(II->getArgOperand(0),
1107              m_Shuffle(m_Value(X), m_Undef(), m_Mask(Mask))))
1108     return nullptr;
1109 
1110   // At least 1 operand must have 1 use because we are creating 2 instructions.
1111   if (none_of(II->args(), [](Value *V) { return V->hasOneUse(); }))
1112     return nullptr;
1113 
1114   // See if all arguments are shuffled with the same mask.
1115   SmallVector<Value *, 4> NewArgs(II->arg_size());
1116   NewArgs[0] = X;
1117   Type *SrcTy = X->getType();
1118   for (unsigned i = 1, e = II->arg_size(); i != e; ++i) {
1119     if (!match(II->getArgOperand(i),
1120                m_Shuffle(m_Value(X), m_Undef(), m_SpecificMask(Mask))) ||
1121         X->getType() != SrcTy)
1122       return nullptr;
1123     NewArgs[i] = X;
1124   }
1125 
1126   // intrinsic (shuf X, M), (shuf Y, M), ... --> shuf (intrinsic X, Y, ...), M
1127   Instruction *FPI = isa<FPMathOperator>(II) ? II : nullptr;
1128   Value *NewIntrinsic =
1129       Builder.CreateIntrinsic(II->getIntrinsicID(), SrcTy, NewArgs, FPI);
1130   return new ShuffleVectorInst(NewIntrinsic, Mask);
1131 }
1132 
1133 /// CallInst simplification. This mostly only handles folding of intrinsic
1134 /// instructions. For normal calls, it allows visitCallBase to do the heavy
1135 /// lifting.
1136 Instruction *InstCombinerImpl::visitCallInst(CallInst &CI) {
1137   // Don't try to simplify calls without uses. It will not do anything useful,
1138   // but will result in the following folds being skipped.
1139   if (!CI.use_empty())
1140     if (Value *V = simplifyCall(&CI, SQ.getWithInstruction(&CI)))
1141       return replaceInstUsesWith(CI, V);
1142 
1143   if (Value *FreedOp = getFreedOperand(&CI, &TLI))
1144     return visitFree(CI, FreedOp);
1145 
1146   // If the caller function (i.e. us, the function that contains this CallInst)
1147   // is nounwind, mark the call as nounwind, even if the callee isn't.
1148   if (CI.getFunction()->doesNotThrow() && !CI.doesNotThrow()) {
1149     CI.setDoesNotThrow();
1150     return &CI;
1151   }
1152 
1153   IntrinsicInst *II = dyn_cast<IntrinsicInst>(&CI);
1154   if (!II) return visitCallBase(CI);
1155 
1156   // For atomic unordered mem intrinsics if len is not a positive or
1157   // not a multiple of element size then behavior is undefined.
1158   if (auto *AMI = dyn_cast<AtomicMemIntrinsic>(II))
1159     if (ConstantInt *NumBytes = dyn_cast<ConstantInt>(AMI->getLength()))
1160       if (NumBytes->getSExtValue() < 0 ||
1161           (NumBytes->getZExtValue() % AMI->getElementSizeInBytes() != 0)) {
1162         CreateNonTerminatorUnreachable(AMI);
1163         assert(AMI->getType()->isVoidTy() &&
1164                "non void atomic unordered mem intrinsic");
1165         return eraseInstFromFunction(*AMI);
1166       }
1167 
1168   // Intrinsics cannot occur in an invoke or a callbr, so handle them here
1169   // instead of in visitCallBase.
1170   if (auto *MI = dyn_cast<AnyMemIntrinsic>(II)) {
1171     bool Changed = false;
1172 
1173     // memmove/cpy/set of zero bytes is a noop.
1174     if (Constant *NumBytes = dyn_cast<Constant>(MI->getLength())) {
1175       if (NumBytes->isNullValue())
1176         return eraseInstFromFunction(CI);
1177     }
1178 
1179     // No other transformations apply to volatile transfers.
1180     if (auto *M = dyn_cast<MemIntrinsic>(MI))
1181       if (M->isVolatile())
1182         return nullptr;
1183 
1184     // If we have a memmove and the source operation is a constant global,
1185     // then the source and dest pointers can't alias, so we can change this
1186     // into a call to memcpy.
1187     if (auto *MMI = dyn_cast<AnyMemMoveInst>(MI)) {
1188       if (GlobalVariable *GVSrc = dyn_cast<GlobalVariable>(MMI->getSource()))
1189         if (GVSrc->isConstant()) {
1190           Module *M = CI.getModule();
1191           Intrinsic::ID MemCpyID =
1192               isa<AtomicMemMoveInst>(MMI)
1193                   ? Intrinsic::memcpy_element_unordered_atomic
1194                   : Intrinsic::memcpy;
1195           Type *Tys[3] = { CI.getArgOperand(0)->getType(),
1196                            CI.getArgOperand(1)->getType(),
1197                            CI.getArgOperand(2)->getType() };
1198           CI.setCalledFunction(Intrinsic::getDeclaration(M, MemCpyID, Tys));
1199           Changed = true;
1200         }
1201     }
1202 
1203     if (AnyMemTransferInst *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1204       // memmove(x,x,size) -> noop.
1205       if (MTI->getSource() == MTI->getDest())
1206         return eraseInstFromFunction(CI);
1207     }
1208 
1209     // If we can determine a pointer alignment that is bigger than currently
1210     // set, update the alignment.
1211     if (auto *MTI = dyn_cast<AnyMemTransferInst>(MI)) {
1212       if (Instruction *I = SimplifyAnyMemTransfer(MTI))
1213         return I;
1214     } else if (auto *MSI = dyn_cast<AnyMemSetInst>(MI)) {
1215       if (Instruction *I = SimplifyAnyMemSet(MSI))
1216         return I;
1217     }
1218 
1219     if (Changed) return II;
1220   }
1221 
1222   // For fixed width vector result intrinsics, use the generic demanded vector
1223   // support.
1224   if (auto *IIFVTy = dyn_cast<FixedVectorType>(II->getType())) {
1225     auto VWidth = IIFVTy->getNumElements();
1226     APInt UndefElts(VWidth, 0);
1227     APInt AllOnesEltMask(APInt::getAllOnes(VWidth));
1228     if (Value *V = SimplifyDemandedVectorElts(II, AllOnesEltMask, UndefElts)) {
1229       if (V != II)
1230         return replaceInstUsesWith(*II, V);
1231       return II;
1232     }
1233   }
1234 
1235   if (II->isCommutative()) {
1236     if (CallInst *NewCall = canonicalizeConstantArg0ToArg1(CI))
1237       return NewCall;
1238   }
1239 
1240   // Unused constrained FP intrinsic calls may have declared side effect, which
1241   // prevents it from being removed. In some cases however the side effect is
1242   // actually absent. To detect this case, call SimplifyConstrainedFPCall. If it
1243   // returns a replacement, the call may be removed.
1244   if (CI.use_empty() && isa<ConstrainedFPIntrinsic>(CI)) {
1245     if (simplifyConstrainedFPCall(&CI, SQ.getWithInstruction(&CI)))
1246       return eraseInstFromFunction(CI);
1247   }
1248 
1249   Intrinsic::ID IID = II->getIntrinsicID();
1250   switch (IID) {
1251   case Intrinsic::objectsize:
1252     if (Value *V = lowerObjectSizeCall(II, DL, &TLI, AA, /*MustSucceed=*/false))
1253       return replaceInstUsesWith(CI, V);
1254     return nullptr;
1255   case Intrinsic::abs: {
1256     Value *IIOperand = II->getArgOperand(0);
1257     bool IntMinIsPoison = cast<Constant>(II->getArgOperand(1))->isOneValue();
1258 
1259     // abs(-x) -> abs(x)
1260     // TODO: Copy nsw if it was present on the neg?
1261     Value *X;
1262     if (match(IIOperand, m_Neg(m_Value(X))))
1263       return replaceOperand(*II, 0, X);
1264     if (match(IIOperand, m_Select(m_Value(), m_Value(X), m_Neg(m_Deferred(X)))))
1265       return replaceOperand(*II, 0, X);
1266     if (match(IIOperand, m_Select(m_Value(), m_Neg(m_Value(X)), m_Deferred(X))))
1267       return replaceOperand(*II, 0, X);
1268 
1269     if (Optional<bool> Sign = getKnownSign(IIOperand, II, DL, &AC, &DT)) {
1270       // abs(x) -> x if x >= 0
1271       if (!*Sign)
1272         return replaceInstUsesWith(*II, IIOperand);
1273 
1274       // abs(x) -> -x if x < 0
1275       if (IntMinIsPoison)
1276         return BinaryOperator::CreateNSWNeg(IIOperand);
1277       return BinaryOperator::CreateNeg(IIOperand);
1278     }
1279 
1280     // abs (sext X) --> zext (abs X*)
1281     // Clear the IsIntMin (nsw) bit on the abs to allow narrowing.
1282     if (match(IIOperand, m_OneUse(m_SExt(m_Value(X))))) {
1283       Value *NarrowAbs =
1284           Builder.CreateBinaryIntrinsic(Intrinsic::abs, X, Builder.getFalse());
1285       return CastInst::Create(Instruction::ZExt, NarrowAbs, II->getType());
1286     }
1287 
1288     // Match a complicated way to check if a number is odd/even:
1289     // abs (srem X, 2) --> and X, 1
1290     const APInt *C;
1291     if (match(IIOperand, m_SRem(m_Value(X), m_APInt(C))) && *C == 2)
1292       return BinaryOperator::CreateAnd(X, ConstantInt::get(II->getType(), 1));
1293 
1294     break;
1295   }
1296   case Intrinsic::umin: {
1297     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1298     // umin(x, 1) == zext(x != 0)
1299     if (match(I1, m_One())) {
1300       Value *Zero = Constant::getNullValue(I0->getType());
1301       Value *Cmp = Builder.CreateICmpNE(I0, Zero);
1302       return CastInst::Create(Instruction::ZExt, Cmp, II->getType());
1303     }
1304     LLVM_FALLTHROUGH;
1305   }
1306   case Intrinsic::umax: {
1307     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1308     Value *X, *Y;
1309     if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_ZExt(m_Value(Y))) &&
1310         (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
1311       Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
1312       return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
1313     }
1314     Constant *C;
1315     if (match(I0, m_ZExt(m_Value(X))) && match(I1, m_Constant(C)) &&
1316         I0->hasOneUse()) {
1317       Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
1318       if (ConstantExpr::getZExt(NarrowC, II->getType()) == C) {
1319         Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
1320         return CastInst::Create(Instruction::ZExt, NarrowMaxMin, II->getType());
1321       }
1322     }
1323     // If both operands of unsigned min/max are sign-extended, it is still ok
1324     // to narrow the operation.
1325     LLVM_FALLTHROUGH;
1326   }
1327   case Intrinsic::smax:
1328   case Intrinsic::smin: {
1329     Value *I0 = II->getArgOperand(0), *I1 = II->getArgOperand(1);
1330     Value *X, *Y;
1331     if (match(I0, m_SExt(m_Value(X))) && match(I1, m_SExt(m_Value(Y))) &&
1332         (I0->hasOneUse() || I1->hasOneUse()) && X->getType() == Y->getType()) {
1333       Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, Y);
1334       return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
1335     }
1336 
1337     Constant *C;
1338     if (match(I0, m_SExt(m_Value(X))) && match(I1, m_Constant(C)) &&
1339         I0->hasOneUse()) {
1340       Constant *NarrowC = ConstantExpr::getTrunc(C, X->getType());
1341       if (ConstantExpr::getSExt(NarrowC, II->getType()) == C) {
1342         Value *NarrowMaxMin = Builder.CreateBinaryIntrinsic(IID, X, NarrowC);
1343         return CastInst::Create(Instruction::SExt, NarrowMaxMin, II->getType());
1344       }
1345     }
1346 
1347     if (IID == Intrinsic::smax || IID == Intrinsic::smin) {
1348       // smax (neg nsw X), (neg nsw Y) --> neg nsw (smin X, Y)
1349       // smin (neg nsw X), (neg nsw Y) --> neg nsw (smax X, Y)
1350       // TODO: Canonicalize neg after min/max if I1 is constant.
1351       if (match(I0, m_NSWNeg(m_Value(X))) && match(I1, m_NSWNeg(m_Value(Y))) &&
1352           (I0->hasOneUse() || I1->hasOneUse())) {
1353         Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1354         Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, X, Y);
1355         return BinaryOperator::CreateNSWNeg(InvMaxMin);
1356       }
1357     }
1358 
1359     // If we can eliminate ~A and Y is free to invert:
1360     // max ~A, Y --> ~(min A, ~Y)
1361     //
1362     // Examples:
1363     // max ~A, ~Y --> ~(min A, Y)
1364     // max ~A, C --> ~(min A, ~C)
1365     // max ~A, (max ~Y, ~Z) --> ~min( A, (min Y, Z))
1366     auto moveNotAfterMinMax = [&](Value *X, Value *Y) -> Instruction * {
1367       Value *A;
1368       if (match(X, m_OneUse(m_Not(m_Value(A)))) &&
1369           !isFreeToInvert(A, A->hasOneUse()) &&
1370           isFreeToInvert(Y, Y->hasOneUse())) {
1371         Value *NotY = Builder.CreateNot(Y);
1372         Intrinsic::ID InvID = getInverseMinMaxIntrinsic(IID);
1373         Value *InvMaxMin = Builder.CreateBinaryIntrinsic(InvID, A, NotY);
1374         return BinaryOperator::CreateNot(InvMaxMin);
1375       }
1376       return nullptr;
1377     };
1378 
1379     if (Instruction *I = moveNotAfterMinMax(I0, I1))
1380       return I;
1381     if (Instruction *I = moveNotAfterMinMax(I1, I0))
1382       return I;
1383 
1384     if (Instruction *I = moveAddAfterMinMax(II, Builder))
1385       return I;
1386 
1387     // smax(X, -X) --> abs(X)
1388     // smin(X, -X) --> -abs(X)
1389     // umax(X, -X) --> -abs(X)
1390     // umin(X, -X) --> abs(X)
1391     if (isKnownNegation(I0, I1)) {
1392       // We can choose either operand as the input to abs(), but if we can
1393       // eliminate the only use of a value, that's better for subsequent
1394       // transforms/analysis.
1395       if (I0->hasOneUse() && !I1->hasOneUse())
1396         std::swap(I0, I1);
1397 
1398       // This is some variant of abs(). See if we can propagate 'nsw' to the abs
1399       // operation and potentially its negation.
1400       bool IntMinIsPoison = isKnownNegation(I0, I1, /* NeedNSW */ true);
1401       Value *Abs = Builder.CreateBinaryIntrinsic(
1402           Intrinsic::abs, I0,
1403           ConstantInt::getBool(II->getContext(), IntMinIsPoison));
1404 
1405       // We don't have a "nabs" intrinsic, so negate if needed based on the
1406       // max/min operation.
1407       if (IID == Intrinsic::smin || IID == Intrinsic::umax)
1408         Abs = Builder.CreateNeg(Abs, "nabs", /* NUW */ false, IntMinIsPoison);
1409       return replaceInstUsesWith(CI, Abs);
1410     }
1411 
1412     if (Instruction *Sel = foldClampRangeOfTwo(II, Builder))
1413       return Sel;
1414 
1415     if (Instruction *SAdd = matchSAddSubSat(*II))
1416       return SAdd;
1417 
1418     if (match(I1, m_ImmConstant()))
1419       if (auto *Sel = dyn_cast<SelectInst>(I0))
1420         if (Instruction *R = FoldOpIntoSelect(*II, Sel))
1421           return R;
1422 
1423     if (Instruction *NewMinMax = reassociateMinMaxWithConstants(II))
1424       return NewMinMax;
1425 
1426     if (Instruction *R = reassociateMinMaxWithConstantInOperand(II, Builder))
1427       return R;
1428 
1429     if (Instruction *NewMinMax = factorizeMinMaxTree(II))
1430        return NewMinMax;
1431 
1432     break;
1433   }
1434   case Intrinsic::bswap: {
1435     Value *IIOperand = II->getArgOperand(0);
1436 
1437     // Try to canonicalize bswap-of-logical-shift-by-8-bit-multiple as
1438     // inverse-shift-of-bswap:
1439     // bswap (shl X, Y) --> lshr (bswap X), Y
1440     // bswap (lshr X, Y) --> shl (bswap X), Y
1441     Value *X, *Y;
1442     if (match(IIOperand, m_OneUse(m_LogicalShift(m_Value(X), m_Value(Y))))) {
1443       // The transform allows undef vector elements, so try a constant match
1444       // first. If knownbits can handle that case, that clause could be removed.
1445       unsigned BitWidth = IIOperand->getType()->getScalarSizeInBits();
1446       const APInt *C;
1447       if ((match(Y, m_APIntAllowUndef(C)) && (*C & 7) == 0) ||
1448           MaskedValueIsZero(Y, APInt::getLowBitsSet(BitWidth, 3))) {
1449         Value *NewSwap = Builder.CreateUnaryIntrinsic(Intrinsic::bswap, X);
1450         BinaryOperator::BinaryOps InverseShift =
1451             cast<BinaryOperator>(IIOperand)->getOpcode() == Instruction::Shl
1452                 ? Instruction::LShr
1453                 : Instruction::Shl;
1454         return BinaryOperator::Create(InverseShift, NewSwap, Y);
1455       }
1456     }
1457 
1458     KnownBits Known = computeKnownBits(IIOperand, 0, II);
1459     uint64_t LZ = alignDown(Known.countMinLeadingZeros(), 8);
1460     uint64_t TZ = alignDown(Known.countMinTrailingZeros(), 8);
1461     unsigned BW = Known.getBitWidth();
1462 
1463     // bswap(x) -> shift(x) if x has exactly one "active byte"
1464     if (BW - LZ - TZ == 8) {
1465       assert(LZ != TZ && "active byte cannot be in the middle");
1466       if (LZ > TZ)  // -> shl(x) if the "active byte" is in the low part of x
1467         return BinaryOperator::CreateNUWShl(
1468             IIOperand, ConstantInt::get(IIOperand->getType(), LZ - TZ));
1469       // -> lshr(x) if the "active byte" is in the high part of x
1470       return BinaryOperator::CreateExactLShr(
1471             IIOperand, ConstantInt::get(IIOperand->getType(), TZ - LZ));
1472     }
1473 
1474     // bswap(trunc(bswap(x))) -> trunc(lshr(x, c))
1475     if (match(IIOperand, m_Trunc(m_BSwap(m_Value(X))))) {
1476       unsigned C = X->getType()->getScalarSizeInBits() - BW;
1477       Value *CV = ConstantInt::get(X->getType(), C);
1478       Value *V = Builder.CreateLShr(X, CV);
1479       return new TruncInst(V, IIOperand->getType());
1480     }
1481     break;
1482   }
1483   case Intrinsic::masked_load:
1484     if (Value *SimplifiedMaskedOp = simplifyMaskedLoad(*II))
1485       return replaceInstUsesWith(CI, SimplifiedMaskedOp);
1486     break;
1487   case Intrinsic::masked_store:
1488     return simplifyMaskedStore(*II);
1489   case Intrinsic::masked_gather:
1490     return simplifyMaskedGather(*II);
1491   case Intrinsic::masked_scatter:
1492     return simplifyMaskedScatter(*II);
1493   case Intrinsic::launder_invariant_group:
1494   case Intrinsic::strip_invariant_group:
1495     if (auto *SkippedBarrier = simplifyInvariantGroupIntrinsic(*II, *this))
1496       return replaceInstUsesWith(*II, SkippedBarrier);
1497     break;
1498   case Intrinsic::powi:
1499     if (ConstantInt *Power = dyn_cast<ConstantInt>(II->getArgOperand(1))) {
1500       // 0 and 1 are handled in instsimplify
1501       // powi(x, -1) -> 1/x
1502       if (Power->isMinusOne())
1503         return BinaryOperator::CreateFDivFMF(ConstantFP::get(CI.getType(), 1.0),
1504                                              II->getArgOperand(0), II);
1505       // powi(x, 2) -> x*x
1506       if (Power->equalsInt(2))
1507         return BinaryOperator::CreateFMulFMF(II->getArgOperand(0),
1508                                              II->getArgOperand(0), II);
1509 
1510       if (!Power->getValue()[0]) {
1511         Value *X;
1512         // If power is even:
1513         // powi(-x, p) -> powi(x, p)
1514         // powi(fabs(x), p) -> powi(x, p)
1515         // powi(copysign(x, y), p) -> powi(x, p)
1516         if (match(II->getArgOperand(0), m_FNeg(m_Value(X))) ||
1517             match(II->getArgOperand(0), m_FAbs(m_Value(X))) ||
1518             match(II->getArgOperand(0),
1519                   m_Intrinsic<Intrinsic::copysign>(m_Value(X), m_Value())))
1520           return replaceOperand(*II, 0, X);
1521       }
1522     }
1523     break;
1524 
1525   case Intrinsic::cttz:
1526   case Intrinsic::ctlz:
1527     if (auto *I = foldCttzCtlz(*II, *this))
1528       return I;
1529     break;
1530 
1531   case Intrinsic::ctpop:
1532     if (auto *I = foldCtpop(*II, *this))
1533       return I;
1534     break;
1535 
1536   case Intrinsic::fshl:
1537   case Intrinsic::fshr: {
1538     Value *Op0 = II->getArgOperand(0), *Op1 = II->getArgOperand(1);
1539     Type *Ty = II->getType();
1540     unsigned BitWidth = Ty->getScalarSizeInBits();
1541     Constant *ShAmtC;
1542     if (match(II->getArgOperand(2), m_ImmConstant(ShAmtC))) {
1543       // Canonicalize a shift amount constant operand to modulo the bit-width.
1544       Constant *WidthC = ConstantInt::get(Ty, BitWidth);
1545       Constant *ModuloC =
1546           ConstantFoldBinaryOpOperands(Instruction::URem, ShAmtC, WidthC, DL);
1547       if (!ModuloC)
1548         return nullptr;
1549       if (ModuloC != ShAmtC)
1550         return replaceOperand(*II, 2, ModuloC);
1551 
1552       assert(ConstantExpr::getICmp(ICmpInst::ICMP_UGT, WidthC, ShAmtC) ==
1553                  ConstantInt::getTrue(CmpInst::makeCmpResultType(Ty)) &&
1554              "Shift amount expected to be modulo bitwidth");
1555 
1556       // Canonicalize funnel shift right by constant to funnel shift left. This
1557       // is not entirely arbitrary. For historical reasons, the backend may
1558       // recognize rotate left patterns but miss rotate right patterns.
1559       if (IID == Intrinsic::fshr) {
1560         // fshr X, Y, C --> fshl X, Y, (BitWidth - C)
1561         Constant *LeftShiftC = ConstantExpr::getSub(WidthC, ShAmtC);
1562         Module *Mod = II->getModule();
1563         Function *Fshl = Intrinsic::getDeclaration(Mod, Intrinsic::fshl, Ty);
1564         return CallInst::Create(Fshl, { Op0, Op1, LeftShiftC });
1565       }
1566       assert(IID == Intrinsic::fshl &&
1567              "All funnel shifts by simple constants should go left");
1568 
1569       // fshl(X, 0, C) --> shl X, C
1570       // fshl(X, undef, C) --> shl X, C
1571       if (match(Op1, m_ZeroInt()) || match(Op1, m_Undef()))
1572         return BinaryOperator::CreateShl(Op0, ShAmtC);
1573 
1574       // fshl(0, X, C) --> lshr X, (BW-C)
1575       // fshl(undef, X, C) --> lshr X, (BW-C)
1576       if (match(Op0, m_ZeroInt()) || match(Op0, m_Undef()))
1577         return BinaryOperator::CreateLShr(Op1,
1578                                           ConstantExpr::getSub(WidthC, ShAmtC));
1579 
1580       // fshl i16 X, X, 8 --> bswap i16 X (reduce to more-specific form)
1581       if (Op0 == Op1 && BitWidth == 16 && match(ShAmtC, m_SpecificInt(8))) {
1582         Module *Mod = II->getModule();
1583         Function *Bswap = Intrinsic::getDeclaration(Mod, Intrinsic::bswap, Ty);
1584         return CallInst::Create(Bswap, { Op0 });
1585       }
1586     }
1587 
1588     // Left or right might be masked.
1589     if (SimplifyDemandedInstructionBits(*II))
1590       return &CI;
1591 
1592     // The shift amount (operand 2) of a funnel shift is modulo the bitwidth,
1593     // so only the low bits of the shift amount are demanded if the bitwidth is
1594     // a power-of-2.
1595     if (!isPowerOf2_32(BitWidth))
1596       break;
1597     APInt Op2Demanded = APInt::getLowBitsSet(BitWidth, Log2_32_Ceil(BitWidth));
1598     KnownBits Op2Known(BitWidth);
1599     if (SimplifyDemandedBits(II, 2, Op2Demanded, Op2Known))
1600       return &CI;
1601     break;
1602   }
1603   case Intrinsic::uadd_with_overflow:
1604   case Intrinsic::sadd_with_overflow: {
1605     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1606       return I;
1607 
1608     // Given 2 constant operands whose sum does not overflow:
1609     // uaddo (X +nuw C0), C1 -> uaddo X, C0 + C1
1610     // saddo (X +nsw C0), C1 -> saddo X, C0 + C1
1611     Value *X;
1612     const APInt *C0, *C1;
1613     Value *Arg0 = II->getArgOperand(0);
1614     Value *Arg1 = II->getArgOperand(1);
1615     bool IsSigned = IID == Intrinsic::sadd_with_overflow;
1616     bool HasNWAdd = IsSigned ? match(Arg0, m_NSWAdd(m_Value(X), m_APInt(C0)))
1617                              : match(Arg0, m_NUWAdd(m_Value(X), m_APInt(C0)));
1618     if (HasNWAdd && match(Arg1, m_APInt(C1))) {
1619       bool Overflow;
1620       APInt NewC =
1621           IsSigned ? C1->sadd_ov(*C0, Overflow) : C1->uadd_ov(*C0, Overflow);
1622       if (!Overflow)
1623         return replaceInstUsesWith(
1624             *II, Builder.CreateBinaryIntrinsic(
1625                      IID, X, ConstantInt::get(Arg1->getType(), NewC)));
1626     }
1627     break;
1628   }
1629 
1630   case Intrinsic::umul_with_overflow:
1631   case Intrinsic::smul_with_overflow:
1632   case Intrinsic::usub_with_overflow:
1633     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1634       return I;
1635     break;
1636 
1637   case Intrinsic::ssub_with_overflow: {
1638     if (Instruction *I = foldIntrinsicWithOverflowCommon(II))
1639       return I;
1640 
1641     Constant *C;
1642     Value *Arg0 = II->getArgOperand(0);
1643     Value *Arg1 = II->getArgOperand(1);
1644     // Given a constant C that is not the minimum signed value
1645     // for an integer of a given bit width:
1646     //
1647     // ssubo X, C -> saddo X, -C
1648     if (match(Arg1, m_Constant(C)) && C->isNotMinSignedValue()) {
1649       Value *NegVal = ConstantExpr::getNeg(C);
1650       // Build a saddo call that is equivalent to the discovered
1651       // ssubo call.
1652       return replaceInstUsesWith(
1653           *II, Builder.CreateBinaryIntrinsic(Intrinsic::sadd_with_overflow,
1654                                              Arg0, NegVal));
1655     }
1656 
1657     break;
1658   }
1659 
1660   case Intrinsic::uadd_sat:
1661   case Intrinsic::sadd_sat:
1662   case Intrinsic::usub_sat:
1663   case Intrinsic::ssub_sat: {
1664     SaturatingInst *SI = cast<SaturatingInst>(II);
1665     Type *Ty = SI->getType();
1666     Value *Arg0 = SI->getLHS();
1667     Value *Arg1 = SI->getRHS();
1668 
1669     // Make use of known overflow information.
1670     OverflowResult OR = computeOverflow(SI->getBinaryOp(), SI->isSigned(),
1671                                         Arg0, Arg1, SI);
1672     switch (OR) {
1673       case OverflowResult::MayOverflow:
1674         break;
1675       case OverflowResult::NeverOverflows:
1676         if (SI->isSigned())
1677           return BinaryOperator::CreateNSW(SI->getBinaryOp(), Arg0, Arg1);
1678         else
1679           return BinaryOperator::CreateNUW(SI->getBinaryOp(), Arg0, Arg1);
1680       case OverflowResult::AlwaysOverflowsLow: {
1681         unsigned BitWidth = Ty->getScalarSizeInBits();
1682         APInt Min = APSInt::getMinValue(BitWidth, !SI->isSigned());
1683         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Min));
1684       }
1685       case OverflowResult::AlwaysOverflowsHigh: {
1686         unsigned BitWidth = Ty->getScalarSizeInBits();
1687         APInt Max = APSInt::getMaxValue(BitWidth, !SI->isSigned());
1688         return replaceInstUsesWith(*SI, ConstantInt::get(Ty, Max));
1689       }
1690     }
1691 
1692     // ssub.sat(X, C) -> sadd.sat(X, -C) if C != MIN
1693     Constant *C;
1694     if (IID == Intrinsic::ssub_sat && match(Arg1, m_Constant(C)) &&
1695         C->isNotMinSignedValue()) {
1696       Value *NegVal = ConstantExpr::getNeg(C);
1697       return replaceInstUsesWith(
1698           *II, Builder.CreateBinaryIntrinsic(
1699               Intrinsic::sadd_sat, Arg0, NegVal));
1700     }
1701 
1702     // sat(sat(X + Val2) + Val) -> sat(X + (Val+Val2))
1703     // sat(sat(X - Val2) - Val) -> sat(X - (Val+Val2))
1704     // if Val and Val2 have the same sign
1705     if (auto *Other = dyn_cast<IntrinsicInst>(Arg0)) {
1706       Value *X;
1707       const APInt *Val, *Val2;
1708       APInt NewVal;
1709       bool IsUnsigned =
1710           IID == Intrinsic::uadd_sat || IID == Intrinsic::usub_sat;
1711       if (Other->getIntrinsicID() == IID &&
1712           match(Arg1, m_APInt(Val)) &&
1713           match(Other->getArgOperand(0), m_Value(X)) &&
1714           match(Other->getArgOperand(1), m_APInt(Val2))) {
1715         if (IsUnsigned)
1716           NewVal = Val->uadd_sat(*Val2);
1717         else if (Val->isNonNegative() == Val2->isNonNegative()) {
1718           bool Overflow;
1719           NewVal = Val->sadd_ov(*Val2, Overflow);
1720           if (Overflow) {
1721             // Both adds together may add more than SignedMaxValue
1722             // without saturating the final result.
1723             break;
1724           }
1725         } else {
1726           // Cannot fold saturated addition with different signs.
1727           break;
1728         }
1729 
1730         return replaceInstUsesWith(
1731             *II, Builder.CreateBinaryIntrinsic(
1732                      IID, X, ConstantInt::get(II->getType(), NewVal)));
1733       }
1734     }
1735     break;
1736   }
1737 
1738   case Intrinsic::minnum:
1739   case Intrinsic::maxnum:
1740   case Intrinsic::minimum:
1741   case Intrinsic::maximum: {
1742     Value *Arg0 = II->getArgOperand(0);
1743     Value *Arg1 = II->getArgOperand(1);
1744     Value *X, *Y;
1745     if (match(Arg0, m_FNeg(m_Value(X))) && match(Arg1, m_FNeg(m_Value(Y))) &&
1746         (Arg0->hasOneUse() || Arg1->hasOneUse())) {
1747       // If both operands are negated, invert the call and negate the result:
1748       // min(-X, -Y) --> -(max(X, Y))
1749       // max(-X, -Y) --> -(min(X, Y))
1750       Intrinsic::ID NewIID;
1751       switch (IID) {
1752       case Intrinsic::maxnum:
1753         NewIID = Intrinsic::minnum;
1754         break;
1755       case Intrinsic::minnum:
1756         NewIID = Intrinsic::maxnum;
1757         break;
1758       case Intrinsic::maximum:
1759         NewIID = Intrinsic::minimum;
1760         break;
1761       case Intrinsic::minimum:
1762         NewIID = Intrinsic::maximum;
1763         break;
1764       default:
1765         llvm_unreachable("unexpected intrinsic ID");
1766       }
1767       Value *NewCall = Builder.CreateBinaryIntrinsic(NewIID, X, Y, II);
1768       Instruction *FNeg = UnaryOperator::CreateFNeg(NewCall);
1769       FNeg->copyIRFlags(II);
1770       return FNeg;
1771     }
1772 
1773     // m(m(X, C2), C1) -> m(X, C)
1774     const APFloat *C1, *C2;
1775     if (auto *M = dyn_cast<IntrinsicInst>(Arg0)) {
1776       if (M->getIntrinsicID() == IID && match(Arg1, m_APFloat(C1)) &&
1777           ((match(M->getArgOperand(0), m_Value(X)) &&
1778             match(M->getArgOperand(1), m_APFloat(C2))) ||
1779            (match(M->getArgOperand(1), m_Value(X)) &&
1780             match(M->getArgOperand(0), m_APFloat(C2))))) {
1781         APFloat Res(0.0);
1782         switch (IID) {
1783         case Intrinsic::maxnum:
1784           Res = maxnum(*C1, *C2);
1785           break;
1786         case Intrinsic::minnum:
1787           Res = minnum(*C1, *C2);
1788           break;
1789         case Intrinsic::maximum:
1790           Res = maximum(*C1, *C2);
1791           break;
1792         case Intrinsic::minimum:
1793           Res = minimum(*C1, *C2);
1794           break;
1795         default:
1796           llvm_unreachable("unexpected intrinsic ID");
1797         }
1798         Instruction *NewCall = Builder.CreateBinaryIntrinsic(
1799             IID, X, ConstantFP::get(Arg0->getType(), Res), II);
1800         // TODO: Conservatively intersecting FMF. If Res == C2, the transform
1801         //       was a simplification (so Arg0 and its original flags could
1802         //       propagate?)
1803         NewCall->andIRFlags(M);
1804         return replaceInstUsesWith(*II, NewCall);
1805       }
1806     }
1807 
1808     // m((fpext X), (fpext Y)) -> fpext (m(X, Y))
1809     if (match(Arg0, m_OneUse(m_FPExt(m_Value(X)))) &&
1810         match(Arg1, m_OneUse(m_FPExt(m_Value(Y)))) &&
1811         X->getType() == Y->getType()) {
1812       Value *NewCall =
1813           Builder.CreateBinaryIntrinsic(IID, X, Y, II, II->getName());
1814       return new FPExtInst(NewCall, II->getType());
1815     }
1816 
1817     // max X, -X --> fabs X
1818     // min X, -X --> -(fabs X)
1819     // TODO: Remove one-use limitation? That is obviously better for max.
1820     //       It would be an extra instruction for min (fnabs), but that is
1821     //       still likely better for analysis and codegen.
1822     if ((match(Arg0, m_OneUse(m_FNeg(m_Value(X)))) && Arg1 == X) ||
1823         (match(Arg1, m_OneUse(m_FNeg(m_Value(X)))) && Arg0 == X)) {
1824       Value *R = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, X, II);
1825       if (IID == Intrinsic::minimum || IID == Intrinsic::minnum)
1826         R = Builder.CreateFNegFMF(R, II);
1827       return replaceInstUsesWith(*II, R);
1828     }
1829 
1830     break;
1831   }
1832   case Intrinsic::fmuladd: {
1833     // Canonicalize fast fmuladd to the separate fmul + fadd.
1834     if (II->isFast()) {
1835       BuilderTy::FastMathFlagGuard Guard(Builder);
1836       Builder.setFastMathFlags(II->getFastMathFlags());
1837       Value *Mul = Builder.CreateFMul(II->getArgOperand(0),
1838                                       II->getArgOperand(1));
1839       Value *Add = Builder.CreateFAdd(Mul, II->getArgOperand(2));
1840       Add->takeName(II);
1841       return replaceInstUsesWith(*II, Add);
1842     }
1843 
1844     // Try to simplify the underlying FMul.
1845     if (Value *V = simplifyFMulInst(II->getArgOperand(0), II->getArgOperand(1),
1846                                     II->getFastMathFlags(),
1847                                     SQ.getWithInstruction(II))) {
1848       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1849       FAdd->copyFastMathFlags(II);
1850       return FAdd;
1851     }
1852 
1853     LLVM_FALLTHROUGH;
1854   }
1855   case Intrinsic::fma: {
1856     // fma fneg(x), fneg(y), z -> fma x, y, z
1857     Value *Src0 = II->getArgOperand(0);
1858     Value *Src1 = II->getArgOperand(1);
1859     Value *X, *Y;
1860     if (match(Src0, m_FNeg(m_Value(X))) && match(Src1, m_FNeg(m_Value(Y)))) {
1861       replaceOperand(*II, 0, X);
1862       replaceOperand(*II, 1, Y);
1863       return II;
1864     }
1865 
1866     // fma fabs(x), fabs(x), z -> fma x, x, z
1867     if (match(Src0, m_FAbs(m_Value(X))) &&
1868         match(Src1, m_FAbs(m_Specific(X)))) {
1869       replaceOperand(*II, 0, X);
1870       replaceOperand(*II, 1, X);
1871       return II;
1872     }
1873 
1874     // Try to simplify the underlying FMul. We can only apply simplifications
1875     // that do not require rounding.
1876     if (Value *V = simplifyFMAFMul(II->getArgOperand(0), II->getArgOperand(1),
1877                                    II->getFastMathFlags(),
1878                                    SQ.getWithInstruction(II))) {
1879       auto *FAdd = BinaryOperator::CreateFAdd(V, II->getArgOperand(2));
1880       FAdd->copyFastMathFlags(II);
1881       return FAdd;
1882     }
1883 
1884     // fma x, y, 0 -> fmul x, y
1885     // This is always valid for -0.0, but requires nsz for +0.0 as
1886     // -0.0 + 0.0 = 0.0, which would not be the same as the fmul on its own.
1887     if (match(II->getArgOperand(2), m_NegZeroFP()) ||
1888         (match(II->getArgOperand(2), m_PosZeroFP()) &&
1889          II->getFastMathFlags().noSignedZeros()))
1890       return BinaryOperator::CreateFMulFMF(Src0, Src1, II);
1891 
1892     break;
1893   }
1894   case Intrinsic::copysign: {
1895     Value *Mag = II->getArgOperand(0), *Sign = II->getArgOperand(1);
1896     if (SignBitMustBeZero(Sign, &TLI)) {
1897       // If we know that the sign argument is positive, reduce to FABS:
1898       // copysign Mag, +Sign --> fabs Mag
1899       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1900       return replaceInstUsesWith(*II, Fabs);
1901     }
1902     // TODO: There should be a ValueTracking sibling like SignBitMustBeOne.
1903     const APFloat *C;
1904     if (match(Sign, m_APFloat(C)) && C->isNegative()) {
1905       // If we know that the sign argument is negative, reduce to FNABS:
1906       // copysign Mag, -Sign --> fneg (fabs Mag)
1907       Value *Fabs = Builder.CreateUnaryIntrinsic(Intrinsic::fabs, Mag, II);
1908       return replaceInstUsesWith(*II, Builder.CreateFNegFMF(Fabs, II));
1909     }
1910 
1911     // Propagate sign argument through nested calls:
1912     // copysign Mag, (copysign ?, X) --> copysign Mag, X
1913     Value *X;
1914     if (match(Sign, m_Intrinsic<Intrinsic::copysign>(m_Value(), m_Value(X))))
1915       return replaceOperand(*II, 1, X);
1916 
1917     // Peek through changes of magnitude's sign-bit. This call rewrites those:
1918     // copysign (fabs X), Sign --> copysign X, Sign
1919     // copysign (fneg X), Sign --> copysign X, Sign
1920     if (match(Mag, m_FAbs(m_Value(X))) || match(Mag, m_FNeg(m_Value(X))))
1921       return replaceOperand(*II, 0, X);
1922 
1923     break;
1924   }
1925   case Intrinsic::fabs: {
1926     Value *Cond, *TVal, *FVal;
1927     if (match(II->getArgOperand(0),
1928               m_Select(m_Value(Cond), m_Value(TVal), m_Value(FVal)))) {
1929       // fabs (select Cond, TrueC, FalseC) --> select Cond, AbsT, AbsF
1930       if (isa<Constant>(TVal) && isa<Constant>(FVal)) {
1931         CallInst *AbsT = Builder.CreateCall(II->getCalledFunction(), {TVal});
1932         CallInst *AbsF = Builder.CreateCall(II->getCalledFunction(), {FVal});
1933         return SelectInst::Create(Cond, AbsT, AbsF);
1934       }
1935       // fabs (select Cond, -FVal, FVal) --> fabs FVal
1936       if (match(TVal, m_FNeg(m_Specific(FVal))))
1937         return replaceOperand(*II, 0, FVal);
1938       // fabs (select Cond, TVal, -TVal) --> fabs TVal
1939       if (match(FVal, m_FNeg(m_Specific(TVal))))
1940         return replaceOperand(*II, 0, TVal);
1941     }
1942 
1943     LLVM_FALLTHROUGH;
1944   }
1945   case Intrinsic::ceil:
1946   case Intrinsic::floor:
1947   case Intrinsic::round:
1948   case Intrinsic::roundeven:
1949   case Intrinsic::nearbyint:
1950   case Intrinsic::rint:
1951   case Intrinsic::trunc: {
1952     Value *ExtSrc;
1953     if (match(II->getArgOperand(0), m_OneUse(m_FPExt(m_Value(ExtSrc))))) {
1954       // Narrow the call: intrinsic (fpext x) -> fpext (intrinsic x)
1955       Value *NarrowII = Builder.CreateUnaryIntrinsic(IID, ExtSrc, II);
1956       return new FPExtInst(NarrowII, II->getType());
1957     }
1958     break;
1959   }
1960   case Intrinsic::cos:
1961   case Intrinsic::amdgcn_cos: {
1962     Value *X;
1963     Value *Src = II->getArgOperand(0);
1964     if (match(Src, m_FNeg(m_Value(X))) || match(Src, m_FAbs(m_Value(X)))) {
1965       // cos(-x) -> cos(x)
1966       // cos(fabs(x)) -> cos(x)
1967       return replaceOperand(*II, 0, X);
1968     }
1969     break;
1970   }
1971   case Intrinsic::sin: {
1972     Value *X;
1973     if (match(II->getArgOperand(0), m_OneUse(m_FNeg(m_Value(X))))) {
1974       // sin(-x) --> -sin(x)
1975       Value *NewSin = Builder.CreateUnaryIntrinsic(Intrinsic::sin, X, II);
1976       Instruction *FNeg = UnaryOperator::CreateFNeg(NewSin);
1977       FNeg->copyFastMathFlags(II);
1978       return FNeg;
1979     }
1980     break;
1981   }
1982 
1983   case Intrinsic::arm_neon_vtbl1:
1984   case Intrinsic::aarch64_neon_tbl1:
1985     if (Value *V = simplifyNeonTbl1(*II, Builder))
1986       return replaceInstUsesWith(*II, V);
1987     break;
1988 
1989   case Intrinsic::arm_neon_vmulls:
1990   case Intrinsic::arm_neon_vmullu:
1991   case Intrinsic::aarch64_neon_smull:
1992   case Intrinsic::aarch64_neon_umull: {
1993     Value *Arg0 = II->getArgOperand(0);
1994     Value *Arg1 = II->getArgOperand(1);
1995 
1996     // Handle mul by zero first:
1997     if (isa<ConstantAggregateZero>(Arg0) || isa<ConstantAggregateZero>(Arg1)) {
1998       return replaceInstUsesWith(CI, ConstantAggregateZero::get(II->getType()));
1999     }
2000 
2001     // Check for constant LHS & RHS - in this case we just simplify.
2002     bool Zext = (IID == Intrinsic::arm_neon_vmullu ||
2003                  IID == Intrinsic::aarch64_neon_umull);
2004     VectorType *NewVT = cast<VectorType>(II->getType());
2005     if (Constant *CV0 = dyn_cast<Constant>(Arg0)) {
2006       if (Constant *CV1 = dyn_cast<Constant>(Arg1)) {
2007         CV0 = ConstantExpr::getIntegerCast(CV0, NewVT, /*isSigned=*/!Zext);
2008         CV1 = ConstantExpr::getIntegerCast(CV1, NewVT, /*isSigned=*/!Zext);
2009 
2010         return replaceInstUsesWith(CI, ConstantExpr::getMul(CV0, CV1));
2011       }
2012 
2013       // Couldn't simplify - canonicalize constant to the RHS.
2014       std::swap(Arg0, Arg1);
2015     }
2016 
2017     // Handle mul by one:
2018     if (Constant *CV1 = dyn_cast<Constant>(Arg1))
2019       if (ConstantInt *Splat =
2020               dyn_cast_or_null<ConstantInt>(CV1->getSplatValue()))
2021         if (Splat->isOne())
2022           return CastInst::CreateIntegerCast(Arg0, II->getType(),
2023                                              /*isSigned=*/!Zext);
2024 
2025     break;
2026   }
2027   case Intrinsic::arm_neon_aesd:
2028   case Intrinsic::arm_neon_aese:
2029   case Intrinsic::aarch64_crypto_aesd:
2030   case Intrinsic::aarch64_crypto_aese: {
2031     Value *DataArg = II->getArgOperand(0);
2032     Value *KeyArg  = II->getArgOperand(1);
2033 
2034     // Try to use the builtin XOR in AESE and AESD to eliminate a prior XOR
2035     Value *Data, *Key;
2036     if (match(KeyArg, m_ZeroInt()) &&
2037         match(DataArg, m_Xor(m_Value(Data), m_Value(Key)))) {
2038       replaceOperand(*II, 0, Data);
2039       replaceOperand(*II, 1, Key);
2040       return II;
2041     }
2042     break;
2043   }
2044   case Intrinsic::hexagon_V6_vandvrt:
2045   case Intrinsic::hexagon_V6_vandvrt_128B: {
2046     // Simplify Q -> V -> Q conversion.
2047     if (auto Op0 = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2048       Intrinsic::ID ID0 = Op0->getIntrinsicID();
2049       if (ID0 != Intrinsic::hexagon_V6_vandqrt &&
2050           ID0 != Intrinsic::hexagon_V6_vandqrt_128B)
2051         break;
2052       Value *Bytes = Op0->getArgOperand(1), *Mask = II->getArgOperand(1);
2053       uint64_t Bytes1 = computeKnownBits(Bytes, 0, Op0).One.getZExtValue();
2054       uint64_t Mask1 = computeKnownBits(Mask, 0, II).One.getZExtValue();
2055       // Check if every byte has common bits in Bytes and Mask.
2056       uint64_t C = Bytes1 & Mask1;
2057       if ((C & 0xFF) && (C & 0xFF00) && (C & 0xFF0000) && (C & 0xFF000000))
2058         return replaceInstUsesWith(*II, Op0->getArgOperand(0));
2059     }
2060     break;
2061   }
2062   case Intrinsic::stackrestore: {
2063     enum class ClassifyResult {
2064       None,
2065       Alloca,
2066       StackRestore,
2067       CallWithSideEffects,
2068     };
2069     auto Classify = [](const Instruction *I) {
2070       if (isa<AllocaInst>(I))
2071         return ClassifyResult::Alloca;
2072 
2073       if (auto *CI = dyn_cast<CallInst>(I)) {
2074         if (auto *II = dyn_cast<IntrinsicInst>(CI)) {
2075           if (II->getIntrinsicID() == Intrinsic::stackrestore)
2076             return ClassifyResult::StackRestore;
2077 
2078           if (II->mayHaveSideEffects())
2079             return ClassifyResult::CallWithSideEffects;
2080         } else {
2081           // Consider all non-intrinsic calls to be side effects
2082           return ClassifyResult::CallWithSideEffects;
2083         }
2084       }
2085 
2086       return ClassifyResult::None;
2087     };
2088 
2089     // If the stacksave and the stackrestore are in the same BB, and there is
2090     // no intervening call, alloca, or stackrestore of a different stacksave,
2091     // remove the restore. This can happen when variable allocas are DCE'd.
2092     if (IntrinsicInst *SS = dyn_cast<IntrinsicInst>(II->getArgOperand(0))) {
2093       if (SS->getIntrinsicID() == Intrinsic::stacksave &&
2094           SS->getParent() == II->getParent()) {
2095         BasicBlock::iterator BI(SS);
2096         bool CannotRemove = false;
2097         for (++BI; &*BI != II; ++BI) {
2098           switch (Classify(&*BI)) {
2099           case ClassifyResult::None:
2100             // So far so good, look at next instructions.
2101             break;
2102 
2103           case ClassifyResult::StackRestore:
2104             // If we found an intervening stackrestore for a different
2105             // stacksave, we can't remove the stackrestore. Otherwise, continue.
2106             if (cast<IntrinsicInst>(*BI).getArgOperand(0) != SS)
2107               CannotRemove = true;
2108             break;
2109 
2110           case ClassifyResult::Alloca:
2111           case ClassifyResult::CallWithSideEffects:
2112             // If we found an alloca, a non-intrinsic call, or an intrinsic
2113             // call with side effects, we can't remove the stackrestore.
2114             CannotRemove = true;
2115             break;
2116           }
2117           if (CannotRemove)
2118             break;
2119         }
2120 
2121         if (!CannotRemove)
2122           return eraseInstFromFunction(CI);
2123       }
2124     }
2125 
2126     // Scan down this block to see if there is another stack restore in the
2127     // same block without an intervening call/alloca.
2128     BasicBlock::iterator BI(II);
2129     Instruction *TI = II->getParent()->getTerminator();
2130     bool CannotRemove = false;
2131     for (++BI; &*BI != TI; ++BI) {
2132       switch (Classify(&*BI)) {
2133       case ClassifyResult::None:
2134         // So far so good, look at next instructions.
2135         break;
2136 
2137       case ClassifyResult::StackRestore:
2138         // If there is a stackrestore below this one, remove this one.
2139         return eraseInstFromFunction(CI);
2140 
2141       case ClassifyResult::Alloca:
2142       case ClassifyResult::CallWithSideEffects:
2143         // If we found an alloca, a non-intrinsic call, or an intrinsic call
2144         // with side effects (such as llvm.stacksave and llvm.read_register),
2145         // we can't remove the stack restore.
2146         CannotRemove = true;
2147         break;
2148       }
2149       if (CannotRemove)
2150         break;
2151     }
2152 
2153     // If the stack restore is in a return, resume, or unwind block and if there
2154     // are no allocas or calls between the restore and the return, nuke the
2155     // restore.
2156     if (!CannotRemove && (isa<ReturnInst>(TI) || isa<ResumeInst>(TI)))
2157       return eraseInstFromFunction(CI);
2158     break;
2159   }
2160   case Intrinsic::lifetime_end:
2161     // Asan needs to poison memory to detect invalid access which is possible
2162     // even for empty lifetime range.
2163     if (II->getFunction()->hasFnAttribute(Attribute::SanitizeAddress) ||
2164         II->getFunction()->hasFnAttribute(Attribute::SanitizeMemory) ||
2165         II->getFunction()->hasFnAttribute(Attribute::SanitizeHWAddress))
2166       break;
2167 
2168     if (removeTriviallyEmptyRange(*II, *this, [](const IntrinsicInst &I) {
2169           return I.getIntrinsicID() == Intrinsic::lifetime_start;
2170         }))
2171       return nullptr;
2172     break;
2173   case Intrinsic::assume: {
2174     Value *IIOperand = II->getArgOperand(0);
2175     SmallVector<OperandBundleDef, 4> OpBundles;
2176     II->getOperandBundlesAsDefs(OpBundles);
2177 
2178     /// This will remove the boolean Condition from the assume given as
2179     /// argument and remove the assume if it becomes useless.
2180     /// always returns nullptr for use as a return values.
2181     auto RemoveConditionFromAssume = [&](Instruction *Assume) -> Instruction * {
2182       assert(isa<AssumeInst>(Assume));
2183       if (isAssumeWithEmptyBundle(*cast<AssumeInst>(II)))
2184         return eraseInstFromFunction(CI);
2185       replaceUse(II->getOperandUse(0), ConstantInt::getTrue(II->getContext()));
2186       return nullptr;
2187     };
2188     // Remove an assume if it is followed by an identical assume.
2189     // TODO: Do we need this? Unless there are conflicting assumptions, the
2190     // computeKnownBits(IIOperand) below here eliminates redundant assumes.
2191     Instruction *Next = II->getNextNonDebugInstruction();
2192     if (match(Next, m_Intrinsic<Intrinsic::assume>(m_Specific(IIOperand))))
2193       return RemoveConditionFromAssume(Next);
2194 
2195     // Canonicalize assume(a && b) -> assume(a); assume(b);
2196     // Note: New assumption intrinsics created here are registered by
2197     // the InstCombineIRInserter object.
2198     FunctionType *AssumeIntrinsicTy = II->getFunctionType();
2199     Value *AssumeIntrinsic = II->getCalledOperand();
2200     Value *A, *B;
2201     if (match(IIOperand, m_LogicalAnd(m_Value(A), m_Value(B)))) {
2202       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, A, OpBundles,
2203                          II->getName());
2204       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic, B, II->getName());
2205       return eraseInstFromFunction(*II);
2206     }
2207     // assume(!(a || b)) -> assume(!a); assume(!b);
2208     if (match(IIOperand, m_Not(m_LogicalOr(m_Value(A), m_Value(B))))) {
2209       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
2210                          Builder.CreateNot(A), OpBundles, II->getName());
2211       Builder.CreateCall(AssumeIntrinsicTy, AssumeIntrinsic,
2212                          Builder.CreateNot(B), II->getName());
2213       return eraseInstFromFunction(*II);
2214     }
2215 
2216     // assume( (load addr) != null ) -> add 'nonnull' metadata to load
2217     // (if assume is valid at the load)
2218     CmpInst::Predicate Pred;
2219     Instruction *LHS;
2220     if (match(IIOperand, m_ICmp(Pred, m_Instruction(LHS), m_Zero())) &&
2221         Pred == ICmpInst::ICMP_NE && LHS->getOpcode() == Instruction::Load &&
2222         LHS->getType()->isPointerTy() &&
2223         isValidAssumeForContext(II, LHS, &DT)) {
2224       MDNode *MD = MDNode::get(II->getContext(), None);
2225       LHS->setMetadata(LLVMContext::MD_nonnull, MD);
2226       return RemoveConditionFromAssume(II);
2227 
2228       // TODO: apply nonnull return attributes to calls and invokes
2229       // TODO: apply range metadata for range check patterns?
2230     }
2231 
2232     // Convert nonnull assume like:
2233     // %A = icmp ne i32* %PTR, null
2234     // call void @llvm.assume(i1 %A)
2235     // into
2236     // call void @llvm.assume(i1 true) [ "nonnull"(i32* %PTR) ]
2237     if (EnableKnowledgeRetention &&
2238         match(IIOperand, m_Cmp(Pred, m_Value(A), m_Zero())) &&
2239         Pred == CmpInst::ICMP_NE && A->getType()->isPointerTy()) {
2240       if (auto *Replacement = buildAssumeFromKnowledge(
2241               {RetainedKnowledge{Attribute::NonNull, 0, A}}, Next, &AC, &DT)) {
2242 
2243         Replacement->insertBefore(Next);
2244         AC.registerAssumption(Replacement);
2245         return RemoveConditionFromAssume(II);
2246       }
2247     }
2248 
2249     // Convert alignment assume like:
2250     // %B = ptrtoint i32* %A to i64
2251     // %C = and i64 %B, Constant
2252     // %D = icmp eq i64 %C, 0
2253     // call void @llvm.assume(i1 %D)
2254     // into
2255     // call void @llvm.assume(i1 true) [ "align"(i32* [[A]], i64  Constant + 1)]
2256     uint64_t AlignMask;
2257     if (EnableKnowledgeRetention &&
2258         match(IIOperand,
2259               m_Cmp(Pred, m_And(m_Value(A), m_ConstantInt(AlignMask)),
2260                     m_Zero())) &&
2261         Pred == CmpInst::ICMP_EQ) {
2262       if (isPowerOf2_64(AlignMask + 1)) {
2263         uint64_t Offset = 0;
2264         match(A, m_Add(m_Value(A), m_ConstantInt(Offset)));
2265         if (match(A, m_PtrToInt(m_Value(A)))) {
2266           /// Note: this doesn't preserve the offset information but merges
2267           /// offset and alignment.
2268           /// TODO: we can generate a GEP instead of merging the alignment with
2269           /// the offset.
2270           RetainedKnowledge RK{Attribute::Alignment,
2271                                (unsigned)MinAlign(Offset, AlignMask + 1), A};
2272           if (auto *Replacement =
2273                   buildAssumeFromKnowledge(RK, Next, &AC, &DT)) {
2274 
2275             Replacement->insertAfter(II);
2276             AC.registerAssumption(Replacement);
2277           }
2278           return RemoveConditionFromAssume(II);
2279         }
2280       }
2281     }
2282 
2283     /// Canonicalize Knowledge in operand bundles.
2284     if (EnableKnowledgeRetention && II->hasOperandBundles()) {
2285       for (unsigned Idx = 0; Idx < II->getNumOperandBundles(); Idx++) {
2286         auto &BOI = II->bundle_op_info_begin()[Idx];
2287         RetainedKnowledge RK =
2288           llvm::getKnowledgeFromBundle(cast<AssumeInst>(*II), BOI);
2289         if (BOI.End - BOI.Begin > 2)
2290           continue; // Prevent reducing knowledge in an align with offset since
2291                     // extracting a RetainedKnowledge form them looses offset
2292                     // information
2293         RetainedKnowledge CanonRK =
2294           llvm::simplifyRetainedKnowledge(cast<AssumeInst>(II), RK,
2295                                           &getAssumptionCache(),
2296                                           &getDominatorTree());
2297         if (CanonRK == RK)
2298           continue;
2299         if (!CanonRK) {
2300           if (BOI.End - BOI.Begin > 0) {
2301             Worklist.pushValue(II->op_begin()[BOI.Begin]);
2302             Value::dropDroppableUse(II->op_begin()[BOI.Begin]);
2303           }
2304           continue;
2305         }
2306         assert(RK.AttrKind == CanonRK.AttrKind);
2307         if (BOI.End - BOI.Begin > 0)
2308           II->op_begin()[BOI.Begin].set(CanonRK.WasOn);
2309         if (BOI.End - BOI.Begin > 1)
2310           II->op_begin()[BOI.Begin + 1].set(ConstantInt::get(
2311               Type::getInt64Ty(II->getContext()), CanonRK.ArgValue));
2312         if (RK.WasOn)
2313           Worklist.pushValue(RK.WasOn);
2314         return II;
2315       }
2316     }
2317 
2318     // If there is a dominating assume with the same condition as this one,
2319     // then this one is redundant, and should be removed.
2320     KnownBits Known(1);
2321     computeKnownBits(IIOperand, Known, 0, II);
2322     if (Known.isAllOnes() && isAssumeWithEmptyBundle(cast<AssumeInst>(*II)))
2323       return eraseInstFromFunction(*II);
2324 
2325     // Update the cache of affected values for this assumption (we might be
2326     // here because we just simplified the condition).
2327     AC.updateAffectedValues(cast<AssumeInst>(II));
2328     break;
2329   }
2330   case Intrinsic::experimental_guard: {
2331     // Is this guard followed by another guard?  We scan forward over a small
2332     // fixed window of instructions to handle common cases with conditions
2333     // computed between guards.
2334     Instruction *NextInst = II->getNextNonDebugInstruction();
2335     for (unsigned i = 0; i < GuardWideningWindow; i++) {
2336       // Note: Using context-free form to avoid compile time blow up
2337       if (!isSafeToSpeculativelyExecute(NextInst))
2338         break;
2339       NextInst = NextInst->getNextNonDebugInstruction();
2340     }
2341     Value *NextCond = nullptr;
2342     if (match(NextInst,
2343               m_Intrinsic<Intrinsic::experimental_guard>(m_Value(NextCond)))) {
2344       Value *CurrCond = II->getArgOperand(0);
2345 
2346       // Remove a guard that it is immediately preceded by an identical guard.
2347       // Otherwise canonicalize guard(a); guard(b) -> guard(a & b).
2348       if (CurrCond != NextCond) {
2349         Instruction *MoveI = II->getNextNonDebugInstruction();
2350         while (MoveI != NextInst) {
2351           auto *Temp = MoveI;
2352           MoveI = MoveI->getNextNonDebugInstruction();
2353           Temp->moveBefore(II);
2354         }
2355         replaceOperand(*II, 0, Builder.CreateAnd(CurrCond, NextCond));
2356       }
2357       eraseInstFromFunction(*NextInst);
2358       return II;
2359     }
2360     break;
2361   }
2362   case Intrinsic::vector_insert: {
2363     Value *Vec = II->getArgOperand(0);
2364     Value *SubVec = II->getArgOperand(1);
2365     Value *Idx = II->getArgOperand(2);
2366     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
2367     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2368     auto *SubVecTy = dyn_cast<FixedVectorType>(SubVec->getType());
2369 
2370     // Only canonicalize if the destination vector, Vec, and SubVec are all
2371     // fixed vectors.
2372     if (DstTy && VecTy && SubVecTy) {
2373       unsigned DstNumElts = DstTy->getNumElements();
2374       unsigned VecNumElts = VecTy->getNumElements();
2375       unsigned SubVecNumElts = SubVecTy->getNumElements();
2376       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2377 
2378       // An insert that entirely overwrites Vec with SubVec is a nop.
2379       if (VecNumElts == SubVecNumElts)
2380         return replaceInstUsesWith(CI, SubVec);
2381 
2382       // Widen SubVec into a vector of the same width as Vec, since
2383       // shufflevector requires the two input vectors to be the same width.
2384       // Elements beyond the bounds of SubVec within the widened vector are
2385       // undefined.
2386       SmallVector<int, 8> WidenMask;
2387       unsigned i;
2388       for (i = 0; i != SubVecNumElts; ++i)
2389         WidenMask.push_back(i);
2390       for (; i != VecNumElts; ++i)
2391         WidenMask.push_back(UndefMaskElem);
2392 
2393       Value *WidenShuffle = Builder.CreateShuffleVector(SubVec, WidenMask);
2394 
2395       SmallVector<int, 8> Mask;
2396       for (unsigned i = 0; i != IdxN; ++i)
2397         Mask.push_back(i);
2398       for (unsigned i = DstNumElts; i != DstNumElts + SubVecNumElts; ++i)
2399         Mask.push_back(i);
2400       for (unsigned i = IdxN + SubVecNumElts; i != DstNumElts; ++i)
2401         Mask.push_back(i);
2402 
2403       Value *Shuffle = Builder.CreateShuffleVector(Vec, WidenShuffle, Mask);
2404       return replaceInstUsesWith(CI, Shuffle);
2405     }
2406     break;
2407   }
2408   case Intrinsic::vector_extract: {
2409     Value *Vec = II->getArgOperand(0);
2410     Value *Idx = II->getArgOperand(1);
2411 
2412     auto *DstTy = dyn_cast<FixedVectorType>(II->getType());
2413     auto *VecTy = dyn_cast<FixedVectorType>(Vec->getType());
2414 
2415     // Only canonicalize if the the destination vector and Vec are fixed
2416     // vectors.
2417     if (DstTy && VecTy) {
2418       unsigned DstNumElts = DstTy->getNumElements();
2419       unsigned VecNumElts = VecTy->getNumElements();
2420       unsigned IdxN = cast<ConstantInt>(Idx)->getZExtValue();
2421 
2422       // Extracting the entirety of Vec is a nop.
2423       if (VecNumElts == DstNumElts) {
2424         replaceInstUsesWith(CI, Vec);
2425         return eraseInstFromFunction(CI);
2426       }
2427 
2428       SmallVector<int, 8> Mask;
2429       for (unsigned i = 0; i != DstNumElts; ++i)
2430         Mask.push_back(IdxN + i);
2431 
2432       Value *Shuffle = Builder.CreateShuffleVector(Vec, Mask);
2433       return replaceInstUsesWith(CI, Shuffle);
2434     }
2435     break;
2436   }
2437   case Intrinsic::experimental_vector_reverse: {
2438     Value *BO0, *BO1, *X, *Y;
2439     Value *Vec = II->getArgOperand(0);
2440     if (match(Vec, m_OneUse(m_BinOp(m_Value(BO0), m_Value(BO1))))) {
2441       auto *OldBinOp = cast<BinaryOperator>(Vec);
2442       if (match(BO0, m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2443                          m_Value(X)))) {
2444         // rev(binop rev(X), rev(Y)) --> binop X, Y
2445         if (match(BO1, m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2446                            m_Value(Y))))
2447           return replaceInstUsesWith(CI,
2448                                      BinaryOperator::CreateWithCopiedFlags(
2449                                          OldBinOp->getOpcode(), X, Y, OldBinOp,
2450                                          OldBinOp->getName(), II));
2451         // rev(binop rev(X), BO1Splat) --> binop X, BO1Splat
2452         if (isSplatValue(BO1))
2453           return replaceInstUsesWith(CI,
2454                                      BinaryOperator::CreateWithCopiedFlags(
2455                                          OldBinOp->getOpcode(), X, BO1,
2456                                          OldBinOp, OldBinOp->getName(), II));
2457       }
2458       // rev(binop BO0Splat, rev(Y)) --> binop BO0Splat, Y
2459       if (match(BO1, m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2460                          m_Value(Y))) &&
2461           isSplatValue(BO0))
2462         return replaceInstUsesWith(CI, BinaryOperator::CreateWithCopiedFlags(
2463                                            OldBinOp->getOpcode(), BO0, Y,
2464                                            OldBinOp, OldBinOp->getName(), II));
2465     }
2466     // rev(unop rev(X)) --> unop X
2467     if (match(Vec, m_OneUse(m_UnOp(
2468                        m_Intrinsic<Intrinsic::experimental_vector_reverse>(
2469                            m_Value(X)))))) {
2470       auto *OldUnOp = cast<UnaryOperator>(Vec);
2471       auto *NewUnOp = UnaryOperator::CreateWithCopiedFlags(
2472           OldUnOp->getOpcode(), X, OldUnOp, OldUnOp->getName(), II);
2473       return replaceInstUsesWith(CI, NewUnOp);
2474     }
2475     break;
2476   }
2477   case Intrinsic::vector_reduce_or:
2478   case Intrinsic::vector_reduce_and: {
2479     // Canonicalize logical or/and reductions:
2480     // Or reduction for i1 is represented as:
2481     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2482     // %res = cmp ne iReduxWidth %val, 0
2483     // And reduction for i1 is represented as:
2484     // %val = bitcast <ReduxWidth x i1> to iReduxWidth
2485     // %res = cmp eq iReduxWidth %val, 11111
2486     Value *Arg = II->getArgOperand(0);
2487     Value *Vect;
2488     if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2489       if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2490         if (FTy->getElementType() == Builder.getInt1Ty()) {
2491           Value *Res = Builder.CreateBitCast(
2492               Vect, Builder.getIntNTy(FTy->getNumElements()));
2493           if (IID == Intrinsic::vector_reduce_and) {
2494             Res = Builder.CreateICmpEQ(
2495                 Res, ConstantInt::getAllOnesValue(Res->getType()));
2496           } else {
2497             assert(IID == Intrinsic::vector_reduce_or &&
2498                    "Expected or reduction.");
2499             Res = Builder.CreateIsNotNull(Res);
2500           }
2501           if (Arg != Vect)
2502             Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
2503                                      II->getType());
2504           return replaceInstUsesWith(CI, Res);
2505         }
2506     }
2507     LLVM_FALLTHROUGH;
2508   }
2509   case Intrinsic::vector_reduce_add: {
2510     if (IID == Intrinsic::vector_reduce_add) {
2511       // Convert vector_reduce_add(ZExt(<n x i1>)) to
2512       // ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
2513       // Convert vector_reduce_add(SExt(<n x i1>)) to
2514       // -ZExtOrTrunc(ctpop(bitcast <n x i1> to in)).
2515       // Convert vector_reduce_add(<n x i1>) to
2516       // Trunc(ctpop(bitcast <n x i1> to in)).
2517       Value *Arg = II->getArgOperand(0);
2518       Value *Vect;
2519       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2520         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2521           if (FTy->getElementType() == Builder.getInt1Ty()) {
2522             Value *V = Builder.CreateBitCast(
2523                 Vect, Builder.getIntNTy(FTy->getNumElements()));
2524             Value *Res = Builder.CreateUnaryIntrinsic(Intrinsic::ctpop, V);
2525             if (Res->getType() != II->getType())
2526               Res = Builder.CreateZExtOrTrunc(Res, II->getType());
2527             if (Arg != Vect &&
2528                 cast<Instruction>(Arg)->getOpcode() == Instruction::SExt)
2529               Res = Builder.CreateNeg(Res);
2530             return replaceInstUsesWith(CI, Res);
2531           }
2532       }
2533     }
2534     LLVM_FALLTHROUGH;
2535   }
2536   case Intrinsic::vector_reduce_xor: {
2537     if (IID == Intrinsic::vector_reduce_xor) {
2538       // Exclusive disjunction reduction over the vector with
2539       // (potentially-extended) i1 element type is actually a
2540       // (potentially-extended) arithmetic `add` reduction over the original
2541       // non-extended value:
2542       //   vector_reduce_xor(?ext(<n x i1>))
2543       //     -->
2544       //   ?ext(vector_reduce_add(<n x i1>))
2545       Value *Arg = II->getArgOperand(0);
2546       Value *Vect;
2547       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2548         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2549           if (FTy->getElementType() == Builder.getInt1Ty()) {
2550             Value *Res = Builder.CreateAddReduce(Vect);
2551             if (Arg != Vect)
2552               Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
2553                                        II->getType());
2554             return replaceInstUsesWith(CI, Res);
2555           }
2556       }
2557     }
2558     LLVM_FALLTHROUGH;
2559   }
2560   case Intrinsic::vector_reduce_mul: {
2561     if (IID == Intrinsic::vector_reduce_mul) {
2562       // Multiplicative reduction over the vector with (potentially-extended)
2563       // i1 element type is actually a (potentially zero-extended)
2564       // logical `and` reduction over the original non-extended value:
2565       //   vector_reduce_mul(?ext(<n x i1>))
2566       //     -->
2567       //   zext(vector_reduce_and(<n x i1>))
2568       Value *Arg = II->getArgOperand(0);
2569       Value *Vect;
2570       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2571         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2572           if (FTy->getElementType() == Builder.getInt1Ty()) {
2573             Value *Res = Builder.CreateAndReduce(Vect);
2574             if (Res->getType() != II->getType())
2575               Res = Builder.CreateZExt(Res, II->getType());
2576             return replaceInstUsesWith(CI, Res);
2577           }
2578       }
2579     }
2580     LLVM_FALLTHROUGH;
2581   }
2582   case Intrinsic::vector_reduce_umin:
2583   case Intrinsic::vector_reduce_umax: {
2584     if (IID == Intrinsic::vector_reduce_umin ||
2585         IID == Intrinsic::vector_reduce_umax) {
2586       // UMin/UMax reduction over the vector with (potentially-extended)
2587       // i1 element type is actually a (potentially-extended)
2588       // logical `and`/`or` reduction over the original non-extended value:
2589       //   vector_reduce_u{min,max}(?ext(<n x i1>))
2590       //     -->
2591       //   ?ext(vector_reduce_{and,or}(<n x i1>))
2592       Value *Arg = II->getArgOperand(0);
2593       Value *Vect;
2594       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2595         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2596           if (FTy->getElementType() == Builder.getInt1Ty()) {
2597             Value *Res = IID == Intrinsic::vector_reduce_umin
2598                              ? Builder.CreateAndReduce(Vect)
2599                              : Builder.CreateOrReduce(Vect);
2600             if (Arg != Vect)
2601               Res = Builder.CreateCast(cast<CastInst>(Arg)->getOpcode(), Res,
2602                                        II->getType());
2603             return replaceInstUsesWith(CI, Res);
2604           }
2605       }
2606     }
2607     LLVM_FALLTHROUGH;
2608   }
2609   case Intrinsic::vector_reduce_smin:
2610   case Intrinsic::vector_reduce_smax: {
2611     if (IID == Intrinsic::vector_reduce_smin ||
2612         IID == Intrinsic::vector_reduce_smax) {
2613       // SMin/SMax reduction over the vector with (potentially-extended)
2614       // i1 element type is actually a (potentially-extended)
2615       // logical `and`/`or` reduction over the original non-extended value:
2616       //   vector_reduce_s{min,max}(<n x i1>)
2617       //     -->
2618       //   vector_reduce_{or,and}(<n x i1>)
2619       // and
2620       //   vector_reduce_s{min,max}(sext(<n x i1>))
2621       //     -->
2622       //   sext(vector_reduce_{or,and}(<n x i1>))
2623       // and
2624       //   vector_reduce_s{min,max}(zext(<n x i1>))
2625       //     -->
2626       //   zext(vector_reduce_{and,or}(<n x i1>))
2627       Value *Arg = II->getArgOperand(0);
2628       Value *Vect;
2629       if (match(Arg, m_ZExtOrSExtOrSelf(m_Value(Vect)))) {
2630         if (auto *FTy = dyn_cast<FixedVectorType>(Vect->getType()))
2631           if (FTy->getElementType() == Builder.getInt1Ty()) {
2632             Instruction::CastOps ExtOpc = Instruction::CastOps::CastOpsEnd;
2633             if (Arg != Vect)
2634               ExtOpc = cast<CastInst>(Arg)->getOpcode();
2635             Value *Res = ((IID == Intrinsic::vector_reduce_smin) ==
2636                           (ExtOpc == Instruction::CastOps::ZExt))
2637                              ? Builder.CreateAndReduce(Vect)
2638                              : Builder.CreateOrReduce(Vect);
2639             if (Arg != Vect)
2640               Res = Builder.CreateCast(ExtOpc, Res, II->getType());
2641             return replaceInstUsesWith(CI, Res);
2642           }
2643       }
2644     }
2645     LLVM_FALLTHROUGH;
2646   }
2647   case Intrinsic::vector_reduce_fmax:
2648   case Intrinsic::vector_reduce_fmin:
2649   case Intrinsic::vector_reduce_fadd:
2650   case Intrinsic::vector_reduce_fmul: {
2651     bool CanBeReassociated = (IID != Intrinsic::vector_reduce_fadd &&
2652                               IID != Intrinsic::vector_reduce_fmul) ||
2653                              II->hasAllowReassoc();
2654     const unsigned ArgIdx = (IID == Intrinsic::vector_reduce_fadd ||
2655                              IID == Intrinsic::vector_reduce_fmul)
2656                                 ? 1
2657                                 : 0;
2658     Value *Arg = II->getArgOperand(ArgIdx);
2659     Value *V;
2660     ArrayRef<int> Mask;
2661     if (!isa<FixedVectorType>(Arg->getType()) || !CanBeReassociated ||
2662         !match(Arg, m_Shuffle(m_Value(V), m_Undef(), m_Mask(Mask))) ||
2663         !cast<ShuffleVectorInst>(Arg)->isSingleSource())
2664       break;
2665     int Sz = Mask.size();
2666     SmallBitVector UsedIndices(Sz);
2667     for (int Idx : Mask) {
2668       if (Idx == UndefMaskElem || UsedIndices.test(Idx))
2669         break;
2670       UsedIndices.set(Idx);
2671     }
2672     // Can remove shuffle iff just shuffled elements, no repeats, undefs, or
2673     // other changes.
2674     if (UsedIndices.all()) {
2675       replaceUse(II->getOperandUse(ArgIdx), V);
2676       return nullptr;
2677     }
2678     break;
2679   }
2680   default: {
2681     // Handle target specific intrinsics
2682     Optional<Instruction *> V = targetInstCombineIntrinsic(*II);
2683     if (V)
2684       return V.value();
2685     break;
2686   }
2687   }
2688 
2689   if (Instruction *Shuf = foldShuffledIntrinsicOperands(II, Builder))
2690     return Shuf;
2691 
2692   // Some intrinsics (like experimental_gc_statepoint) can be used in invoke
2693   // context, so it is handled in visitCallBase and we should trigger it.
2694   return visitCallBase(*II);
2695 }
2696 
2697 // Fence instruction simplification
2698 Instruction *InstCombinerImpl::visitFenceInst(FenceInst &FI) {
2699   auto *NFI = dyn_cast<FenceInst>(FI.getNextNonDebugInstruction());
2700   // This check is solely here to handle arbitrary target-dependent syncscopes.
2701   // TODO: Can remove if does not matter in practice.
2702   if (NFI && FI.isIdenticalTo(NFI))
2703     return eraseInstFromFunction(FI);
2704 
2705   // Returns true if FI1 is identical or stronger fence than FI2.
2706   auto isIdenticalOrStrongerFence = [](FenceInst *FI1, FenceInst *FI2) {
2707     auto FI1SyncScope = FI1->getSyncScopeID();
2708     // Consider same scope, where scope is global or single-thread.
2709     if (FI1SyncScope != FI2->getSyncScopeID() ||
2710         (FI1SyncScope != SyncScope::System &&
2711          FI1SyncScope != SyncScope::SingleThread))
2712       return false;
2713 
2714     return isAtLeastOrStrongerThan(FI1->getOrdering(), FI2->getOrdering());
2715   };
2716   if (NFI && isIdenticalOrStrongerFence(NFI, &FI))
2717     return eraseInstFromFunction(FI);
2718 
2719   if (auto *PFI = dyn_cast_or_null<FenceInst>(FI.getPrevNonDebugInstruction()))
2720     if (isIdenticalOrStrongerFence(PFI, &FI))
2721       return eraseInstFromFunction(FI);
2722   return nullptr;
2723 }
2724 
2725 // InvokeInst simplification
2726 Instruction *InstCombinerImpl::visitInvokeInst(InvokeInst &II) {
2727   return visitCallBase(II);
2728 }
2729 
2730 // CallBrInst simplification
2731 Instruction *InstCombinerImpl::visitCallBrInst(CallBrInst &CBI) {
2732   return visitCallBase(CBI);
2733 }
2734 
2735 /// If this cast does not affect the value passed through the varargs area, we
2736 /// can eliminate the use of the cast.
2737 static bool isSafeToEliminateVarargsCast(const CallBase &Call,
2738                                          const DataLayout &DL,
2739                                          const CastInst *const CI,
2740                                          const int ix) {
2741   if (!CI->isLosslessCast())
2742     return false;
2743 
2744   // If this is a GC intrinsic, avoid munging types.  We need types for
2745   // statepoint reconstruction in SelectionDAG.
2746   // TODO: This is probably something which should be expanded to all
2747   // intrinsics since the entire point of intrinsics is that
2748   // they are understandable by the optimizer.
2749   if (isa<GCStatepointInst>(Call) || isa<GCRelocateInst>(Call) ||
2750       isa<GCResultInst>(Call))
2751     return false;
2752 
2753   // Opaque pointers are compatible with any byval types.
2754   PointerType *SrcTy = cast<PointerType>(CI->getOperand(0)->getType());
2755   if (SrcTy->isOpaque())
2756     return true;
2757 
2758   // The size of ByVal or InAlloca arguments is derived from the type, so we
2759   // can't change to a type with a different size.  If the size were
2760   // passed explicitly we could avoid this check.
2761   if (!Call.isPassPointeeByValueArgument(ix))
2762     return true;
2763 
2764   // The transform currently only handles type replacement for byval, not other
2765   // type-carrying attributes.
2766   if (!Call.isByValArgument(ix))
2767     return false;
2768 
2769   Type *SrcElemTy = SrcTy->getNonOpaquePointerElementType();
2770   Type *DstElemTy = Call.getParamByValType(ix);
2771   if (!SrcElemTy->isSized() || !DstElemTy->isSized())
2772     return false;
2773   if (DL.getTypeAllocSize(SrcElemTy) != DL.getTypeAllocSize(DstElemTy))
2774     return false;
2775   return true;
2776 }
2777 
2778 Instruction *InstCombinerImpl::tryOptimizeCall(CallInst *CI) {
2779   if (!CI->getCalledFunction()) return nullptr;
2780 
2781   // Skip optimizing notail and musttail calls so
2782   // LibCallSimplifier::optimizeCall doesn't have to preserve those invariants.
2783   // LibCallSimplifier::optimizeCall should try to preseve tail calls though.
2784   if (CI->isMustTailCall() || CI->isNoTailCall())
2785     return nullptr;
2786 
2787   auto InstCombineRAUW = [this](Instruction *From, Value *With) {
2788     replaceInstUsesWith(*From, With);
2789   };
2790   auto InstCombineErase = [this](Instruction *I) {
2791     eraseInstFromFunction(*I);
2792   };
2793   LibCallSimplifier Simplifier(DL, &TLI, ORE, BFI, PSI, InstCombineRAUW,
2794                                InstCombineErase);
2795   if (Value *With = Simplifier.optimizeCall(CI, Builder)) {
2796     ++NumSimplified;
2797     return CI->use_empty() ? CI : replaceInstUsesWith(*CI, With);
2798   }
2799 
2800   return nullptr;
2801 }
2802 
2803 static IntrinsicInst *findInitTrampolineFromAlloca(Value *TrampMem) {
2804   // Strip off at most one level of pointer casts, looking for an alloca.  This
2805   // is good enough in practice and simpler than handling any number of casts.
2806   Value *Underlying = TrampMem->stripPointerCasts();
2807   if (Underlying != TrampMem &&
2808       (!Underlying->hasOneUse() || Underlying->user_back() != TrampMem))
2809     return nullptr;
2810   if (!isa<AllocaInst>(Underlying))
2811     return nullptr;
2812 
2813   IntrinsicInst *InitTrampoline = nullptr;
2814   for (User *U : TrampMem->users()) {
2815     IntrinsicInst *II = dyn_cast<IntrinsicInst>(U);
2816     if (!II)
2817       return nullptr;
2818     if (II->getIntrinsicID() == Intrinsic::init_trampoline) {
2819       if (InitTrampoline)
2820         // More than one init_trampoline writes to this value.  Give up.
2821         return nullptr;
2822       InitTrampoline = II;
2823       continue;
2824     }
2825     if (II->getIntrinsicID() == Intrinsic::adjust_trampoline)
2826       // Allow any number of calls to adjust.trampoline.
2827       continue;
2828     return nullptr;
2829   }
2830 
2831   // No call to init.trampoline found.
2832   if (!InitTrampoline)
2833     return nullptr;
2834 
2835   // Check that the alloca is being used in the expected way.
2836   if (InitTrampoline->getOperand(0) != TrampMem)
2837     return nullptr;
2838 
2839   return InitTrampoline;
2840 }
2841 
2842 static IntrinsicInst *findInitTrampolineFromBB(IntrinsicInst *AdjustTramp,
2843                                                Value *TrampMem) {
2844   // Visit all the previous instructions in the basic block, and try to find a
2845   // init.trampoline which has a direct path to the adjust.trampoline.
2846   for (BasicBlock::iterator I = AdjustTramp->getIterator(),
2847                             E = AdjustTramp->getParent()->begin();
2848        I != E;) {
2849     Instruction *Inst = &*--I;
2850     if (IntrinsicInst *II = dyn_cast<IntrinsicInst>(I))
2851       if (II->getIntrinsicID() == Intrinsic::init_trampoline &&
2852           II->getOperand(0) == TrampMem)
2853         return II;
2854     if (Inst->mayWriteToMemory())
2855       return nullptr;
2856   }
2857   return nullptr;
2858 }
2859 
2860 // Given a call to llvm.adjust.trampoline, find and return the corresponding
2861 // call to llvm.init.trampoline if the call to the trampoline can be optimized
2862 // to a direct call to a function.  Otherwise return NULL.
2863 static IntrinsicInst *findInitTrampoline(Value *Callee) {
2864   Callee = Callee->stripPointerCasts();
2865   IntrinsicInst *AdjustTramp = dyn_cast<IntrinsicInst>(Callee);
2866   if (!AdjustTramp ||
2867       AdjustTramp->getIntrinsicID() != Intrinsic::adjust_trampoline)
2868     return nullptr;
2869 
2870   Value *TrampMem = AdjustTramp->getOperand(0);
2871 
2872   if (IntrinsicInst *IT = findInitTrampolineFromAlloca(TrampMem))
2873     return IT;
2874   if (IntrinsicInst *IT = findInitTrampolineFromBB(AdjustTramp, TrampMem))
2875     return IT;
2876   return nullptr;
2877 }
2878 
2879 bool InstCombinerImpl::annotateAnyAllocSite(CallBase &Call,
2880                                             const TargetLibraryInfo *TLI) {
2881   // Note: We only handle cases which can't be driven from generic attributes
2882   // here.  So, for example, nonnull and noalias (which are common properties
2883   // of some allocation functions) are expected to be handled via annotation
2884   // of the respective allocator declaration with generic attributes.
2885   bool Changed = false;
2886 
2887   if (!Call.getType()->isPointerTy())
2888     return Changed;
2889 
2890   Optional<APInt> Size = getAllocSize(&Call, TLI);
2891   if (Size && *Size != 0) {
2892     // TODO: We really should just emit deref_or_null here and then
2893     // let the generic inference code combine that with nonnull.
2894     if (Call.hasRetAttr(Attribute::NonNull)) {
2895       Changed = !Call.hasRetAttr(Attribute::Dereferenceable);
2896       Call.addRetAttr(Attribute::getWithDereferenceableBytes(
2897           Call.getContext(), Size->getLimitedValue()));
2898     } else {
2899       Changed = !Call.hasRetAttr(Attribute::DereferenceableOrNull);
2900       Call.addRetAttr(Attribute::getWithDereferenceableOrNullBytes(
2901           Call.getContext(), Size->getLimitedValue()));
2902     }
2903   }
2904 
2905   // Add alignment attribute if alignment is a power of two constant.
2906   Value *Alignment = getAllocAlignment(&Call, TLI);
2907   if (!Alignment)
2908     return Changed;
2909 
2910   ConstantInt *AlignOpC = dyn_cast<ConstantInt>(Alignment);
2911   if (AlignOpC && AlignOpC->getValue().ult(llvm::Value::MaximumAlignment)) {
2912     uint64_t AlignmentVal = AlignOpC->getZExtValue();
2913     if (llvm::isPowerOf2_64(AlignmentVal)) {
2914       Align ExistingAlign = Call.getRetAlign().valueOrOne();
2915       Align NewAlign = Align(AlignmentVal);
2916       if (NewAlign > ExistingAlign) {
2917         Call.addRetAttr(
2918             Attribute::getWithAlignment(Call.getContext(), NewAlign));
2919         Changed = true;
2920       }
2921     }
2922   }
2923   return Changed;
2924 }
2925 
2926 /// Improvements for call, callbr and invoke instructions.
2927 Instruction *InstCombinerImpl::visitCallBase(CallBase &Call) {
2928   bool Changed = annotateAnyAllocSite(Call, &TLI);
2929 
2930   // Mark any parameters that are known to be non-null with the nonnull
2931   // attribute.  This is helpful for inlining calls to functions with null
2932   // checks on their arguments.
2933   SmallVector<unsigned, 4> ArgNos;
2934   unsigned ArgNo = 0;
2935 
2936   for (Value *V : Call.args()) {
2937     if (V->getType()->isPointerTy() &&
2938         !Call.paramHasAttr(ArgNo, Attribute::NonNull) &&
2939         isKnownNonZero(V, DL, 0, &AC, &Call, &DT))
2940       ArgNos.push_back(ArgNo);
2941     ArgNo++;
2942   }
2943 
2944   assert(ArgNo == Call.arg_size() && "Call arguments not processed correctly.");
2945 
2946   if (!ArgNos.empty()) {
2947     AttributeList AS = Call.getAttributes();
2948     LLVMContext &Ctx = Call.getContext();
2949     AS = AS.addParamAttribute(Ctx, ArgNos,
2950                               Attribute::get(Ctx, Attribute::NonNull));
2951     Call.setAttributes(AS);
2952     Changed = true;
2953   }
2954 
2955   // If the callee is a pointer to a function, attempt to move any casts to the
2956   // arguments of the call/callbr/invoke.
2957   Value *Callee = Call.getCalledOperand();
2958   Function *CalleeF = dyn_cast<Function>(Callee);
2959   if ((!CalleeF || CalleeF->getFunctionType() != Call.getFunctionType()) &&
2960       transformConstExprCastCall(Call))
2961     return nullptr;
2962 
2963   if (CalleeF) {
2964     // Remove the convergent attr on calls when the callee is not convergent.
2965     if (Call.isConvergent() && !CalleeF->isConvergent() &&
2966         !CalleeF->isIntrinsic()) {
2967       LLVM_DEBUG(dbgs() << "Removing convergent attr from instr " << Call
2968                         << "\n");
2969       Call.setNotConvergent();
2970       return &Call;
2971     }
2972 
2973     // If the call and callee calling conventions don't match, and neither one
2974     // of the calling conventions is compatible with C calling convention
2975     // this call must be unreachable, as the call is undefined.
2976     if ((CalleeF->getCallingConv() != Call.getCallingConv() &&
2977          !(CalleeF->getCallingConv() == llvm::CallingConv::C &&
2978            TargetLibraryInfoImpl::isCallingConvCCompatible(&Call)) &&
2979          !(Call.getCallingConv() == llvm::CallingConv::C &&
2980            TargetLibraryInfoImpl::isCallingConvCCompatible(CalleeF))) &&
2981         // Only do this for calls to a function with a body.  A prototype may
2982         // not actually end up matching the implementation's calling conv for a
2983         // variety of reasons (e.g. it may be written in assembly).
2984         !CalleeF->isDeclaration()) {
2985       Instruction *OldCall = &Call;
2986       CreateNonTerminatorUnreachable(OldCall);
2987       // If OldCall does not return void then replaceInstUsesWith poison.
2988       // This allows ValueHandlers and custom metadata to adjust itself.
2989       if (!OldCall->getType()->isVoidTy())
2990         replaceInstUsesWith(*OldCall, PoisonValue::get(OldCall->getType()));
2991       if (isa<CallInst>(OldCall))
2992         return eraseInstFromFunction(*OldCall);
2993 
2994       // We cannot remove an invoke or a callbr, because it would change thexi
2995       // CFG, just change the callee to a null pointer.
2996       cast<CallBase>(OldCall)->setCalledFunction(
2997           CalleeF->getFunctionType(),
2998           Constant::getNullValue(CalleeF->getType()));
2999       return nullptr;
3000     }
3001   }
3002 
3003   // Calling a null function pointer is undefined if a null address isn't
3004   // dereferenceable.
3005   if ((isa<ConstantPointerNull>(Callee) &&
3006        !NullPointerIsDefined(Call.getFunction())) ||
3007       isa<UndefValue>(Callee)) {
3008     // If Call does not return void then replaceInstUsesWith poison.
3009     // This allows ValueHandlers and custom metadata to adjust itself.
3010     if (!Call.getType()->isVoidTy())
3011       replaceInstUsesWith(Call, PoisonValue::get(Call.getType()));
3012 
3013     if (Call.isTerminator()) {
3014       // Can't remove an invoke or callbr because we cannot change the CFG.
3015       return nullptr;
3016     }
3017 
3018     // This instruction is not reachable, just remove it.
3019     CreateNonTerminatorUnreachable(&Call);
3020     return eraseInstFromFunction(Call);
3021   }
3022 
3023   if (IntrinsicInst *II = findInitTrampoline(Callee))
3024     return transformCallThroughTrampoline(Call, *II);
3025 
3026   // TODO: Drop this transform once opaque pointer transition is done.
3027   FunctionType *FTy = Call.getFunctionType();
3028   if (FTy->isVarArg()) {
3029     int ix = FTy->getNumParams();
3030     // See if we can optimize any arguments passed through the varargs area of
3031     // the call.
3032     for (auto I = Call.arg_begin() + FTy->getNumParams(), E = Call.arg_end();
3033          I != E; ++I, ++ix) {
3034       CastInst *CI = dyn_cast<CastInst>(*I);
3035       if (CI && isSafeToEliminateVarargsCast(Call, DL, CI, ix)) {
3036         replaceUse(*I, CI->getOperand(0));
3037 
3038         // Update the byval type to match the pointer type.
3039         // Not necessary for opaque pointers.
3040         PointerType *NewTy = cast<PointerType>(CI->getOperand(0)->getType());
3041         if (!NewTy->isOpaque() && Call.isByValArgument(ix)) {
3042           Call.removeParamAttr(ix, Attribute::ByVal);
3043           Call.addParamAttr(ix, Attribute::getWithByValType(
3044                                     Call.getContext(),
3045                                     NewTy->getNonOpaquePointerElementType()));
3046         }
3047         Changed = true;
3048       }
3049     }
3050   }
3051 
3052   if (isa<InlineAsm>(Callee) && !Call.doesNotThrow()) {
3053     InlineAsm *IA = cast<InlineAsm>(Callee);
3054     if (!IA->canThrow()) {
3055       // Normal inline asm calls cannot throw - mark them
3056       // 'nounwind'.
3057       Call.setDoesNotThrow();
3058       Changed = true;
3059     }
3060   }
3061 
3062   // Try to optimize the call if possible, we require DataLayout for most of
3063   // this.  None of these calls are seen as possibly dead so go ahead and
3064   // delete the instruction now.
3065   if (CallInst *CI = dyn_cast<CallInst>(&Call)) {
3066     Instruction *I = tryOptimizeCall(CI);
3067     // If we changed something return the result, etc. Otherwise let
3068     // the fallthrough check.
3069     if (I) return eraseInstFromFunction(*I);
3070   }
3071 
3072   if (!Call.use_empty() && !Call.isMustTailCall())
3073     if (Value *ReturnedArg = Call.getReturnedArgOperand()) {
3074       Type *CallTy = Call.getType();
3075       Type *RetArgTy = ReturnedArg->getType();
3076       if (RetArgTy->canLosslesslyBitCastTo(CallTy))
3077         return replaceInstUsesWith(
3078             Call, Builder.CreateBitOrPointerCast(ReturnedArg, CallTy));
3079     }
3080 
3081   if (isRemovableAlloc(&Call, &TLI))
3082     return visitAllocSite(Call);
3083 
3084   // Handle intrinsics which can be used in both call and invoke context.
3085   switch (Call.getIntrinsicID()) {
3086   case Intrinsic::experimental_gc_statepoint: {
3087     GCStatepointInst &GCSP = *cast<GCStatepointInst>(&Call);
3088     SmallPtrSet<Value *, 32> LiveGcValues;
3089     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
3090       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
3091 
3092       // Remove the relocation if unused.
3093       if (GCR.use_empty()) {
3094         eraseInstFromFunction(GCR);
3095         continue;
3096       }
3097 
3098       Value *DerivedPtr = GCR.getDerivedPtr();
3099       Value *BasePtr = GCR.getBasePtr();
3100 
3101       // Undef is undef, even after relocation.
3102       if (isa<UndefValue>(DerivedPtr) || isa<UndefValue>(BasePtr)) {
3103         replaceInstUsesWith(GCR, UndefValue::get(GCR.getType()));
3104         eraseInstFromFunction(GCR);
3105         continue;
3106       }
3107 
3108       if (auto *PT = dyn_cast<PointerType>(GCR.getType())) {
3109         // The relocation of null will be null for most any collector.
3110         // TODO: provide a hook for this in GCStrategy.  There might be some
3111         // weird collector this property does not hold for.
3112         if (isa<ConstantPointerNull>(DerivedPtr)) {
3113           // Use null-pointer of gc_relocate's type to replace it.
3114           replaceInstUsesWith(GCR, ConstantPointerNull::get(PT));
3115           eraseInstFromFunction(GCR);
3116           continue;
3117         }
3118 
3119         // isKnownNonNull -> nonnull attribute
3120         if (!GCR.hasRetAttr(Attribute::NonNull) &&
3121             isKnownNonZero(DerivedPtr, DL, 0, &AC, &Call, &DT)) {
3122           GCR.addRetAttr(Attribute::NonNull);
3123           // We discovered new fact, re-check users.
3124           Worklist.pushUsersToWorkList(GCR);
3125         }
3126       }
3127 
3128       // If we have two copies of the same pointer in the statepoint argument
3129       // list, canonicalize to one.  This may let us common gc.relocates.
3130       if (GCR.getBasePtr() == GCR.getDerivedPtr() &&
3131           GCR.getBasePtrIndex() != GCR.getDerivedPtrIndex()) {
3132         auto *OpIntTy = GCR.getOperand(2)->getType();
3133         GCR.setOperand(2, ConstantInt::get(OpIntTy, GCR.getBasePtrIndex()));
3134       }
3135 
3136       // TODO: bitcast(relocate(p)) -> relocate(bitcast(p))
3137       // Canonicalize on the type from the uses to the defs
3138 
3139       // TODO: relocate((gep p, C, C2, ...)) -> gep(relocate(p), C, C2, ...)
3140       LiveGcValues.insert(BasePtr);
3141       LiveGcValues.insert(DerivedPtr);
3142     }
3143     Optional<OperandBundleUse> Bundle =
3144         GCSP.getOperandBundle(LLVMContext::OB_gc_live);
3145     unsigned NumOfGCLives = LiveGcValues.size();
3146     if (!Bundle || NumOfGCLives == Bundle->Inputs.size())
3147       break;
3148     // We can reduce the size of gc live bundle.
3149     DenseMap<Value *, unsigned> Val2Idx;
3150     std::vector<Value *> NewLiveGc;
3151     for (unsigned I = 0, E = Bundle->Inputs.size(); I < E; ++I) {
3152       Value *V = Bundle->Inputs[I];
3153       if (Val2Idx.count(V))
3154         continue;
3155       if (LiveGcValues.count(V)) {
3156         Val2Idx[V] = NewLiveGc.size();
3157         NewLiveGc.push_back(V);
3158       } else
3159         Val2Idx[V] = NumOfGCLives;
3160     }
3161     // Update all gc.relocates
3162     for (const GCRelocateInst *Reloc : GCSP.getGCRelocates()) {
3163       GCRelocateInst &GCR = *const_cast<GCRelocateInst *>(Reloc);
3164       Value *BasePtr = GCR.getBasePtr();
3165       assert(Val2Idx.count(BasePtr) && Val2Idx[BasePtr] != NumOfGCLives &&
3166              "Missed live gc for base pointer");
3167       auto *OpIntTy1 = GCR.getOperand(1)->getType();
3168       GCR.setOperand(1, ConstantInt::get(OpIntTy1, Val2Idx[BasePtr]));
3169       Value *DerivedPtr = GCR.getDerivedPtr();
3170       assert(Val2Idx.count(DerivedPtr) && Val2Idx[DerivedPtr] != NumOfGCLives &&
3171              "Missed live gc for derived pointer");
3172       auto *OpIntTy2 = GCR.getOperand(2)->getType();
3173       GCR.setOperand(2, ConstantInt::get(OpIntTy2, Val2Idx[DerivedPtr]));
3174     }
3175     // Create new statepoint instruction.
3176     OperandBundleDef NewBundle("gc-live", NewLiveGc);
3177     return CallBase::Create(&Call, NewBundle);
3178   }
3179   default: { break; }
3180   }
3181 
3182   return Changed ? &Call : nullptr;
3183 }
3184 
3185 /// If the callee is a constexpr cast of a function, attempt to move the cast to
3186 /// the arguments of the call/callbr/invoke.
3187 bool InstCombinerImpl::transformConstExprCastCall(CallBase &Call) {
3188   auto *Callee =
3189       dyn_cast<Function>(Call.getCalledOperand()->stripPointerCasts());
3190   if (!Callee)
3191     return false;
3192 
3193   // If this is a call to a thunk function, don't remove the cast. Thunks are
3194   // used to transparently forward all incoming parameters and outgoing return
3195   // values, so it's important to leave the cast in place.
3196   if (Callee->hasFnAttribute("thunk"))
3197     return false;
3198 
3199   // If this is a musttail call, the callee's prototype must match the caller's
3200   // prototype with the exception of pointee types. The code below doesn't
3201   // implement that, so we can't do this transform.
3202   // TODO: Do the transform if it only requires adding pointer casts.
3203   if (Call.isMustTailCall())
3204     return false;
3205 
3206   Instruction *Caller = &Call;
3207   const AttributeList &CallerPAL = Call.getAttributes();
3208 
3209   // Okay, this is a cast from a function to a different type.  Unless doing so
3210   // would cause a type conversion of one of our arguments, change this call to
3211   // be a direct call with arguments casted to the appropriate types.
3212   FunctionType *FT = Callee->getFunctionType();
3213   Type *OldRetTy = Caller->getType();
3214   Type *NewRetTy = FT->getReturnType();
3215 
3216   // Check to see if we are changing the return type...
3217   if (OldRetTy != NewRetTy) {
3218 
3219     if (NewRetTy->isStructTy())
3220       return false; // TODO: Handle multiple return values.
3221 
3222     if (!CastInst::isBitOrNoopPointerCastable(NewRetTy, OldRetTy, DL)) {
3223       if (Callee->isDeclaration())
3224         return false;   // Cannot transform this return value.
3225 
3226       if (!Caller->use_empty() &&
3227           // void -> non-void is handled specially
3228           !NewRetTy->isVoidTy())
3229         return false;   // Cannot transform this return value.
3230     }
3231 
3232     if (!CallerPAL.isEmpty() && !Caller->use_empty()) {
3233       AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
3234       if (RAttrs.overlaps(AttributeFuncs::typeIncompatible(NewRetTy)))
3235         return false;   // Attribute not compatible with transformed value.
3236     }
3237 
3238     // If the callbase is an invoke/callbr instruction, and the return value is
3239     // used by a PHI node in a successor, we cannot change the return type of
3240     // the call because there is no place to put the cast instruction (without
3241     // breaking the critical edge).  Bail out in this case.
3242     if (!Caller->use_empty()) {
3243       BasicBlock *PhisNotSupportedBlock = nullptr;
3244       if (auto *II = dyn_cast<InvokeInst>(Caller))
3245         PhisNotSupportedBlock = II->getNormalDest();
3246       if (auto *CB = dyn_cast<CallBrInst>(Caller))
3247         PhisNotSupportedBlock = CB->getDefaultDest();
3248       if (PhisNotSupportedBlock)
3249         for (User *U : Caller->users())
3250           if (PHINode *PN = dyn_cast<PHINode>(U))
3251             if (PN->getParent() == PhisNotSupportedBlock)
3252               return false;
3253     }
3254   }
3255 
3256   unsigned NumActualArgs = Call.arg_size();
3257   unsigned NumCommonArgs = std::min(FT->getNumParams(), NumActualArgs);
3258 
3259   // Prevent us turning:
3260   // declare void @takes_i32_inalloca(i32* inalloca)
3261   //  call void bitcast (void (i32*)* @takes_i32_inalloca to void (i32)*)(i32 0)
3262   //
3263   // into:
3264   //  call void @takes_i32_inalloca(i32* null)
3265   //
3266   //  Similarly, avoid folding away bitcasts of byval calls.
3267   if (Callee->getAttributes().hasAttrSomewhere(Attribute::InAlloca) ||
3268       Callee->getAttributes().hasAttrSomewhere(Attribute::Preallocated))
3269     return false;
3270 
3271   auto AI = Call.arg_begin();
3272   for (unsigned i = 0, e = NumCommonArgs; i != e; ++i, ++AI) {
3273     Type *ParamTy = FT->getParamType(i);
3274     Type *ActTy = (*AI)->getType();
3275 
3276     if (!CastInst::isBitOrNoopPointerCastable(ActTy, ParamTy, DL))
3277       return false;   // Cannot transform this parameter value.
3278 
3279     // Check if there are any incompatible attributes we cannot drop safely.
3280     if (AttrBuilder(FT->getContext(), CallerPAL.getParamAttrs(i))
3281             .overlaps(AttributeFuncs::typeIncompatible(
3282                 ParamTy, AttributeFuncs::ASK_UNSAFE_TO_DROP)))
3283       return false;   // Attribute not compatible with transformed value.
3284 
3285     if (Call.isInAllocaArgument(i) ||
3286         CallerPAL.hasParamAttr(i, Attribute::Preallocated))
3287       return false; // Cannot transform to and from inalloca/preallocated.
3288 
3289     if (CallerPAL.hasParamAttr(i, Attribute::SwiftError))
3290       return false;
3291 
3292     if (CallerPAL.hasParamAttr(i, Attribute::ByVal) !=
3293         Callee->getAttributes().hasParamAttr(i, Attribute::ByVal))
3294       return false; // Cannot transform to or from byval.
3295 
3296     // If the parameter is passed as a byval argument, then we have to have a
3297     // sized type and the sized type has to have the same size as the old type.
3298     if (ParamTy != ActTy && CallerPAL.hasParamAttr(i, Attribute::ByVal)) {
3299       PointerType *ParamPTy = dyn_cast<PointerType>(ParamTy);
3300       if (!ParamPTy)
3301         return false;
3302 
3303       if (!ParamPTy->isOpaque()) {
3304         Type *ParamElTy = ParamPTy->getNonOpaquePointerElementType();
3305         if (!ParamElTy->isSized())
3306           return false;
3307 
3308         Type *CurElTy = Call.getParamByValType(i);
3309         if (DL.getTypeAllocSize(CurElTy) != DL.getTypeAllocSize(ParamElTy))
3310           return false;
3311       }
3312     }
3313   }
3314 
3315   if (Callee->isDeclaration()) {
3316     // Do not delete arguments unless we have a function body.
3317     if (FT->getNumParams() < NumActualArgs && !FT->isVarArg())
3318       return false;
3319 
3320     // If the callee is just a declaration, don't change the varargsness of the
3321     // call.  We don't want to introduce a varargs call where one doesn't
3322     // already exist.
3323     if (FT->isVarArg() != Call.getFunctionType()->isVarArg())
3324       return false;
3325 
3326     // If both the callee and the cast type are varargs, we still have to make
3327     // sure the number of fixed parameters are the same or we have the same
3328     // ABI issues as if we introduce a varargs call.
3329     if (FT->isVarArg() && Call.getFunctionType()->isVarArg() &&
3330         FT->getNumParams() != Call.getFunctionType()->getNumParams())
3331       return false;
3332   }
3333 
3334   if (FT->getNumParams() < NumActualArgs && FT->isVarArg() &&
3335       !CallerPAL.isEmpty()) {
3336     // In this case we have more arguments than the new function type, but we
3337     // won't be dropping them.  Check that these extra arguments have attributes
3338     // that are compatible with being a vararg call argument.
3339     unsigned SRetIdx;
3340     if (CallerPAL.hasAttrSomewhere(Attribute::StructRet, &SRetIdx) &&
3341         SRetIdx - AttributeList::FirstArgIndex >= FT->getNumParams())
3342       return false;
3343   }
3344 
3345   // Okay, we decided that this is a safe thing to do: go ahead and start
3346   // inserting cast instructions as necessary.
3347   SmallVector<Value *, 8> Args;
3348   SmallVector<AttributeSet, 8> ArgAttrs;
3349   Args.reserve(NumActualArgs);
3350   ArgAttrs.reserve(NumActualArgs);
3351 
3352   // Get any return attributes.
3353   AttrBuilder RAttrs(FT->getContext(), CallerPAL.getRetAttrs());
3354 
3355   // If the return value is not being used, the type may not be compatible
3356   // with the existing attributes.  Wipe out any problematic attributes.
3357   RAttrs.remove(AttributeFuncs::typeIncompatible(NewRetTy));
3358 
3359   LLVMContext &Ctx = Call.getContext();
3360   AI = Call.arg_begin();
3361   for (unsigned i = 0; i != NumCommonArgs; ++i, ++AI) {
3362     Type *ParamTy = FT->getParamType(i);
3363 
3364     Value *NewArg = *AI;
3365     if ((*AI)->getType() != ParamTy)
3366       NewArg = Builder.CreateBitOrPointerCast(*AI, ParamTy);
3367     Args.push_back(NewArg);
3368 
3369     // Add any parameter attributes except the ones incompatible with the new
3370     // type. Note that we made sure all incompatible ones are safe to drop.
3371     AttributeMask IncompatibleAttrs = AttributeFuncs::typeIncompatible(
3372         ParamTy, AttributeFuncs::ASK_SAFE_TO_DROP);
3373     if (CallerPAL.hasParamAttr(i, Attribute::ByVal) &&
3374         !ParamTy->isOpaquePointerTy()) {
3375       AttrBuilder AB(Ctx, CallerPAL.getParamAttrs(i).removeAttributes(
3376                               Ctx, IncompatibleAttrs));
3377       AB.addByValAttr(ParamTy->getNonOpaquePointerElementType());
3378       ArgAttrs.push_back(AttributeSet::get(Ctx, AB));
3379     } else {
3380       ArgAttrs.push_back(
3381           CallerPAL.getParamAttrs(i).removeAttributes(Ctx, IncompatibleAttrs));
3382     }
3383   }
3384 
3385   // If the function takes more arguments than the call was taking, add them
3386   // now.
3387   for (unsigned i = NumCommonArgs; i != FT->getNumParams(); ++i) {
3388     Args.push_back(Constant::getNullValue(FT->getParamType(i)));
3389     ArgAttrs.push_back(AttributeSet());
3390   }
3391 
3392   // If we are removing arguments to the function, emit an obnoxious warning.
3393   if (FT->getNumParams() < NumActualArgs) {
3394     // TODO: if (!FT->isVarArg()) this call may be unreachable. PR14722
3395     if (FT->isVarArg()) {
3396       // Add all of the arguments in their promoted form to the arg list.
3397       for (unsigned i = FT->getNumParams(); i != NumActualArgs; ++i, ++AI) {
3398         Type *PTy = getPromotedType((*AI)->getType());
3399         Value *NewArg = *AI;
3400         if (PTy != (*AI)->getType()) {
3401           // Must promote to pass through va_arg area!
3402           Instruction::CastOps opcode =
3403             CastInst::getCastOpcode(*AI, false, PTy, false);
3404           NewArg = Builder.CreateCast(opcode, *AI, PTy);
3405         }
3406         Args.push_back(NewArg);
3407 
3408         // Add any parameter attributes.
3409         ArgAttrs.push_back(CallerPAL.getParamAttrs(i));
3410       }
3411     }
3412   }
3413 
3414   AttributeSet FnAttrs = CallerPAL.getFnAttrs();
3415 
3416   if (NewRetTy->isVoidTy())
3417     Caller->setName("");   // Void type should not have a name.
3418 
3419   assert((ArgAttrs.size() == FT->getNumParams() || FT->isVarArg()) &&
3420          "missing argument attributes");
3421   AttributeList NewCallerPAL = AttributeList::get(
3422       Ctx, FnAttrs, AttributeSet::get(Ctx, RAttrs), ArgAttrs);
3423 
3424   SmallVector<OperandBundleDef, 1> OpBundles;
3425   Call.getOperandBundlesAsDefs(OpBundles);
3426 
3427   CallBase *NewCall;
3428   if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3429     NewCall = Builder.CreateInvoke(Callee, II->getNormalDest(),
3430                                    II->getUnwindDest(), Args, OpBundles);
3431   } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
3432     NewCall = Builder.CreateCallBr(Callee, CBI->getDefaultDest(),
3433                                    CBI->getIndirectDests(), Args, OpBundles);
3434   } else {
3435     NewCall = Builder.CreateCall(Callee, Args, OpBundles);
3436     cast<CallInst>(NewCall)->setTailCallKind(
3437         cast<CallInst>(Caller)->getTailCallKind());
3438   }
3439   NewCall->takeName(Caller);
3440   NewCall->setCallingConv(Call.getCallingConv());
3441   NewCall->setAttributes(NewCallerPAL);
3442 
3443   // Preserve prof metadata if any.
3444   NewCall->copyMetadata(*Caller, {LLVMContext::MD_prof});
3445 
3446   // Insert a cast of the return type as necessary.
3447   Instruction *NC = NewCall;
3448   Value *NV = NC;
3449   if (OldRetTy != NV->getType() && !Caller->use_empty()) {
3450     if (!NV->getType()->isVoidTy()) {
3451       NV = NC = CastInst::CreateBitOrPointerCast(NC, OldRetTy);
3452       NC->setDebugLoc(Caller->getDebugLoc());
3453 
3454       // If this is an invoke/callbr instruction, we should insert it after the
3455       // first non-phi instruction in the normal successor block.
3456       if (InvokeInst *II = dyn_cast<InvokeInst>(Caller)) {
3457         BasicBlock::iterator I = II->getNormalDest()->getFirstInsertionPt();
3458         InsertNewInstBefore(NC, *I);
3459       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(Caller)) {
3460         BasicBlock::iterator I = CBI->getDefaultDest()->getFirstInsertionPt();
3461         InsertNewInstBefore(NC, *I);
3462       } else {
3463         // Otherwise, it's a call, just insert cast right after the call.
3464         InsertNewInstBefore(NC, *Caller);
3465       }
3466       Worklist.pushUsersToWorkList(*Caller);
3467     } else {
3468       NV = UndefValue::get(Caller->getType());
3469     }
3470   }
3471 
3472   if (!Caller->use_empty())
3473     replaceInstUsesWith(*Caller, NV);
3474   else if (Caller->hasValueHandle()) {
3475     if (OldRetTy == NV->getType())
3476       ValueHandleBase::ValueIsRAUWd(Caller, NV);
3477     else
3478       // We cannot call ValueIsRAUWd with a different type, and the
3479       // actual tracked value will disappear.
3480       ValueHandleBase::ValueIsDeleted(Caller);
3481   }
3482 
3483   eraseInstFromFunction(*Caller);
3484   return true;
3485 }
3486 
3487 /// Turn a call to a function created by init_trampoline / adjust_trampoline
3488 /// intrinsic pair into a direct call to the underlying function.
3489 Instruction *
3490 InstCombinerImpl::transformCallThroughTrampoline(CallBase &Call,
3491                                                  IntrinsicInst &Tramp) {
3492   Value *Callee = Call.getCalledOperand();
3493   Type *CalleeTy = Callee->getType();
3494   FunctionType *FTy = Call.getFunctionType();
3495   AttributeList Attrs = Call.getAttributes();
3496 
3497   // If the call already has the 'nest' attribute somewhere then give up -
3498   // otherwise 'nest' would occur twice after splicing in the chain.
3499   if (Attrs.hasAttrSomewhere(Attribute::Nest))
3500     return nullptr;
3501 
3502   Function *NestF = cast<Function>(Tramp.getArgOperand(1)->stripPointerCasts());
3503   FunctionType *NestFTy = NestF->getFunctionType();
3504 
3505   AttributeList NestAttrs = NestF->getAttributes();
3506   if (!NestAttrs.isEmpty()) {
3507     unsigned NestArgNo = 0;
3508     Type *NestTy = nullptr;
3509     AttributeSet NestAttr;
3510 
3511     // Look for a parameter marked with the 'nest' attribute.
3512     for (FunctionType::param_iterator I = NestFTy->param_begin(),
3513                                       E = NestFTy->param_end();
3514          I != E; ++NestArgNo, ++I) {
3515       AttributeSet AS = NestAttrs.getParamAttrs(NestArgNo);
3516       if (AS.hasAttribute(Attribute::Nest)) {
3517         // Record the parameter type and any other attributes.
3518         NestTy = *I;
3519         NestAttr = AS;
3520         break;
3521       }
3522     }
3523 
3524     if (NestTy) {
3525       std::vector<Value*> NewArgs;
3526       std::vector<AttributeSet> NewArgAttrs;
3527       NewArgs.reserve(Call.arg_size() + 1);
3528       NewArgAttrs.reserve(Call.arg_size());
3529 
3530       // Insert the nest argument into the call argument list, which may
3531       // mean appending it.  Likewise for attributes.
3532 
3533       {
3534         unsigned ArgNo = 0;
3535         auto I = Call.arg_begin(), E = Call.arg_end();
3536         do {
3537           if (ArgNo == NestArgNo) {
3538             // Add the chain argument and attributes.
3539             Value *NestVal = Tramp.getArgOperand(2);
3540             if (NestVal->getType() != NestTy)
3541               NestVal = Builder.CreateBitCast(NestVal, NestTy, "nest");
3542             NewArgs.push_back(NestVal);
3543             NewArgAttrs.push_back(NestAttr);
3544           }
3545 
3546           if (I == E)
3547             break;
3548 
3549           // Add the original argument and attributes.
3550           NewArgs.push_back(*I);
3551           NewArgAttrs.push_back(Attrs.getParamAttrs(ArgNo));
3552 
3553           ++ArgNo;
3554           ++I;
3555         } while (true);
3556       }
3557 
3558       // The trampoline may have been bitcast to a bogus type (FTy).
3559       // Handle this by synthesizing a new function type, equal to FTy
3560       // with the chain parameter inserted.
3561 
3562       std::vector<Type*> NewTypes;
3563       NewTypes.reserve(FTy->getNumParams()+1);
3564 
3565       // Insert the chain's type into the list of parameter types, which may
3566       // mean appending it.
3567       {
3568         unsigned ArgNo = 0;
3569         FunctionType::param_iterator I = FTy->param_begin(),
3570           E = FTy->param_end();
3571 
3572         do {
3573           if (ArgNo == NestArgNo)
3574             // Add the chain's type.
3575             NewTypes.push_back(NestTy);
3576 
3577           if (I == E)
3578             break;
3579 
3580           // Add the original type.
3581           NewTypes.push_back(*I);
3582 
3583           ++ArgNo;
3584           ++I;
3585         } while (true);
3586       }
3587 
3588       // Replace the trampoline call with a direct call.  Let the generic
3589       // code sort out any function type mismatches.
3590       FunctionType *NewFTy = FunctionType::get(FTy->getReturnType(), NewTypes,
3591                                                 FTy->isVarArg());
3592       Constant *NewCallee =
3593         NestF->getType() == PointerType::getUnqual(NewFTy) ?
3594         NestF : ConstantExpr::getBitCast(NestF,
3595                                          PointerType::getUnqual(NewFTy));
3596       AttributeList NewPAL =
3597           AttributeList::get(FTy->getContext(), Attrs.getFnAttrs(),
3598                              Attrs.getRetAttrs(), NewArgAttrs);
3599 
3600       SmallVector<OperandBundleDef, 1> OpBundles;
3601       Call.getOperandBundlesAsDefs(OpBundles);
3602 
3603       Instruction *NewCaller;
3604       if (InvokeInst *II = dyn_cast<InvokeInst>(&Call)) {
3605         NewCaller = InvokeInst::Create(NewFTy, NewCallee,
3606                                        II->getNormalDest(), II->getUnwindDest(),
3607                                        NewArgs, OpBundles);
3608         cast<InvokeInst>(NewCaller)->setCallingConv(II->getCallingConv());
3609         cast<InvokeInst>(NewCaller)->setAttributes(NewPAL);
3610       } else if (CallBrInst *CBI = dyn_cast<CallBrInst>(&Call)) {
3611         NewCaller =
3612             CallBrInst::Create(NewFTy, NewCallee, CBI->getDefaultDest(),
3613                                CBI->getIndirectDests(), NewArgs, OpBundles);
3614         cast<CallBrInst>(NewCaller)->setCallingConv(CBI->getCallingConv());
3615         cast<CallBrInst>(NewCaller)->setAttributes(NewPAL);
3616       } else {
3617         NewCaller = CallInst::Create(NewFTy, NewCallee, NewArgs, OpBundles);
3618         cast<CallInst>(NewCaller)->setTailCallKind(
3619             cast<CallInst>(Call).getTailCallKind());
3620         cast<CallInst>(NewCaller)->setCallingConv(
3621             cast<CallInst>(Call).getCallingConv());
3622         cast<CallInst>(NewCaller)->setAttributes(NewPAL);
3623       }
3624       NewCaller->setDebugLoc(Call.getDebugLoc());
3625 
3626       return NewCaller;
3627     }
3628   }
3629 
3630   // Replace the trampoline call with a direct call.  Since there is no 'nest'
3631   // parameter, there is no need to adjust the argument list.  Let the generic
3632   // code sort out any function type mismatches.
3633   Constant *NewCallee = ConstantExpr::getBitCast(NestF, CalleeTy);
3634   Call.setCalledFunction(FTy, NewCallee);
3635   return &Call;
3636 }
3637