1 //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the IRBuilder class, which is used as a convenient way
10 // to create LLVM instructions with a consistent and simplified interface.
11 //
12 //===----------------------------------------------------------------------===//
13 
14 #include "llvm/IR/IRBuilder.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/IR/Constant.h"
17 #include "llvm/IR/Constants.h"
18 #include "llvm/IR/DebugInfoMetadata.h"
19 #include "llvm/IR/DerivedTypes.h"
20 #include "llvm/IR/Function.h"
21 #include "llvm/IR/GlobalValue.h"
22 #include "llvm/IR/GlobalVariable.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/Intrinsics.h"
25 #include "llvm/IR/LLVMContext.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/Statepoint.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/IR/Value.h"
31 #include "llvm/Support/Casting.h"
32 #include <cassert>
33 #include <cstdint>
34 #include <optional>
35 #include <vector>
36 
37 using namespace llvm;
38 
/// CreateGlobalString - Make a new global variable with an initializer that
/// is an i8 array filled in with the nul-terminated string value specified.
/// If Name is specified, it is used as the name of the global variable
/// created.
43 GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
44                                                   const Twine &Name,
45                                                   unsigned AddressSpace,
46                                                   Module *M) {
47   Constant *StrConstant = ConstantDataArray::getString(Context, Str);
48   if (!M)
49     M = BB->getParent()->getParent();
50   auto *GV = new GlobalVariable(
51       *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
52       StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
53   GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
54   GV->setAlignment(Align(1));
55   return GV;
56 }
57 
/// Return the return type of the function that contains the current
/// insertion point; requires a valid insertion block inside a function.
Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}
62 
63 DebugLoc IRBuilderBase::getCurrentDebugLocation() const {
64   for (auto &KV : MetadataToCopy)
65     if (KV.first == LLVMContext::MD_dbg)
66       return {cast<DILocation>(KV.second)};
67 
68   return {};
69 }
70 void IRBuilderBase::SetInstDebugLocation(Instruction *I) const {
71   for (const auto &KV : MetadataToCopy)
72     if (KV.first == LLVMContext::MD_dbg) {
73       I->setDebugLoc(DebugLoc(KV.second));
74       return;
75     }
76 }
77 
78 CallInst *
79 IRBuilderBase::createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
80                                 const Twine &Name, Instruction *FMFSource,
81                                 ArrayRef<OperandBundleDef> OpBundles) {
82   CallInst *CI = CreateCall(Callee, Ops, OpBundles, Name);
83   if (FMFSource)
84     CI->copyFastMathFlags(FMFSource);
85   return CI;
86 }
87 
88 Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
89   assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
90   if (cast<ConstantInt>(Scaling)->isZero())
91     return Scaling;
92   Module *M = GetInsertBlock()->getParent()->getParent();
93   Function *TheFn =
94       Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
95   CallInst *CI = CreateCall(TheFn, {}, {}, Name);
96   return cast<ConstantInt>(Scaling)->isOne() ? CI : CreateMul(CI, Scaling);
97 }
98 
/// Materialize an element count as a Value of DstType: a plain constant for
/// fixed counts, or (vscale * known-min) for scalable ones.
Value *IRBuilderBase::CreateElementCount(Type *DstType, ElementCount EC) {
  Constant *MinEC = ConstantInt::get(DstType, EC.getKnownMinValue());
  return EC.isScalable() ? CreateVScale(MinEC) : MinEC;
}
103 
/// Materialize a type size as a Value of DstType: a plain constant for
/// fixed sizes, or (vscale * known-min) for scalable ones.
Value *IRBuilderBase::CreateTypeSize(Type *DstType, TypeSize Size) {
  Constant *MinSize = ConstantInt::get(DstType, Size.getKnownMinValue());
  return Size.isScalable() ? CreateVScale(MinSize) : MinSize;
}
108 
109 Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
110   Type *STy = DstType->getScalarType();
111   if (isa<ScalableVectorType>(DstType)) {
112     Type *StepVecType = DstType;
113     // TODO: We expect this special case (element type < 8 bits) to be
114     // temporary - once the intrinsic properly supports < 8 bits this code
115     // can be removed.
116     if (STy->getScalarSizeInBits() < 8)
117       StepVecType =
118           VectorType::get(getInt8Ty(), cast<ScalableVectorType>(DstType));
119     Value *Res = CreateIntrinsic(Intrinsic::experimental_stepvector,
120                                  {StepVecType}, {}, nullptr, Name);
121     if (StepVecType != DstType)
122       Res = CreateTrunc(Res, DstType);
123     return Res;
124   }
125 
126   unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
127 
128   // Create a vector of consecutive numbers from zero to VF.
129   SmallVector<Constant *, 8> Indices;
130   for (unsigned i = 0; i < NumEls; ++i)
131     Indices.push_back(ConstantInt::get(STy, i));
132 
133   // Add the consecutive indices to the vector value.
134   return ConstantVector::get(Indices);
135 }
136 
137 CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
138                                       MaybeAlign Align, bool isVolatile,
139                                       MDNode *TBAATag, MDNode *ScopeTag,
140                                       MDNode *NoAliasTag) {
141   Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
142   Type *Tys[] = { Ptr->getType(), Size->getType() };
143   Module *M = BB->getParent()->getParent();
144   Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
145 
146   CallInst *CI = CreateCall(TheFn, Ops);
147 
148   if (Align)
149     cast<MemSetInst>(CI)->setDestAlignment(*Align);
150 
151   // Set the TBAA info if present.
152   if (TBAATag)
153     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
154 
155   if (ScopeTag)
156     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
157 
158   if (NoAliasTag)
159     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
160 
161   return CI;
162 }
163 
164 CallInst *IRBuilderBase::CreateMemSetInline(Value *Dst, MaybeAlign DstAlign,
165                                             Value *Val, Value *Size,
166                                             bool IsVolatile, MDNode *TBAATag,
167                                             MDNode *ScopeTag,
168                                             MDNode *NoAliasTag) {
169   Value *Ops[] = {Dst, Val, Size, getInt1(IsVolatile)};
170   Type *Tys[] = {Dst->getType(), Size->getType()};
171   Module *M = BB->getParent()->getParent();
172   Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset_inline, Tys);
173 
174   CallInst *CI = CreateCall(TheFn, Ops);
175 
176   if (DstAlign)
177     cast<MemSetInlineInst>(CI)->setDestAlignment(*DstAlign);
178 
179   // Set the TBAA info if present.
180   if (TBAATag)
181     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
182 
183   if (ScopeTag)
184     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
185 
186   if (NoAliasTag)
187     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
188 
189   return CI;
190 }
191 
192 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
193     Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
194     MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
195 
196   Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
197   Type *Tys[] = {Ptr->getType(), Size->getType()};
198   Module *M = BB->getParent()->getParent();
199   Function *TheFn = Intrinsic::getDeclaration(
200       M, Intrinsic::memset_element_unordered_atomic, Tys);
201 
202   CallInst *CI = CreateCall(TheFn, Ops);
203 
204   cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
205 
206   // Set the TBAA info if present.
207   if (TBAATag)
208     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
209 
210   if (ScopeTag)
211     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
212 
213   if (NoAliasTag)
214     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
215 
216   return CI;
217 }
218 
219 CallInst *IRBuilderBase::CreateMemTransferInst(
220     Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
221     MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
222     MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
223   Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
224   Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
225   Module *M = BB->getParent()->getParent();
226   Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);
227 
228   CallInst *CI = CreateCall(TheFn, Ops);
229 
230   auto* MCI = cast<MemTransferInst>(CI);
231   if (DstAlign)
232     MCI->setDestAlignment(*DstAlign);
233   if (SrcAlign)
234     MCI->setSourceAlignment(*SrcAlign);
235 
236   // Set the TBAA info if present.
237   if (TBAATag)
238     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
239 
240   // Set the TBAA Struct info if present.
241   if (TBAAStructTag)
242     CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
243 
244   if (ScopeTag)
245     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
246 
247   if (NoAliasTag)
248     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
249 
250   return CI;
251 }
252 
253 CallInst *IRBuilderBase::CreateMemCpyInline(
254     Value *Dst, MaybeAlign DstAlign, Value *Src, MaybeAlign SrcAlign,
255     Value *Size, bool IsVolatile, MDNode *TBAATag, MDNode *TBAAStructTag,
256     MDNode *ScopeTag, MDNode *NoAliasTag) {
257   Value *Ops[] = {Dst, Src, Size, getInt1(IsVolatile)};
258   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
259   Function *F = BB->getParent();
260   Module *M = F->getParent();
261   Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);
262 
263   CallInst *CI = CreateCall(TheFn, Ops);
264 
265   auto *MCI = cast<MemCpyInlineInst>(CI);
266   if (DstAlign)
267     MCI->setDestAlignment(*DstAlign);
268   if (SrcAlign)
269     MCI->setSourceAlignment(*SrcAlign);
270 
271   // Set the TBAA info if present.
272   if (TBAATag)
273     MCI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
274 
275   // Set the TBAA Struct info if present.
276   if (TBAAStructTag)
277     MCI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
278 
279   if (ScopeTag)
280     MCI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
281 
282   if (NoAliasTag)
283     MCI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
284 
285   return CI;
286 }
287 
288 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
289     Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
290     uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
291     MDNode *ScopeTag, MDNode *NoAliasTag) {
292   assert(DstAlign >= ElementSize &&
293          "Pointer alignment must be at least element size");
294   assert(SrcAlign >= ElementSize &&
295          "Pointer alignment must be at least element size");
296   Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
297   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
298   Module *M = BB->getParent()->getParent();
299   Function *TheFn = Intrinsic::getDeclaration(
300       M, Intrinsic::memcpy_element_unordered_atomic, Tys);
301 
302   CallInst *CI = CreateCall(TheFn, Ops);
303 
304   // Set the alignment of the pointer args.
305   auto *AMCI = cast<AtomicMemCpyInst>(CI);
306   AMCI->setDestAlignment(DstAlign);
307   AMCI->setSourceAlignment(SrcAlign);
308 
309   // Set the TBAA info if present.
310   if (TBAATag)
311     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
312 
313   // Set the TBAA Struct info if present.
314   if (TBAAStructTag)
315     CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
316 
317   if (ScopeTag)
318     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
319 
320   if (NoAliasTag)
321     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
322 
323   return CI;
324 }
325 
326 CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
327                                        Value *Src, MaybeAlign SrcAlign,
328                                        Value *Size, bool isVolatile,
329                                        MDNode *TBAATag, MDNode *ScopeTag,
330                                        MDNode *NoAliasTag) {
331   Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
332   Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
333   Module *M = BB->getParent()->getParent();
334   Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
335 
336   CallInst *CI = CreateCall(TheFn, Ops);
337 
338   auto *MMI = cast<MemMoveInst>(CI);
339   if (DstAlign)
340     MMI->setDestAlignment(*DstAlign);
341   if (SrcAlign)
342     MMI->setSourceAlignment(*SrcAlign);
343 
344   // Set the TBAA info if present.
345   if (TBAATag)
346     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
347 
348   if (ScopeTag)
349     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
350 
351   if (NoAliasTag)
352     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
353 
354   return CI;
355 }
356 
357 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
358     Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
359     uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
360     MDNode *ScopeTag, MDNode *NoAliasTag) {
361   assert(DstAlign >= ElementSize &&
362          "Pointer alignment must be at least element size");
363   assert(SrcAlign >= ElementSize &&
364          "Pointer alignment must be at least element size");
365   Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
366   Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
367   Module *M = BB->getParent()->getParent();
368   Function *TheFn = Intrinsic::getDeclaration(
369       M, Intrinsic::memmove_element_unordered_atomic, Tys);
370 
371   CallInst *CI = CreateCall(TheFn, Ops);
372 
373   // Set the alignment of the pointer args.
374   CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
375   CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
376 
377   // Set the TBAA info if present.
378   if (TBAATag)
379     CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
380 
381   // Set the TBAA Struct info if present.
382   if (TBAAStructTag)
383     CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
384 
385   if (ScopeTag)
386     CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
387 
388   if (NoAliasTag)
389     CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
390 
391   return CI;
392 }
393 
394 CallInst *IRBuilderBase::getReductionIntrinsic(Intrinsic::ID ID, Value *Src) {
395   Module *M = GetInsertBlock()->getParent()->getParent();
396   Value *Ops[] = {Src};
397   Type *Tys[] = { Src->getType() };
398   auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
399   return CreateCall(Decl, Ops);
400 }
401 
402 CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
403   Module *M = GetInsertBlock()->getParent()->getParent();
404   Value *Ops[] = {Acc, Src};
405   auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
406                                         {Src->getType()});
407   return CreateCall(Decl, Ops);
408 }
409 
410 CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
411   Module *M = GetInsertBlock()->getParent()->getParent();
412   Value *Ops[] = {Acc, Src};
413   auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
414                                         {Src->getType()});
415   return CreateCall(Decl, Ops);
416 }
417 
/// Create an integer add reduction of the vector Src.
CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_add, Src);
}
421 
/// Create an integer multiply reduction of the vector Src.
CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_mul, Src);
}
425 
/// Create a bitwise AND reduction of the vector Src.
CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_and, Src);
}
429 
/// Create a bitwise OR reduction of the vector Src.
CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_or, Src);
}
433 
/// Create a bitwise XOR reduction of the vector Src.
CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_xor, Src);
}
437 
/// Create an integer max reduction of the vector Src, signed or unsigned
/// per IsSigned.
CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
  return getReductionIntrinsic(ID, Src);
}
443 
/// Create an integer min reduction of the vector Src, signed or unsigned
/// per IsSigned.
CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID =
      IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
  return getReductionIntrinsic(ID, Src);
}
449 
/// Create a floating-point max reduction of the vector Src.
CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmax, Src);
}
453 
/// Create a floating-point min reduction of the vector Src.
CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmin, Src);
}
457 
/// Create a floating-point maximum reduction of the vector Src.
CallInst *IRBuilderBase::CreateFPMaximumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fmaximum, Src);
}
461 
/// Create a floating-point minimum reduction of the vector Src.
CallInst *IRBuilderBase::CreateFPMinimumReduce(Value *Src) {
  return getReductionIntrinsic(Intrinsic::vector_reduce_fminimum, Src);
}
465 
466 CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
467   assert(isa<PointerType>(Ptr->getType()) &&
468          "lifetime.start only applies to pointers.");
469   if (!Size)
470     Size = getInt64(-1);
471   else
472     assert(Size->getType() == getInt64Ty() &&
473            "lifetime.start requires the size to be an i64");
474   Value *Ops[] = { Size, Ptr };
475   Module *M = BB->getParent()->getParent();
476   Function *TheFn =
477       Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
478   return CreateCall(TheFn, Ops);
479 }
480 
481 CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
482   assert(isa<PointerType>(Ptr->getType()) &&
483          "lifetime.end only applies to pointers.");
484   if (!Size)
485     Size = getInt64(-1);
486   else
487     assert(Size->getType() == getInt64Ty() &&
488            "lifetime.end requires the size to be an i64");
489   Value *Ops[] = { Size, Ptr };
490   Module *M = BB->getParent()->getParent();
491   Function *TheFn =
492       Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
493   return CreateCall(TheFn, Ops);
494 }
495 
496 CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
497 
498   assert(isa<PointerType>(Ptr->getType()) &&
499          "invariant.start only applies to pointers.");
500   if (!Size)
501     Size = getInt64(-1);
502   else
503     assert(Size->getType() == getInt64Ty() &&
504            "invariant.start requires the size to be an i64");
505 
506   Value *Ops[] = {Size, Ptr};
507   // Fill in the single overloaded type: memory object type.
508   Type *ObjectPtr[1] = {Ptr->getType()};
509   Module *M = BB->getParent()->getParent();
510   Function *TheFn =
511       Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
512   return CreateCall(TheFn, Ops);
513 }
514 
515 static MaybeAlign getAlign(Value *Ptr) {
516   if (auto *O = dyn_cast<GlobalObject>(Ptr))
517     return O->getAlign();
518   if (auto *A = dyn_cast<GlobalAlias>(Ptr))
519     return A->getAliaseeObject()->getAlign();
520   return {};
521 }
522 
CallInst *IRBuilderBase::CreateThreadLocalAddress(Value *Ptr) {
#ifndef NDEBUG
  // Look through a constant-expression cast so the assert below still sees
  // the underlying global.  Such casts can occur because LLVM may sink
  // constants directly; this special case can be dropped once that abuse of
  // constexpr casts is eliminated.
  auto *V = Ptr;
  if (auto *CE = dyn_cast<ConstantExpr>(V))
    if (CE->isCast())
      V = CE->getOperand(0);

  assert(isa<GlobalValue>(V) && cast<GlobalValue>(V)->isThreadLocal() &&
         "threadlocal_address only applies to thread local variables.");
#endif
  CallInst *CI = CreateIntrinsic(llvm::Intrinsic::threadlocal_address,
                                 {Ptr->getType()}, {Ptr});
  // Propagate any known alignment of the global to both the pointer
  // parameter and the returned address.
  if (MaybeAlign A = getAlign(Ptr)) {
    CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), *A));
    CI->addRetAttr(Attribute::getWithAlignment(CI->getContext(), *A));
  }
  return CI;
}
545 
546 CallInst *
547 IRBuilderBase::CreateAssumption(Value *Cond,
548                                 ArrayRef<OperandBundleDef> OpBundles) {
549   assert(Cond->getType() == getInt1Ty() &&
550          "an assumption condition must be of type i1");
551 
552   Value *Ops[] = { Cond };
553   Module *M = BB->getParent()->getParent();
554   Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
555   return CreateCall(FnAssume, Ops, OpBundles);
556 }
557 
/// Emit llvm.experimental.noalias.scope.decl with the given Scope operand.
/// The intrinsic is not overloaded.
Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
  Module *M = BB->getModule();
  auto *FnIntrinsic = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_noalias_scope_decl, {});
  return CreateCall(FnIntrinsic, {Scope});
}
564 
565 /// Create a call to a Masked Load intrinsic.
566 /// \p Ty        - vector type to load
567 /// \p Ptr       - base pointer for the load
568 /// \p Alignment - alignment of the source location
569 /// \p Mask      - vector of booleans which indicates what vector lanes should
570 ///                be accessed in memory
571 /// \p PassThru  - pass-through value that is used to fill the masked-off lanes
572 ///                of the result
573 /// \p Name      - name of the result variable
574 CallInst *IRBuilderBase::CreateMaskedLoad(Type *Ty, Value *Ptr, Align Alignment,
575                                           Value *Mask, Value *PassThru,
576                                           const Twine &Name) {
577   auto *PtrTy = cast<PointerType>(Ptr->getType());
578   assert(Ty->isVectorTy() && "Type should be vector");
579   assert(Mask && "Mask should not be all-ones (null)");
580   if (!PassThru)
581     PassThru = PoisonValue::get(Ty);
582   Type *OverloadedTypes[] = { Ty, PtrTy };
583   Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
584   return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
585                                OverloadedTypes, Name);
586 }
587 
588 /// Create a call to a Masked Store intrinsic.
589 /// \p Val       - data to be stored,
590 /// \p Ptr       - base pointer for the store
591 /// \p Alignment - alignment of the destination location
592 /// \p Mask      - vector of booleans which indicates what vector lanes should
593 ///                be accessed in memory
594 CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
595                                            Align Alignment, Value *Mask) {
596   auto *PtrTy = cast<PointerType>(Ptr->getType());
597   Type *DataTy = Val->getType();
598   assert(DataTy->isVectorTy() && "Val should be a vector");
599   assert(Mask && "Mask should not be all-ones (null)");
600   Type *OverloadedTypes[] = { DataTy, PtrTy };
601   Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
602   return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
603 }
604 
605 /// Create a call to a Masked intrinsic, with given intrinsic Id,
606 /// an array of operands - Ops, and an array of overloaded types -
607 /// OverloadedTypes.
608 CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
609                                                ArrayRef<Value *> Ops,
610                                                ArrayRef<Type *> OverloadedTypes,
611                                                const Twine &Name) {
612   Module *M = BB->getParent()->getParent();
613   Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
614   return CreateCall(TheFn, Ops, {}, Name);
615 }
616 
617 /// Create a call to a Masked Gather intrinsic.
618 /// \p Ty       - vector type to gather
619 /// \p Ptrs     - vector of pointers for loading
620 /// \p Align    - alignment for one element
621 /// \p Mask     - vector of booleans which indicates what vector lanes should
622 ///               be accessed in memory
623 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
624 ///               of the result
625 /// \p Name     - name of the result variable
626 CallInst *IRBuilderBase::CreateMaskedGather(Type *Ty, Value *Ptrs,
627                                             Align Alignment, Value *Mask,
628                                             Value *PassThru,
629                                             const Twine &Name) {
630   auto *VecTy = cast<VectorType>(Ty);
631   ElementCount NumElts = VecTy->getElementCount();
632   auto *PtrsTy = cast<VectorType>(Ptrs->getType());
633   assert(NumElts == PtrsTy->getElementCount() && "Element count mismatch");
634 
635   if (!Mask)
636     Mask = getAllOnesMask(NumElts);
637 
638   if (!PassThru)
639     PassThru = PoisonValue::get(Ty);
640 
641   Type *OverloadedTypes[] = {Ty, PtrsTy};
642   Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
643 
644   // We specify only one type when we create this intrinsic. Types of other
645   // arguments are derived from this type.
646   return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
647                                Name);
648 }
649 
650 /// Create a call to a Masked Scatter intrinsic.
651 /// \p Data  - data to be stored,
652 /// \p Ptrs  - the vector of pointers, where the \p Data elements should be
653 ///            stored
654 /// \p Align - alignment for one element
655 /// \p Mask  - vector of booleans which indicates what vector lanes should
656 ///            be accessed in memory
657 CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
658                                              Align Alignment, Value *Mask) {
659   auto *PtrsTy = cast<VectorType>(Ptrs->getType());
660   auto *DataTy = cast<VectorType>(Data->getType());
661   ElementCount NumElts = PtrsTy->getElementCount();
662 
663   if (!Mask)
664     Mask = getAllOnesMask(NumElts);
665 
666   Type *OverloadedTypes[] = {DataTy, PtrsTy};
667   Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
668 
669   // We specify only one type when we create this intrinsic. Types of other
670   // arguments are derived from this type.
671   return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
672 }
673 
674 /// Create a call to Masked Expand Load intrinsic
675 /// \p Ty        - vector type to load
676 /// \p Ptr       - base pointer for the load
677 /// \p Mask      - vector of booleans which indicates what vector lanes should
678 ///                be accessed in memory
679 /// \p PassThru  - pass-through value that is used to fill the masked-off lanes
680 ///                of the result
681 /// \p Name      - name of the result variable
682 CallInst *IRBuilderBase::CreateMaskedExpandLoad(Type *Ty, Value *Ptr,
683                                                 Value *Mask, Value *PassThru,
684                                                 const Twine &Name) {
685   assert(Ty->isVectorTy() && "Type should be vector");
686   assert(Mask && "Mask should not be all-ones (null)");
687   if (!PassThru)
688     PassThru = PoisonValue::get(Ty);
689   Type *OverloadedTypes[] = {Ty};
690   Value *Ops[] = {Ptr, Mask, PassThru};
691   return CreateMaskedIntrinsic(Intrinsic::masked_expandload, Ops,
692                                OverloadedTypes, Name);
693 }
694 
695 /// Create a call to Masked Compress Store intrinsic
696 /// \p Val       - data to be stored,
697 /// \p Ptr       - base pointer for the store
698 /// \p Mask      - vector of booleans which indicates what vector lanes should
699 ///                be accessed in memory
700 CallInst *IRBuilderBase::CreateMaskedCompressStore(Value *Val, Value *Ptr,
701                                                    Value *Mask) {
702   Type *DataTy = Val->getType();
703   assert(DataTy->isVectorTy() && "Val should be a vector");
704   assert(Mask && "Mask should not be all-ones (null)");
705   Type *OverloadedTypes[] = {DataTy};
706   Value *Ops[] = {Val, Ptr, Mask};
707   return CreateMaskedIntrinsic(Intrinsic::masked_compressstore, Ops,
708                                OverloadedTypes);
709 }
710 
/// Assemble the fixed leading arguments of a gc.statepoint call.  The order
/// (ID, patch-byte count, callee, #call-args, flags, the call args, then two
/// trailing zero counts) is the statepoint signature and must not change.
template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  llvm::append_range(Args, CallArgs);
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
729 
730 template<typename T1, typename T2, typename T3>
731 static std::vector<OperandBundleDef>
732 getStatepointBundles(std::optional<ArrayRef<T1>> TransitionArgs,
733                      std::optional<ArrayRef<T2>> DeoptArgs,
734                      ArrayRef<T3> GCArgs) {
735   std::vector<OperandBundleDef> Rval;
736   if (DeoptArgs) {
737     SmallVector<Value*, 16> DeoptValues;
738     llvm::append_range(DeoptValues, *DeoptArgs);
739     Rval.emplace_back("deopt", DeoptValues);
740   }
741   if (TransitionArgs) {
742     SmallVector<Value*, 16> TransitionValues;
743     llvm::append_range(TransitionValues, *TransitionArgs);
744     Rval.emplace_back("gc-transition", TransitionValues);
745   }
746   if (GCArgs.size()) {
747     SmallVector<Value*, 16> LiveValues;
748     llvm::append_range(LiveValues, GCArgs);
749     Rval.emplace_back("gc-live", LiveValues);
750   }
751   return Rval;
752 }
753 
/// Shared implementation behind all CreateGCStatepointCall overloads: build
/// the statepoint argument list and operand bundles, emit the call, and tag
/// the wrapped callee operand with its function type.
template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualCallee.getCallee()->getType()});

  std::vector<Value *> Args = getStatepointArgs(
      *Builder, ID, NumPatchBytes, ActualCallee.getCallee(), Flags, CallArgs);

  CallInst *CI = Builder->CreateCall(
      FnStatepoint, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Operand 2 is the wrapped callee; record its function type as an
  // elementtype attribute on that parameter.
  CI->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualCallee.getFunctionType()));
  return CI;
}
778 
779 CallInst *IRBuilderBase::CreateGCStatepointCall(
780     uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
781     ArrayRef<Value *> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
782     ArrayRef<Value *> GCArgs, const Twine &Name) {
783   return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
784       this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
785       CallArgs, std::nullopt /* No Transition Args */, DeoptArgs, GCArgs, Name);
786 }
787 
/// Overload taking explicit statepoint flags plus transition and deopt
/// argument lists as Use ranges; forwards everything unchanged to the shared
/// helper.
CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
    uint32_t Flags, ArrayRef<Value *> CallArgs,
    std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}
798 
799 CallInst *IRBuilderBase::CreateGCStatepointCall(
800     uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualCallee,
801     ArrayRef<Use> CallArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
802     ArrayRef<Value *> GCArgs, const Twine &Name) {
803   return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
804       this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
805       CallArgs, std::nullopt, DeoptArgs, GCArgs, Name);
806 }
807 
/// Shared implementation behind all CreateGCStatepointInvoke overloads;
/// mirrors CreateGCStatepointCallCommon but emits an invoke with the given
/// normal/unwind destinations instead of a call.
template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    FunctionCallee ActualInvokee, BasicBlock *NormalDest,
    BasicBlock *UnwindDest, uint32_t Flags, ArrayRef<T0> InvokeArgs,
    std::optional<ArrayRef<T1>> TransitionArgs,
    std::optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                {ActualInvokee.getCallee()->getType()});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee.getCallee(),
                        Flags, InvokeArgs);

  InvokeInst *II = Builder->CreateInvoke(
      FnStatepoint, NormalDest, UnwindDest, Args,
      getStatepointBundles(TransitionArgs, DeoptArgs, GCArgs), Name);
  // Parameter 2 of the statepoint is the wrapped invokee; record its function
  // type via the elementtype attribute (pointers are opaque).
  II->addParamAttr(2,
                   Attribute::get(Builder->getContext(), Attribute::ElementType,
                                  ActualInvokee.getFunctionType()));
  return II;
}
834 
835 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
836     uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
837     BasicBlock *NormalDest, BasicBlock *UnwindDest,
838     ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Value *>> DeoptArgs,
839     ArrayRef<Value *> GCArgs, const Twine &Name) {
840   return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
841       this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
842       uint32_t(StatepointFlags::None), InvokeArgs,
843       std::nullopt /* No Transition Args*/, DeoptArgs, GCArgs, Name);
844 }
845 
/// Overload taking explicit statepoint flags plus transition and deopt
/// argument lists as Use ranges; forwards everything unchanged to the shared
/// helper.
InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Value *> InvokeArgs, std::optional<ArrayRef<Use>> TransitionArgs,
    std::optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}
856 
857 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
858     uint64_t ID, uint32_t NumPatchBytes, FunctionCallee ActualInvokee,
859     BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
860     std::optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
861     const Twine &Name) {
862   return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
863       this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
864       uint32_t(StatepointFlags::None), InvokeArgs, std::nullopt, DeoptArgs,
865       GCArgs, Name);
866 }
867 
868 CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
869                                         Type *ResultType, const Twine &Name) {
870   Intrinsic::ID ID = Intrinsic::experimental_gc_result;
871   Module *M = BB->getParent()->getParent();
872   Type *Types[] = {ResultType};
873   Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
874 
875   Value *Args[] = {Statepoint};
876   return CreateCall(FnGCResult, Args, {}, Name);
877 }
878 
879 CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
880                                           int BaseOffset, int DerivedOffset,
881                                           Type *ResultType, const Twine &Name) {
882   Module *M = BB->getParent()->getParent();
883   Type *Types[] = {ResultType};
884   Function *FnGCRelocate =
885       Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
886 
887   Value *Args[] = {Statepoint, getInt32(BaseOffset), getInt32(DerivedOffset)};
888   return CreateCall(FnGCRelocate, Args, {}, Name);
889 }
890 
891 CallInst *IRBuilderBase::CreateGCGetPointerBase(Value *DerivedPtr,
892                                                 const Twine &Name) {
893   Module *M = BB->getParent()->getParent();
894   Type *PtrTy = DerivedPtr->getType();
895   Function *FnGCFindBase = Intrinsic::getDeclaration(
896       M, Intrinsic::experimental_gc_get_pointer_base, {PtrTy, PtrTy});
897   return CreateCall(FnGCFindBase, {DerivedPtr}, {}, Name);
898 }
899 
900 CallInst *IRBuilderBase::CreateGCGetPointerOffset(Value *DerivedPtr,
901                                                   const Twine &Name) {
902   Module *M = BB->getParent()->getParent();
903   Type *PtrTy = DerivedPtr->getType();
904   Function *FnGCGetOffset = Intrinsic::getDeclaration(
905       M, Intrinsic::experimental_gc_get_pointer_offset, {PtrTy});
906   return CreateCall(FnGCGetOffset, {DerivedPtr}, {}, Name);
907 }
908 
909 CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
910                                               Instruction *FMFSource,
911                                               const Twine &Name) {
912   Module *M = BB->getModule();
913   Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
914   return createCallHelper(Fn, {V}, Name, FMFSource);
915 }
916 
917 CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
918                                                Value *RHS,
919                                                Instruction *FMFSource,
920                                                const Twine &Name) {
921   Module *M = BB->getModule();
922   Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
923   return createCallHelper(Fn, {LHS, RHS}, Name, FMFSource);
924 }
925 
926 CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
927                                          ArrayRef<Type *> Types,
928                                          ArrayRef<Value *> Args,
929                                          Instruction *FMFSource,
930                                          const Twine &Name) {
931   Module *M = BB->getModule();
932   Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
933   return createCallHelper(Fn, Args, Name, FMFSource);
934 }
935 
/// Create an intrinsic call where the overload types are inferred: build the
/// would-be function type from RetTy and the argument types, then match it
/// against the intrinsic's signature descriptor table to recover the overload
/// type list.
CallInst *IRBuilderBase::CreateIntrinsic(Type *RetTy, Intrinsic::ID ID,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();

  SmallVector<Intrinsic::IITDescriptor> Table;
  Intrinsic::getIntrinsicInfoTableEntries(ID, Table);
  ArrayRef<Intrinsic::IITDescriptor> TableRef(Table);

  SmallVector<Type *> ArgTys;
  ArgTys.reserve(Args.size());
  for (auto &I : Args)
    ArgTys.push_back(I->getType());
  FunctionType *FTy = FunctionType::get(RetTy, ArgTys, false);
  SmallVector<Type *> OverloadTys;
  Intrinsic::MatchIntrinsicTypesResult Res =
      matchIntrinsicSignature(FTy, TableRef, OverloadTys);
  (void)Res;
  // matchIntrinsicSignature consumes TableRef as it matches; a clean match
  // AND a fully-consumed table are both required for a valid signature.
  assert(Res == Intrinsic::MatchIntrinsicTypes_Match && TableRef.empty() &&
         "Wrong types for intrinsic!");
  // TODO: Handle varargs intrinsics.

  Function *Fn = Intrinsic::getDeclaration(M, ID, OverloadTys);
  return createCallHelper(Fn, Args, Name, FMFSource);
}
962 
963 CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
964     Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
965     const Twine &Name, MDNode *FPMathTag,
966     std::optional<RoundingMode> Rounding,
967     std::optional<fp::ExceptionBehavior> Except) {
968   Value *RoundingV = getConstrainedFPRounding(Rounding);
969   Value *ExceptV = getConstrainedFPExcept(Except);
970 
971   FastMathFlags UseFMF = FMF;
972   if (FMFSource)
973     UseFMF = FMFSource->getFastMathFlags();
974 
975   CallInst *C = CreateIntrinsic(ID, {L->getType()},
976                                 {L, R, RoundingV, ExceptV}, nullptr, Name);
977   setConstrainedFPCallAttr(C);
978   setFPAttrs(C, FPMathTag, UseFMF);
979   return C;
980 }
981 
982 CallInst *IRBuilderBase::CreateConstrainedFPUnroundedBinOp(
983     Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
984     const Twine &Name, MDNode *FPMathTag,
985     std::optional<fp::ExceptionBehavior> Except) {
986   Value *ExceptV = getConstrainedFPExcept(Except);
987 
988   FastMathFlags UseFMF = FMF;
989   if (FMFSource)
990     UseFMF = FMFSource->getFastMathFlags();
991 
992   CallInst *C =
993       CreateIntrinsic(ID, {L->getType()}, {L, R, ExceptV}, nullptr, Name);
994   setConstrainedFPCallAttr(C);
995   setFPAttrs(C, FPMathTag, UseFMF);
996   return C;
997 }
998 
999 Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
1000                                    const Twine &Name, MDNode *FPMathTag) {
1001   if (Instruction::isBinaryOp(Opc)) {
1002     assert(Ops.size() == 2 && "Invalid number of operands!");
1003     return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
1004                        Ops[0], Ops[1], Name, FPMathTag);
1005   }
1006   if (Instruction::isUnaryOp(Opc)) {
1007     assert(Ops.size() == 1 && "Invalid number of operands!");
1008     return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
1009                       Ops[0], Name, FPMathTag);
1010   }
1011   llvm_unreachable("Unexpected opcode!");
1012 }
1013 
/// Build a constrained FP cast intrinsic call. Depending on the intrinsic,
/// the argument list is either {V, rounding, except} or {V, except}; whether
/// a rounding-mode operand exists is looked up from ConstrainedOps.def.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  // Fast-math flags come from FMFSource when provided, otherwise from the
  // builder's current defaults.
  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  // Table-driven lookup: ConstrainedOps.def records, per intrinsic, whether
  // it carries a rounding-mode operand.
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)        \
  case Intrinsic::INTRINSIC:                                \
    HasRoundingMD = ROUND_MODE;                             \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
1050 
1051 Value *IRBuilderBase::CreateFCmpHelper(
1052     CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
1053     MDNode *FPMathTag, bool IsSignaling) {
1054   if (IsFPConstrained) {
1055     auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
1056                           : Intrinsic::experimental_constrained_fcmp;
1057     return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
1058   }
1059 
1060   if (auto *LC = dyn_cast<Constant>(LHS))
1061     if (auto *RC = dyn_cast<Constant>(RHS))
1062       return Insert(Folder.CreateFCmp(P, LC, RC), Name);
1063   return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
1064 }
1065 
1066 CallInst *IRBuilderBase::CreateConstrainedFPCmp(
1067     Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
1068     const Twine &Name, std::optional<fp::ExceptionBehavior> Except) {
1069   Value *PredicateV = getConstrainedFPPredicate(P);
1070   Value *ExceptV = getConstrainedFPExcept(Except);
1071 
1072   CallInst *C = CreateIntrinsic(ID, {L->getType()},
1073                                 {L, R, PredicateV, ExceptV}, nullptr, Name);
1074   setConstrainedFPCallAttr(C);
1075   return C;
1076 }
1077 
/// Build a call to a constrained FP intrinsic, appending the rounding-mode
/// operand (when the intrinsic takes one, per ConstrainedOps.def) and the
/// exception-behavior operand to the caller-supplied arguments.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    std::optional<RoundingMode> Rounding,
    std::optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  // Table-driven lookup: ConstrainedOps.def records, per intrinsic, whether
  // it carries a rounding-mode operand.
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)        \
  case Intrinsic::INTRINSIC:                                \
    HasRoundingMD = ROUND_MODE;                             \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
1103 
1104 Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
1105                                    const Twine &Name, Instruction *MDFrom) {
1106   if (auto *V = Folder.FoldSelect(C, True, False))
1107     return V;
1108 
1109   SelectInst *Sel = SelectInst::Create(C, True, False);
1110   if (MDFrom) {
1111     MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
1112     MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
1113     Sel = addBranchMetadata(Sel, Prof, Unpred);
1114   }
1115   if (isa<FPMathOperator>(Sel))
1116     setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
1117   return Insert(Sel, Name);
1118 }
1119 
1120 Value *IRBuilderBase::CreatePtrDiff(Type *ElemTy, Value *LHS, Value *RHS,
1121                                     const Twine &Name) {
1122   assert(LHS->getType() == RHS->getType() &&
1123          "Pointer subtraction operand types must match!");
1124   Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
1125   Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
1126   Value *Difference = CreateSub(LHS_int, RHS_int);
1127   return CreateExactSDiv(Difference, ConstantExpr::getSizeOf(ElemTy),
1128                          Name);
1129 }
1130 
1131 Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
1132   assert(isa<PointerType>(Ptr->getType()) &&
1133          "launder.invariant.group only applies to pointers.");
1134   auto *PtrType = Ptr->getType();
1135   Module *M = BB->getParent()->getParent();
1136   Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
1137       M, Intrinsic::launder_invariant_group, {PtrType});
1138 
1139   assert(FnLaunderInvariantGroup->getReturnType() == PtrType &&
1140          FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
1141              PtrType &&
1142          "LaunderInvariantGroup should take and return the same type");
1143 
1144   return CreateCall(FnLaunderInvariantGroup, {Ptr});
1145 }
1146 
1147 Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
1148   assert(isa<PointerType>(Ptr->getType()) &&
1149          "strip.invariant.group only applies to pointers.");
1150 
1151   auto *PtrType = Ptr->getType();
1152   Module *M = BB->getParent()->getParent();
1153   Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
1154       M, Intrinsic::strip_invariant_group, {PtrType});
1155 
1156   assert(FnStripInvariantGroup->getReturnType() == PtrType &&
1157          FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
1158              PtrType &&
1159          "StripInvariantGroup should take and return the same type");
1160 
1161   return CreateCall(FnStripInvariantGroup, {Ptr});
1162 }
1163 
1164 Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
1165   auto *Ty = cast<VectorType>(V->getType());
1166   if (isa<ScalableVectorType>(Ty)) {
1167     Module *M = BB->getParent()->getParent();
1168     Function *F = Intrinsic::getDeclaration(
1169         M, Intrinsic::experimental_vector_reverse, Ty);
1170     return Insert(CallInst::Create(F, V), Name);
1171   }
1172   // Keep the original behaviour for fixed vector
1173   SmallVector<int, 8> ShuffleMask;
1174   int NumElts = Ty->getElementCount().getKnownMinValue();
1175   for (int i = 0; i < NumElts; ++i)
1176     ShuffleMask.push_back(NumElts - i - 1);
1177   return CreateShuffleVector(V, ShuffleMask, Name);
1178 }
1179 
1180 Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
1181                                          const Twine &Name) {
1182   assert(isa<VectorType>(V1->getType()) && "Unexpected type");
1183   assert(V1->getType() == V2->getType() &&
1184          "Splice expects matching operand types!");
1185 
1186   if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
1187     Module *M = BB->getParent()->getParent();
1188     Function *F = Intrinsic::getDeclaration(
1189         M, Intrinsic::experimental_vector_splice, VTy);
1190 
1191     Value *Ops[] = {V1, V2, getInt32(Imm)};
1192     return Insert(CallInst::Create(F, Ops), Name);
1193   }
1194 
1195   unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
1196   assert(((-Imm <= NumElts) || (Imm < NumElts)) &&
1197          "Invalid immediate for vector splice!");
1198 
1199   // Keep the original behaviour for fixed vector
1200   unsigned Idx = (NumElts + Imm) % NumElts;
1201   SmallVector<int, 8> Mask;
1202   for (unsigned I = 0; I < NumElts; ++I)
1203     Mask.push_back(Idx + I);
1204 
1205   return CreateShuffleVector(V1, V2, Mask);
1206 }
1207 
1208 Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
1209                                         const Twine &Name) {
1210   auto EC = ElementCount::getFixed(NumElts);
1211   return CreateVectorSplat(EC, V, Name);
1212 }
1213 
1214 Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
1215                                         const Twine &Name) {
1216   assert(EC.isNonZero() && "Cannot splat to an empty vector!");
1217 
1218   // First insert it into a poison vector so we can shuffle it.
1219   Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
1220   V = CreateInsertElement(Poison, V, getInt64(0), Name + ".splatinsert");
1221 
1222   // Shuffle the value across the desired number of elements.
1223   SmallVector<int, 16> Zeros;
1224   Zeros.resize(EC.getKnownMinValue());
1225   return CreateShuffleVector(V, Zeros, Name + ".splat");
1226 }
1227 
1228 Value *IRBuilderBase::CreateExtractInteger(
1229     const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
1230     uint64_t Offset, const Twine &Name) {
1231   auto *IntTy = cast<IntegerType>(From->getType());
1232   assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
1233              DL.getTypeStoreSize(IntTy) &&
1234          "Element extends past full value");
1235   uint64_t ShAmt = 8 * Offset;
1236   Value *V = From;
1237   if (DL.isBigEndian())
1238     ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
1239                  DL.getTypeStoreSize(ExtractedTy) - Offset);
1240   if (ShAmt) {
1241     V = CreateLShr(V, ShAmt, Name + ".shift");
1242   }
1243   assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
1244          "Cannot extract to a larger integer!");
1245   if (ExtractedTy != IntTy) {
1246     V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
1247   }
1248   return V;
1249 }
1250 
/// Build a preserve.array.access.index call that records a debug-preserved
/// array access of `Dimension` zero indices followed by LastIndex; the
/// element type is attached via the elementtype attribute on the base
/// pointer operand.
Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.array.access.index.");

  // The intrinsic's result type is whatever a GEP with Dimension leading
  // zeros followed by LastIndex would yield.
  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList(Dimension, Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType = GetElementPtrInst::getGEPReturnType(Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  // Record the pointee type of the base operand (pointers are opaque).
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
1279 
1280 Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
1281     Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
1282   assert(isa<PointerType>(Base->getType()) &&
1283          "Invalid Base ptr type for preserve.union.access.index.");
1284   auto *BaseType = Base->getType();
1285 
1286   Module *M = BB->getParent()->getParent();
1287   Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
1288       M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
1289 
1290   Value *DIIndex = getInt32(FieldIndex);
1291   CallInst *Fn =
1292       CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
1293   if (DbgInfo)
1294     Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1295 
1296   return Fn;
1297 }
1298 
/// Build a preserve.struct.access.index call for field `Index` (the GEP
/// index) of the struct pointed to by Base; FieldIndex is the debug-info
/// field number. The struct type is attached via the elementtype attribute
/// on the base pointer operand.
Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  auto *BaseType = Base->getType();
  assert(isa<PointerType>(BaseType) &&
         "Invalid Base ptr type for preserve.struct.access.index.");

  // The intrinsic's result type is whatever a {0, Index} GEP would yield.
  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  // Record the pointee type of the base operand (pointers are opaque).
  Fn->addParamAttr(
      0, Attribute::get(Fn->getContext(), Attribute::ElementType, ElTy));
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}
1325 
1326 Value *IRBuilderBase::createIsFPClass(Value *FPNum, unsigned Test) {
1327   ConstantInt *TestV = getInt32(Test);
1328   Module *M = BB->getParent()->getParent();
1329   Function *FnIsFPClass =
1330       Intrinsic::getDeclaration(M, Intrinsic::is_fpclass, {FPNum->getType()});
1331   return CreateCall(FnIsFPClass, {FPNum, TestV});
1332 }
1333 
1334 CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
1335                                                          Value *PtrValue,
1336                                                          Value *AlignValue,
1337                                                          Value *OffsetValue) {
1338   SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
1339   if (OffsetValue)
1340     Vals.push_back(OffsetValue);
1341   OperandBundleDefT<Value *> AlignOpB("align", Vals);
1342   return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
1343 }
1344 
1345 CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
1346                                                    Value *PtrValue,
1347                                                    unsigned Alignment,
1348                                                    Value *OffsetValue) {
1349   assert(isa<PointerType>(PtrValue->getType()) &&
1350          "trying to create an alignment assumption on a non-pointer?");
1351   assert(Alignment != 0 && "Invalid Alignment");
1352   auto *PtrTy = cast<PointerType>(PtrValue->getType());
1353   Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1354   Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
1355   return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
1356 }
1357 
/// Overload taking the alignment as a runtime Value; forwards directly to
/// the common helper after checking the pointer operand.
CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
                                                   Value *PtrValue,
                                                   Value *Alignment,
                                                   Value *OffsetValue) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
}
1366 
// Out-of-line destructor and anchor definitions; defining them here pins the
// classes' vtables to this translation unit.
IRBuilderDefaultInserter::~IRBuilderDefaultInserter() = default;
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() = default;
IRBuilderFolder::~IRBuilderFolder() = default;
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}
1372