1 //===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This file implements the IRBuilder class, which is used as a convenient way
10 // to create LLVM instructions with a consistent and simplified interface.
11 //
12 //===----------------------------------------------------------------------===//
13
14 #include "llvm/IR/IRBuilder.h"
15 #include "llvm/ADT/ArrayRef.h"
16 #include "llvm/ADT/None.h"
17 #include "llvm/IR/Constant.h"
18 #include "llvm/IR/Constants.h"
19 #include "llvm/IR/DerivedTypes.h"
20 #include "llvm/IR/Function.h"
21 #include "llvm/IR/GlobalValue.h"
22 #include "llvm/IR/GlobalVariable.h"
23 #include "llvm/IR/IntrinsicInst.h"
24 #include "llvm/IR/Intrinsics.h"
25 #include "llvm/IR/LLVMContext.h"
26 #include "llvm/IR/NoFolder.h"
27 #include "llvm/IR/Operator.h"
28 #include "llvm/IR/Statepoint.h"
29 #include "llvm/IR/Type.h"
30 #include "llvm/IR/Value.h"
31 #include "llvm/Support/Casting.h"
32 #include "llvm/Support/MathExtras.h"
33 #include <cassert>
34 #include <cstdint>
35 #include <vector>
36
37 using namespace llvm;
38
39 /// CreateGlobalString - Make a new global variable with an initializer that
40 /// has array of i8 type filled in with the nul terminated string value
41 /// specified. If Name is specified, it is the name of the global variable
42 /// created.
CreateGlobalString(StringRef Str,const Twine & Name,unsigned AddressSpace,Module * M)43 GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
44 const Twine &Name,
45 unsigned AddressSpace,
46 Module *M) {
47 Constant *StrConstant = ConstantDataArray::getString(Context, Str);
48 if (!M)
49 M = BB->getParent()->getParent();
50 auto *GV = new GlobalVariable(
51 *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
52 StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
53 GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
54 GV->setAlignment(Align(1));
55 return GV;
56 }
57
getCurrentFunctionReturnType() const58 Type *IRBuilderBase::getCurrentFunctionReturnType() const {
59 assert(BB && BB->getParent() && "No current function!");
60 return BB->getParent()->getReturnType();
61 }
62
getCastedInt8PtrValue(Value * Ptr)63 Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
64 auto *PT = cast<PointerType>(Ptr->getType());
65 if (PT->getElementType()->isIntegerTy(8))
66 return Ptr;
67
68 // Otherwise, we need to insert a bitcast.
69 return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
70 }
71
createCallHelper(Function * Callee,ArrayRef<Value * > Ops,IRBuilderBase * Builder,const Twine & Name="",Instruction * FMFSource=nullptr,ArrayRef<OperandBundleDef> OpBundles={})72 static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
73 IRBuilderBase *Builder,
74 const Twine &Name = "",
75 Instruction *FMFSource = nullptr,
76 ArrayRef<OperandBundleDef> OpBundles = {}) {
77 CallInst *CI = Builder->CreateCall(Callee, Ops, OpBundles, Name);
78 if (FMFSource)
79 CI->copyFastMathFlags(FMFSource);
80 return CI;
81 }
82
CreateVScale(Constant * Scaling,const Twine & Name)83 Value *IRBuilderBase::CreateVScale(Constant *Scaling, const Twine &Name) {
84 assert(isa<ConstantInt>(Scaling) && "Expected constant integer");
85 if (cast<ConstantInt>(Scaling)->isZero())
86 return Scaling;
87 Module *M = GetInsertBlock()->getParent()->getParent();
88 Function *TheFn =
89 Intrinsic::getDeclaration(M, Intrinsic::vscale, {Scaling->getType()});
90 CallInst *CI = createCallHelper(TheFn, {}, this, Name);
91 return cast<ConstantInt>(Scaling)->getSExtValue() == 1
92 ? CI
93 : CreateMul(CI, Scaling);
94 }
95
CreateStepVector(Type * DstType,const Twine & Name)96 Value *IRBuilderBase::CreateStepVector(Type *DstType, const Twine &Name) {
97 if (isa<ScalableVectorType>(DstType))
98 return CreateIntrinsic(Intrinsic::experimental_stepvector, {DstType}, {},
99 nullptr, Name);
100
101 Type *STy = DstType->getScalarType();
102 unsigned NumEls = cast<FixedVectorType>(DstType)->getNumElements();
103
104 // Create a vector of consecutive numbers from zero to VF.
105 SmallVector<Constant *, 8> Indices;
106 for (unsigned i = 0; i < NumEls; ++i)
107 Indices.push_back(ConstantInt::get(STy, i));
108
109 // Add the consecutive indices to the vector value.
110 return ConstantVector::get(Indices);
111 }
112
CreateMemSet(Value * Ptr,Value * Val,Value * Size,MaybeAlign Align,bool isVolatile,MDNode * TBAATag,MDNode * ScopeTag,MDNode * NoAliasTag)113 CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
114 MaybeAlign Align, bool isVolatile,
115 MDNode *TBAATag, MDNode *ScopeTag,
116 MDNode *NoAliasTag) {
117 Ptr = getCastedInt8PtrValue(Ptr);
118 Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
119 Type *Tys[] = { Ptr->getType(), Size->getType() };
120 Module *M = BB->getParent()->getParent();
121 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);
122
123 CallInst *CI = createCallHelper(TheFn, Ops, this);
124
125 if (Align)
126 cast<MemSetInst>(CI)->setDestAlignment(Align->value());
127
128 // Set the TBAA info if present.
129 if (TBAATag)
130 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
131
132 if (ScopeTag)
133 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
134
135 if (NoAliasTag)
136 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
137
138 return CI;
139 }
140
CreateElementUnorderedAtomicMemSet(Value * Ptr,Value * Val,Value * Size,Align Alignment,uint32_t ElementSize,MDNode * TBAATag,MDNode * ScopeTag,MDNode * NoAliasTag)141 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
142 Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
143 MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {
144
145 Ptr = getCastedInt8PtrValue(Ptr);
146 Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
147 Type *Tys[] = {Ptr->getType(), Size->getType()};
148 Module *M = BB->getParent()->getParent();
149 Function *TheFn = Intrinsic::getDeclaration(
150 M, Intrinsic::memset_element_unordered_atomic, Tys);
151
152 CallInst *CI = createCallHelper(TheFn, Ops, this);
153
154 cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);
155
156 // Set the TBAA info if present.
157 if (TBAATag)
158 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
159
160 if (ScopeTag)
161 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
162
163 if (NoAliasTag)
164 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
165
166 return CI;
167 }
168
CreateMemTransferInst(Intrinsic::ID IntrID,Value * Dst,MaybeAlign DstAlign,Value * Src,MaybeAlign SrcAlign,Value * Size,bool isVolatile,MDNode * TBAATag,MDNode * TBAAStructTag,MDNode * ScopeTag,MDNode * NoAliasTag)169 CallInst *IRBuilderBase::CreateMemTransferInst(
170 Intrinsic::ID IntrID, Value *Dst, MaybeAlign DstAlign, Value *Src,
171 MaybeAlign SrcAlign, Value *Size, bool isVolatile, MDNode *TBAATag,
172 MDNode *TBAAStructTag, MDNode *ScopeTag, MDNode *NoAliasTag) {
173 Dst = getCastedInt8PtrValue(Dst);
174 Src = getCastedInt8PtrValue(Src);
175
176 Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
177 Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
178 Module *M = BB->getParent()->getParent();
179 Function *TheFn = Intrinsic::getDeclaration(M, IntrID, Tys);
180
181 CallInst *CI = createCallHelper(TheFn, Ops, this);
182
183 auto* MCI = cast<MemTransferInst>(CI);
184 if (DstAlign)
185 MCI->setDestAlignment(*DstAlign);
186 if (SrcAlign)
187 MCI->setSourceAlignment(*SrcAlign);
188
189 // Set the TBAA info if present.
190 if (TBAATag)
191 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
192
193 // Set the TBAA Struct info if present.
194 if (TBAAStructTag)
195 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
196
197 if (ScopeTag)
198 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
199
200 if (NoAliasTag)
201 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
202
203 return CI;
204 }
205
CreateMemCpyInline(Value * Dst,MaybeAlign DstAlign,Value * Src,MaybeAlign SrcAlign,Value * Size)206 CallInst *IRBuilderBase::CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign,
207 Value *Src, MaybeAlign SrcAlign,
208 Value *Size) {
209 Dst = getCastedInt8PtrValue(Dst);
210 Src = getCastedInt8PtrValue(Src);
211 Value *IsVolatile = getInt1(false);
212
213 Value *Ops[] = {Dst, Src, Size, IsVolatile};
214 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
215 Function *F = BB->getParent();
216 Module *M = F->getParent();
217 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);
218
219 CallInst *CI = createCallHelper(TheFn, Ops, this);
220
221 auto *MCI = cast<MemCpyInlineInst>(CI);
222 if (DstAlign)
223 MCI->setDestAlignment(*DstAlign);
224 if (SrcAlign)
225 MCI->setSourceAlignment(*SrcAlign);
226
227 return CI;
228 }
229
CreateElementUnorderedAtomicMemCpy(Value * Dst,Align DstAlign,Value * Src,Align SrcAlign,Value * Size,uint32_t ElementSize,MDNode * TBAATag,MDNode * TBAAStructTag,MDNode * ScopeTag,MDNode * NoAliasTag)230 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
231 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
232 uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
233 MDNode *ScopeTag, MDNode *NoAliasTag) {
234 assert(DstAlign >= ElementSize &&
235 "Pointer alignment must be at least element size");
236 assert(SrcAlign >= ElementSize &&
237 "Pointer alignment must be at least element size");
238 Dst = getCastedInt8PtrValue(Dst);
239 Src = getCastedInt8PtrValue(Src);
240
241 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
242 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
243 Module *M = BB->getParent()->getParent();
244 Function *TheFn = Intrinsic::getDeclaration(
245 M, Intrinsic::memcpy_element_unordered_atomic, Tys);
246
247 CallInst *CI = createCallHelper(TheFn, Ops, this);
248
249 // Set the alignment of the pointer args.
250 auto *AMCI = cast<AtomicMemCpyInst>(CI);
251 AMCI->setDestAlignment(DstAlign);
252 AMCI->setSourceAlignment(SrcAlign);
253
254 // Set the TBAA info if present.
255 if (TBAATag)
256 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
257
258 // Set the TBAA Struct info if present.
259 if (TBAAStructTag)
260 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
261
262 if (ScopeTag)
263 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
264
265 if (NoAliasTag)
266 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
267
268 return CI;
269 }
270
CreateMemMove(Value * Dst,MaybeAlign DstAlign,Value * Src,MaybeAlign SrcAlign,Value * Size,bool isVolatile,MDNode * TBAATag,MDNode * ScopeTag,MDNode * NoAliasTag)271 CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
272 Value *Src, MaybeAlign SrcAlign,
273 Value *Size, bool isVolatile,
274 MDNode *TBAATag, MDNode *ScopeTag,
275 MDNode *NoAliasTag) {
276 Dst = getCastedInt8PtrValue(Dst);
277 Src = getCastedInt8PtrValue(Src);
278
279 Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
280 Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
281 Module *M = BB->getParent()->getParent();
282 Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);
283
284 CallInst *CI = createCallHelper(TheFn, Ops, this);
285
286 auto *MMI = cast<MemMoveInst>(CI);
287 if (DstAlign)
288 MMI->setDestAlignment(*DstAlign);
289 if (SrcAlign)
290 MMI->setSourceAlignment(*SrcAlign);
291
292 // Set the TBAA info if present.
293 if (TBAATag)
294 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
295
296 if (ScopeTag)
297 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
298
299 if (NoAliasTag)
300 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
301
302 return CI;
303 }
304
CreateElementUnorderedAtomicMemMove(Value * Dst,Align DstAlign,Value * Src,Align SrcAlign,Value * Size,uint32_t ElementSize,MDNode * TBAATag,MDNode * TBAAStructTag,MDNode * ScopeTag,MDNode * NoAliasTag)305 CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
306 Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
307 uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
308 MDNode *ScopeTag, MDNode *NoAliasTag) {
309 assert(DstAlign >= ElementSize &&
310 "Pointer alignment must be at least element size");
311 assert(SrcAlign >= ElementSize &&
312 "Pointer alignment must be at least element size");
313 Dst = getCastedInt8PtrValue(Dst);
314 Src = getCastedInt8PtrValue(Src);
315
316 Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
317 Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
318 Module *M = BB->getParent()->getParent();
319 Function *TheFn = Intrinsic::getDeclaration(
320 M, Intrinsic::memmove_element_unordered_atomic, Tys);
321
322 CallInst *CI = createCallHelper(TheFn, Ops, this);
323
324 // Set the alignment of the pointer args.
325 CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
326 CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));
327
328 // Set the TBAA info if present.
329 if (TBAATag)
330 CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);
331
332 // Set the TBAA Struct info if present.
333 if (TBAAStructTag)
334 CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);
335
336 if (ScopeTag)
337 CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);
338
339 if (NoAliasTag)
340 CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);
341
342 return CI;
343 }
344
getReductionIntrinsic(IRBuilderBase * Builder,Intrinsic::ID ID,Value * Src)345 static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
346 Value *Src) {
347 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
348 Value *Ops[] = {Src};
349 Type *Tys[] = { Src->getType() };
350 auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
351 return createCallHelper(Decl, Ops, Builder);
352 }
353
CreateFAddReduce(Value * Acc,Value * Src)354 CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
355 Module *M = GetInsertBlock()->getParent()->getParent();
356 Value *Ops[] = {Acc, Src};
357 auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fadd,
358 {Src->getType()});
359 return createCallHelper(Decl, Ops, this);
360 }
361
CreateFMulReduce(Value * Acc,Value * Src)362 CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
363 Module *M = GetInsertBlock()->getParent()->getParent();
364 Value *Ops[] = {Acc, Src};
365 auto Decl = Intrinsic::getDeclaration(M, Intrinsic::vector_reduce_fmul,
366 {Src->getType()});
367 return createCallHelper(Decl, Ops, this);
368 }
369
CreateAddReduce(Value * Src)370 CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
371 return getReductionIntrinsic(this, Intrinsic::vector_reduce_add, Src);
372 }
373
CreateMulReduce(Value * Src)374 CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
375 return getReductionIntrinsic(this, Intrinsic::vector_reduce_mul, Src);
376 }
377
CreateAndReduce(Value * Src)378 CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
379 return getReductionIntrinsic(this, Intrinsic::vector_reduce_and, Src);
380 }
381
CreateOrReduce(Value * Src)382 CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
383 return getReductionIntrinsic(this, Intrinsic::vector_reduce_or, Src);
384 }
385
CreateXorReduce(Value * Src)386 CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
387 return getReductionIntrinsic(this, Intrinsic::vector_reduce_xor, Src);
388 }
389
CreateIntMaxReduce(Value * Src,bool IsSigned)390 CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
391 auto ID =
392 IsSigned ? Intrinsic::vector_reduce_smax : Intrinsic::vector_reduce_umax;
393 return getReductionIntrinsic(this, ID, Src);
394 }
395
CreateIntMinReduce(Value * Src,bool IsSigned)396 CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
397 auto ID =
398 IsSigned ? Intrinsic::vector_reduce_smin : Intrinsic::vector_reduce_umin;
399 return getReductionIntrinsic(this, ID, Src);
400 }
401
CreateFPMaxReduce(Value * Src)402 CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src) {
403 return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmax, Src);
404 }
405
CreateFPMinReduce(Value * Src)406 CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src) {
407 return getReductionIntrinsic(this, Intrinsic::vector_reduce_fmin, Src);
408 }
409
CreateLifetimeStart(Value * Ptr,ConstantInt * Size)410 CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
411 assert(isa<PointerType>(Ptr->getType()) &&
412 "lifetime.start only applies to pointers.");
413 Ptr = getCastedInt8PtrValue(Ptr);
414 if (!Size)
415 Size = getInt64(-1);
416 else
417 assert(Size->getType() == getInt64Ty() &&
418 "lifetime.start requires the size to be an i64");
419 Value *Ops[] = { Size, Ptr };
420 Module *M = BB->getParent()->getParent();
421 Function *TheFn =
422 Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
423 return createCallHelper(TheFn, Ops, this);
424 }
425
CreateLifetimeEnd(Value * Ptr,ConstantInt * Size)426 CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
427 assert(isa<PointerType>(Ptr->getType()) &&
428 "lifetime.end only applies to pointers.");
429 Ptr = getCastedInt8PtrValue(Ptr);
430 if (!Size)
431 Size = getInt64(-1);
432 else
433 assert(Size->getType() == getInt64Ty() &&
434 "lifetime.end requires the size to be an i64");
435 Value *Ops[] = { Size, Ptr };
436 Module *M = BB->getParent()->getParent();
437 Function *TheFn =
438 Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
439 return createCallHelper(TheFn, Ops, this);
440 }
441
CreateInvariantStart(Value * Ptr,ConstantInt * Size)442 CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {
443
444 assert(isa<PointerType>(Ptr->getType()) &&
445 "invariant.start only applies to pointers.");
446 Ptr = getCastedInt8PtrValue(Ptr);
447 if (!Size)
448 Size = getInt64(-1);
449 else
450 assert(Size->getType() == getInt64Ty() &&
451 "invariant.start requires the size to be an i64");
452
453 Value *Ops[] = {Size, Ptr};
454 // Fill in the single overloaded type: memory object type.
455 Type *ObjectPtr[1] = {Ptr->getType()};
456 Module *M = BB->getParent()->getParent();
457 Function *TheFn =
458 Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
459 return createCallHelper(TheFn, Ops, this);
460 }
461
462 CallInst *
CreateAssumption(Value * Cond,ArrayRef<OperandBundleDef> OpBundles)463 IRBuilderBase::CreateAssumption(Value *Cond,
464 ArrayRef<OperandBundleDef> OpBundles) {
465 assert(Cond->getType() == getInt1Ty() &&
466 "an assumption condition must be of type i1");
467
468 Value *Ops[] = { Cond };
469 Module *M = BB->getParent()->getParent();
470 Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
471 return createCallHelper(FnAssume, Ops, this, "", nullptr, OpBundles);
472 }
473
CreateNoAliasScopeDeclaration(Value * Scope)474 Instruction *IRBuilderBase::CreateNoAliasScopeDeclaration(Value *Scope) {
475 Module *M = BB->getModule();
476 auto *FnIntrinsic = Intrinsic::getDeclaration(
477 M, Intrinsic::experimental_noalias_scope_decl, {});
478 return createCallHelper(FnIntrinsic, {Scope}, this);
479 }
480
481 /// Create a call to a Masked Load intrinsic.
482 /// \p Ptr - base pointer for the load
483 /// \p Alignment - alignment of the source location
484 /// \p Mask - vector of booleans which indicates what vector lanes should
485 /// be accessed in memory
486 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
487 /// of the result
488 /// \p Name - name of the result variable
CreateMaskedLoad(Value * Ptr,Align Alignment,Value * Mask,Value * PassThru,const Twine & Name)489 CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
490 Value *Mask, Value *PassThru,
491 const Twine &Name) {
492 auto *PtrTy = cast<PointerType>(Ptr->getType());
493 Type *DataTy = PtrTy->getElementType();
494 assert(DataTy->isVectorTy() && "Ptr should point to a vector");
495 assert(Mask && "Mask should not be all-ones (null)");
496 if (!PassThru)
497 PassThru = UndefValue::get(DataTy);
498 Type *OverloadedTypes[] = { DataTy, PtrTy };
499 Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
500 return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
501 OverloadedTypes, Name);
502 }
503
504 /// Create a call to a Masked Store intrinsic.
505 /// \p Val - data to be stored,
506 /// \p Ptr - base pointer for the store
507 /// \p Alignment - alignment of the destination location
508 /// \p Mask - vector of booleans which indicates what vector lanes should
509 /// be accessed in memory
CreateMaskedStore(Value * Val,Value * Ptr,Align Alignment,Value * Mask)510 CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
511 Align Alignment, Value *Mask) {
512 auto *PtrTy = cast<PointerType>(Ptr->getType());
513 Type *DataTy = PtrTy->getElementType();
514 assert(DataTy->isVectorTy() && "Ptr should point to a vector");
515 assert(Mask && "Mask should not be all-ones (null)");
516 Type *OverloadedTypes[] = { DataTy, PtrTy };
517 Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
518 return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
519 }
520
521 /// Create a call to a Masked intrinsic, with given intrinsic Id,
522 /// an array of operands - Ops, and an array of overloaded types -
523 /// OverloadedTypes.
CreateMaskedIntrinsic(Intrinsic::ID Id,ArrayRef<Value * > Ops,ArrayRef<Type * > OverloadedTypes,const Twine & Name)524 CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
525 ArrayRef<Value *> Ops,
526 ArrayRef<Type *> OverloadedTypes,
527 const Twine &Name) {
528 Module *M = BB->getParent()->getParent();
529 Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
530 return createCallHelper(TheFn, Ops, this, Name);
531 }
532
533 /// Create a call to a Masked Gather intrinsic.
534 /// \p Ptrs - vector of pointers for loading
535 /// \p Align - alignment for one element
536 /// \p Mask - vector of booleans which indicates what vector lanes should
537 /// be accessed in memory
538 /// \p PassThru - pass-through value that is used to fill the masked-off lanes
539 /// of the result
540 /// \p Name - name of the result variable
CreateMaskedGather(Value * Ptrs,Align Alignment,Value * Mask,Value * PassThru,const Twine & Name)541 CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
542 Value *Mask, Value *PassThru,
543 const Twine &Name) {
544 auto *PtrsTy = cast<VectorType>(Ptrs->getType());
545 auto *PtrTy = cast<PointerType>(PtrsTy->getElementType());
546 ElementCount NumElts = PtrsTy->getElementCount();
547 auto *DataTy = VectorType::get(PtrTy->getElementType(), NumElts);
548
549 if (!Mask)
550 Mask = Constant::getAllOnesValue(
551 VectorType::get(Type::getInt1Ty(Context), NumElts));
552
553 if (!PassThru)
554 PassThru = UndefValue::get(DataTy);
555
556 Type *OverloadedTypes[] = {DataTy, PtrsTy};
557 Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};
558
559 // We specify only one type when we create this intrinsic. Types of other
560 // arguments are derived from this type.
561 return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
562 Name);
563 }
564
565 /// Create a call to a Masked Scatter intrinsic.
566 /// \p Data - data to be stored,
567 /// \p Ptrs - the vector of pointers, where the \p Data elements should be
568 /// stored
569 /// \p Align - alignment for one element
570 /// \p Mask - vector of booleans which indicates what vector lanes should
571 /// be accessed in memory
CreateMaskedScatter(Value * Data,Value * Ptrs,Align Alignment,Value * Mask)572 CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
573 Align Alignment, Value *Mask) {
574 auto *PtrsTy = cast<VectorType>(Ptrs->getType());
575 auto *DataTy = cast<VectorType>(Data->getType());
576 ElementCount NumElts = PtrsTy->getElementCount();
577
578 #ifndef NDEBUG
579 auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
580 assert(NumElts == DataTy->getElementCount() &&
581 PtrTy->getElementType() == DataTy->getElementType() &&
582 "Incompatible pointer and data types");
583 #endif
584
585 if (!Mask)
586 Mask = Constant::getAllOnesValue(
587 VectorType::get(Type::getInt1Ty(Context), NumElts));
588
589 Type *OverloadedTypes[] = {DataTy, PtrsTy};
590 Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};
591
592 // We specify only one type when we create this intrinsic. Types of other
593 // arguments are derived from this type.
594 return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
595 }
596
597 template <typename T0>
598 static std::vector<Value *>
getStatepointArgs(IRBuilderBase & B,uint64_t ID,uint32_t NumPatchBytes,Value * ActualCallee,uint32_t Flags,ArrayRef<T0> CallArgs)599 getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
600 Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
601 std::vector<Value *> Args;
602 Args.push_back(B.getInt64(ID));
603 Args.push_back(B.getInt32(NumPatchBytes));
604 Args.push_back(ActualCallee);
605 Args.push_back(B.getInt32(CallArgs.size()));
606 Args.push_back(B.getInt32(Flags));
607 llvm::append_range(Args, CallArgs);
608 // GC Transition and Deopt args are now always handled via operand bundle.
609 // They will be removed from the signature of gc.statepoint shortly.
610 Args.push_back(B.getInt32(0));
611 Args.push_back(B.getInt32(0));
612 // GC args are now encoded in the gc-live operand bundle
613 return Args;
614 }
615
616 template<typename T1, typename T2, typename T3>
617 static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,Optional<ArrayRef<T2>> DeoptArgs,ArrayRef<T3> GCArgs)618 getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
619 Optional<ArrayRef<T2>> DeoptArgs,
620 ArrayRef<T3> GCArgs) {
621 std::vector<OperandBundleDef> Rval;
622 if (DeoptArgs) {
623 SmallVector<Value*, 16> DeoptValues;
624 llvm::append_range(DeoptValues, *DeoptArgs);
625 Rval.emplace_back("deopt", DeoptValues);
626 }
627 if (TransitionArgs) {
628 SmallVector<Value*, 16> TransitionValues;
629 llvm::append_range(TransitionValues, *TransitionArgs);
630 Rval.emplace_back("gc-transition", TransitionValues);
631 }
632 if (GCArgs.size()) {
633 SmallVector<Value*, 16> LiveValues;
634 llvm::append_range(LiveValues, GCArgs);
635 Rval.emplace_back("gc-live", LiveValues);
636 }
637 return Rval;
638 }
639
640 template <typename T0, typename T1, typename T2, typename T3>
CreateGCStatepointCallCommon(IRBuilderBase * Builder,uint64_t ID,uint32_t NumPatchBytes,Value * ActualCallee,uint32_t Flags,ArrayRef<T0> CallArgs,Optional<ArrayRef<T1>> TransitionArgs,Optional<ArrayRef<T2>> DeoptArgs,ArrayRef<T3> GCArgs,const Twine & Name)641 static CallInst *CreateGCStatepointCallCommon(
642 IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
643 Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
644 Optional<ArrayRef<T1>> TransitionArgs,
645 Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
646 const Twine &Name) {
647 // Extract out the type of the callee.
648 auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
649 assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
650 "actual callee must be a callable value");
651
652 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
653 // Fill in the one generic type'd argument (the function is also vararg)
654 Type *ArgTypes[] = { FuncPtrType };
655 Function *FnStatepoint =
656 Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
657 ArgTypes);
658
659 std::vector<Value *> Args =
660 getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
661 CallArgs);
662
663 return Builder->CreateCall(FnStatepoint, Args,
664 getStatepointBundles(TransitionArgs, DeoptArgs,
665 GCArgs),
666 Name);
667 }
668
CreateGCStatepointCall(uint64_t ID,uint32_t NumPatchBytes,Value * ActualCallee,ArrayRef<Value * > CallArgs,Optional<ArrayRef<Value * >> DeoptArgs,ArrayRef<Value * > GCArgs,const Twine & Name)669 CallInst *IRBuilderBase::CreateGCStatepointCall(
670 uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
671 ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
672 ArrayRef<Value *> GCArgs, const Twine &Name) {
673 return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
674 this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
675 CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
676 }
677
CreateGCStatepointCall(uint64_t ID,uint32_t NumPatchBytes,Value * ActualCallee,uint32_t Flags,ArrayRef<Value * > CallArgs,Optional<ArrayRef<Use>> TransitionArgs,Optional<ArrayRef<Use>> DeoptArgs,ArrayRef<Value * > GCArgs,const Twine & Name)678 CallInst *IRBuilderBase::CreateGCStatepointCall(
679 uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
680 ArrayRef<Value *> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
681 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
682 const Twine &Name) {
683 return CreateGCStatepointCallCommon<Value *, Use, Use, Value *>(
684 this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
685 DeoptArgs, GCArgs, Name);
686 }
687
CreateGCStatepointCall(uint64_t ID,uint32_t NumPatchBytes,Value * ActualCallee,ArrayRef<Use> CallArgs,Optional<ArrayRef<Value * >> DeoptArgs,ArrayRef<Value * > GCArgs,const Twine & Name)688 CallInst *IRBuilderBase::CreateGCStatepointCall(
689 uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
690 ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
691 ArrayRef<Value *> GCArgs, const Twine &Name) {
692 return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
693 this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
694 CallArgs, None, DeoptArgs, GCArgs, Name);
695 }
696
697 template <typename T0, typename T1, typename T2, typename T3>
CreateGCStatepointInvokeCommon(IRBuilderBase * Builder,uint64_t ID,uint32_t NumPatchBytes,Value * ActualInvokee,BasicBlock * NormalDest,BasicBlock * UnwindDest,uint32_t Flags,ArrayRef<T0> InvokeArgs,Optional<ArrayRef<T1>> TransitionArgs,Optional<ArrayRef<T2>> DeoptArgs,ArrayRef<T3> GCArgs,const Twine & Name)698 static InvokeInst *CreateGCStatepointInvokeCommon(
699 IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
700 Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
701 uint32_t Flags, ArrayRef<T0> InvokeArgs,
702 Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
703 ArrayRef<T3> GCArgs, const Twine &Name) {
704 // Extract out the type of the callee.
705 auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
706 assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
707 "actual callee must be a callable value");
708
709 Module *M = Builder->GetInsertBlock()->getParent()->getParent();
710 // Fill in the one generic type'd argument (the function is also vararg)
711 Function *FnStatepoint = Intrinsic::getDeclaration(
712 M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});
713
714 std::vector<Value *> Args =
715 getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
716 InvokeArgs);
717
718 return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
719 getStatepointBundles(TransitionArgs, DeoptArgs,
720 GCArgs),
721 Name);
722 }
723
CreateGCStatepointInvoke(uint64_t ID,uint32_t NumPatchBytes,Value * ActualInvokee,BasicBlock * NormalDest,BasicBlock * UnwindDest,ArrayRef<Value * > InvokeArgs,Optional<ArrayRef<Value * >> DeoptArgs,ArrayRef<Value * > GCArgs,const Twine & Name)724 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
725 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
726 BasicBlock *NormalDest, BasicBlock *UnwindDest,
727 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
728 ArrayRef<Value *> GCArgs, const Twine &Name) {
729 return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
730 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
731 uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
732 DeoptArgs, GCArgs, Name);
733 }
734
CreateGCStatepointInvoke(uint64_t ID,uint32_t NumPatchBytes,Value * ActualInvokee,BasicBlock * NormalDest,BasicBlock * UnwindDest,uint32_t Flags,ArrayRef<Value * > InvokeArgs,Optional<ArrayRef<Use>> TransitionArgs,Optional<ArrayRef<Use>> DeoptArgs,ArrayRef<Value * > GCArgs,const Twine & Name)735 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
736 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
737 BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
738 ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
739 Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
740 return CreateGCStatepointInvokeCommon<Value *, Use, Use, Value *>(
741 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
742 InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
743 }
744
CreateGCStatepointInvoke(uint64_t ID,uint32_t NumPatchBytes,Value * ActualInvokee,BasicBlock * NormalDest,BasicBlock * UnwindDest,ArrayRef<Use> InvokeArgs,Optional<ArrayRef<Value * >> DeoptArgs,ArrayRef<Value * > GCArgs,const Twine & Name)745 InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
746 uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
747 BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
748 Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs, const Twine &Name) {
749 return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
750 this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
751 uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
752 Name);
753 }
754
CreateGCResult(Instruction * Statepoint,Type * ResultType,const Twine & Name)755 CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
756 Type *ResultType,
757 const Twine &Name) {
758 Intrinsic::ID ID = Intrinsic::experimental_gc_result;
759 Module *M = BB->getParent()->getParent();
760 Type *Types[] = {ResultType};
761 Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);
762
763 Value *Args[] = {Statepoint};
764 return createCallHelper(FnGCResult, Args, this, Name);
765 }
766
CreateGCRelocate(Instruction * Statepoint,int BaseOffset,int DerivedOffset,Type * ResultType,const Twine & Name)767 CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
768 int BaseOffset,
769 int DerivedOffset,
770 Type *ResultType,
771 const Twine &Name) {
772 Module *M = BB->getParent()->getParent();
773 Type *Types[] = {ResultType};
774 Function *FnGCRelocate =
775 Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);
776
777 Value *Args[] = {Statepoint,
778 getInt32(BaseOffset),
779 getInt32(DerivedOffset)};
780 return createCallHelper(FnGCRelocate, Args, this, Name);
781 }
782
CreateUnaryIntrinsic(Intrinsic::ID ID,Value * V,Instruction * FMFSource,const Twine & Name)783 CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
784 Instruction *FMFSource,
785 const Twine &Name) {
786 Module *M = BB->getModule();
787 Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
788 return createCallHelper(Fn, {V}, this, Name, FMFSource);
789 }
790
CreateBinaryIntrinsic(Intrinsic::ID ID,Value * LHS,Value * RHS,Instruction * FMFSource,const Twine & Name)791 CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
792 Value *RHS,
793 Instruction *FMFSource,
794 const Twine &Name) {
795 Module *M = BB->getModule();
796 Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
797 return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
798 }
799
CreateIntrinsic(Intrinsic::ID ID,ArrayRef<Type * > Types,ArrayRef<Value * > Args,Instruction * FMFSource,const Twine & Name)800 CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
801 ArrayRef<Type *> Types,
802 ArrayRef<Value *> Args,
803 Instruction *FMFSource,
804 const Twine &Name) {
805 Module *M = BB->getModule();
806 Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
807 return createCallHelper(Fn, Args, this, Name, FMFSource);
808 }
809
CreateConstrainedFPBinOp(Intrinsic::ID ID,Value * L,Value * R,Instruction * FMFSource,const Twine & Name,MDNode * FPMathTag,Optional<RoundingMode> Rounding,Optional<fp::ExceptionBehavior> Except)810 CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
811 Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
812 const Twine &Name, MDNode *FPMathTag,
813 Optional<RoundingMode> Rounding,
814 Optional<fp::ExceptionBehavior> Except) {
815 Value *RoundingV = getConstrainedFPRounding(Rounding);
816 Value *ExceptV = getConstrainedFPExcept(Except);
817
818 FastMathFlags UseFMF = FMF;
819 if (FMFSource)
820 UseFMF = FMFSource->getFastMathFlags();
821
822 CallInst *C = CreateIntrinsic(ID, {L->getType()},
823 {L, R, RoundingV, ExceptV}, nullptr, Name);
824 setConstrainedFPCallAttr(C);
825 setFPAttrs(C, FPMathTag, UseFMF);
826 return C;
827 }
828
CreateNAryOp(unsigned Opc,ArrayRef<Value * > Ops,const Twine & Name,MDNode * FPMathTag)829 Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
830 const Twine &Name, MDNode *FPMathTag) {
831 if (Instruction::isBinaryOp(Opc)) {
832 assert(Ops.size() == 2 && "Invalid number of operands!");
833 return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
834 Ops[0], Ops[1], Name, FPMathTag);
835 }
836 if (Instruction::isUnaryOp(Opc)) {
837 assert(Ops.size() == 1 && "Invalid number of operands!");
838 return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
839 Ops[0], Name, FPMathTag);
840 }
841 llvm_unreachable("Unexpected opcode!");
842 }
843
/// Create a call to a constrained floating-point cast intrinsic.
///
/// Some cast intrinsics take a rounding-mode operand and some do not; the
/// ROUND_MODE column of llvm/IR/ConstrainedOps.def says which.  All take an
/// exception-behavior operand.  Fast-math flags are inherited from
/// \p FMFSource when provided, otherwise from the builder's current FMF.
CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  // Expand the .def table into a switch that tells us whether this intrinsic
  // carries a rounding-mode operand.
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  // Casts are overloaded on both the destination and the source type.
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  // Only FP-valued results can carry fast-math flags.
  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}
880
CreateFCmpHelper(CmpInst::Predicate P,Value * LHS,Value * RHS,const Twine & Name,MDNode * FPMathTag,bool IsSignaling)881 Value *IRBuilderBase::CreateFCmpHelper(
882 CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
883 MDNode *FPMathTag, bool IsSignaling) {
884 if (IsFPConstrained) {
885 auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
886 : Intrinsic::experimental_constrained_fcmp;
887 return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
888 }
889
890 if (auto *LC = dyn_cast<Constant>(LHS))
891 if (auto *RC = dyn_cast<Constant>(RHS))
892 return Insert(Folder.CreateFCmp(P, LC, RC), Name);
893 return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
894 }
895
CreateConstrainedFPCmp(Intrinsic::ID ID,CmpInst::Predicate P,Value * L,Value * R,const Twine & Name,Optional<fp::ExceptionBehavior> Except)896 CallInst *IRBuilderBase::CreateConstrainedFPCmp(
897 Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
898 const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
899 Value *PredicateV = getConstrainedFPPredicate(P);
900 Value *ExceptV = getConstrainedFPExcept(Except);
901
902 CallInst *C = CreateIntrinsic(ID, {L->getType()},
903 {L, R, PredicateV, ExceptV}, nullptr, Name);
904 setConstrainedFPCallAttr(C);
905 return C;
906 }
907
/// Create a call to an already-declared constrained-FP intrinsic function.
///
/// The explicit arguments in \p Args are forwarded unchanged; the
/// constrained metadata operands (a rounding mode, when the
/// ConstrainedOps.def table says the intrinsic has one, then the exception
/// behavior) are appended after them.
CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  append_range(UseArgs, Args);
  bool HasRoundingMD = false;
  // Expand the .def table into a switch that tells us whether this intrinsic
  // carries a rounding-mode operand.
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)                         \
  case Intrinsic::INTRINSIC:                                                   \
    HasRoundingMD = ROUND_MODE;                                                \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}
933
CreateSelect(Value * C,Value * True,Value * False,const Twine & Name,Instruction * MDFrom)934 Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
935 const Twine &Name, Instruction *MDFrom) {
936 if (auto *CC = dyn_cast<Constant>(C))
937 if (auto *TC = dyn_cast<Constant>(True))
938 if (auto *FC = dyn_cast<Constant>(False))
939 return Insert(Folder.CreateSelect(CC, TC, FC), Name);
940
941 SelectInst *Sel = SelectInst::Create(C, True, False);
942 if (MDFrom) {
943 MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
944 MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
945 Sel = addBranchMetadata(Sel, Prof, Unpred);
946 }
947 if (isa<FPMathOperator>(Sel))
948 setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
949 return Insert(Sel, Name);
950 }
951
CreatePtrDiff(Value * LHS,Value * RHS,const Twine & Name)952 Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
953 const Twine &Name) {
954 assert(LHS->getType() == RHS->getType() &&
955 "Pointer subtraction operand types must match!");
956 auto *ArgType = cast<PointerType>(LHS->getType());
957 Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
958 Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
959 Value *Difference = CreateSub(LHS_int, RHS_int);
960 return CreateExactSDiv(Difference,
961 ConstantExpr::getSizeOf(ArgType->getElementType()),
962 Name);
963 }
964
CreateLaunderInvariantGroup(Value * Ptr)965 Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
966 assert(isa<PointerType>(Ptr->getType()) &&
967 "launder.invariant.group only applies to pointers.");
968 // FIXME: we could potentially avoid casts to/from i8*.
969 auto *PtrType = Ptr->getType();
970 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
971 if (PtrType != Int8PtrTy)
972 Ptr = CreateBitCast(Ptr, Int8PtrTy);
973 Module *M = BB->getParent()->getParent();
974 Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
975 M, Intrinsic::launder_invariant_group, {Int8PtrTy});
976
977 assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
978 FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
979 Int8PtrTy &&
980 "LaunderInvariantGroup should take and return the same type");
981
982 CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});
983
984 if (PtrType != Int8PtrTy)
985 return CreateBitCast(Fn, PtrType);
986 return Fn;
987 }
988
CreateStripInvariantGroup(Value * Ptr)989 Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
990 assert(isa<PointerType>(Ptr->getType()) &&
991 "strip.invariant.group only applies to pointers.");
992
993 // FIXME: we could potentially avoid casts to/from i8*.
994 auto *PtrType = Ptr->getType();
995 auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
996 if (PtrType != Int8PtrTy)
997 Ptr = CreateBitCast(Ptr, Int8PtrTy);
998 Module *M = BB->getParent()->getParent();
999 Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
1000 M, Intrinsic::strip_invariant_group, {Int8PtrTy});
1001
1002 assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
1003 FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
1004 Int8PtrTy &&
1005 "StripInvariantGroup should take and return the same type");
1006
1007 CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});
1008
1009 if (PtrType != Int8PtrTy)
1010 return CreateBitCast(Fn, PtrType);
1011 return Fn;
1012 }
1013
CreateVectorReverse(Value * V,const Twine & Name)1014 Value *IRBuilderBase::CreateVectorReverse(Value *V, const Twine &Name) {
1015 auto *Ty = cast<VectorType>(V->getType());
1016 if (isa<ScalableVectorType>(Ty)) {
1017 Module *M = BB->getParent()->getParent();
1018 Function *F = Intrinsic::getDeclaration(
1019 M, Intrinsic::experimental_vector_reverse, Ty);
1020 return Insert(CallInst::Create(F, V), Name);
1021 }
1022 // Keep the original behaviour for fixed vector
1023 SmallVector<int, 8> ShuffleMask;
1024 int NumElts = Ty->getElementCount().getKnownMinValue();
1025 for (int i = 0; i < NumElts; ++i)
1026 ShuffleMask.push_back(NumElts - i - 1);
1027 return CreateShuffleVector(V, ShuffleMask, Name);
1028 }
1029
/// Concatenate V1 and V2 and extract a vector starting \p Imm elements in:
/// a non-negative Imm is an index from the start of V1, a negative Imm
/// counts trailing elements of V1.  Scalable vectors use the
/// experimental.vector.splice intrinsic; fixed vectors use a shuffle.
Value *IRBuilderBase::CreateVectorSplice(Value *V1, Value *V2, int64_t Imm,
                                         const Twine &Name) {
  assert(isa<VectorType>(V1->getType()) && "Unexpected type");
  assert(V1->getType() == V2->getType() &&
         "Splice expects matching operand types!");

  if (auto *VTy = dyn_cast<ScalableVectorType>(V1->getType())) {
    Module *M = BB->getParent()->getParent();
    Function *F = Intrinsic::getDeclaration(
        M, Intrinsic::experimental_vector_splice, VTy);

    Value *Ops[] = {V1, V2, getInt32(Imm)};
    return Insert(CallInst::Create(F, Ops), Name);
  }

  unsigned NumElts = cast<FixedVectorType>(V1->getType())->getNumElements();
  // The immediate must be a trailing-element count in [-NumElts, 0) or a
  // leading index in [0, NumElts) — both bounds must hold, and the compare
  // is done in int64_t so a negative Imm is not mangled by unsigned
  // promotion.  (The previous `||` form accepted almost any immediate.)
  assert((-Imm <= (int64_t)NumElts && Imm < (int64_t)NumElts) &&
         "Invalid immediate for vector splice!");

  // Keep the original behaviour for fixed vector: shuffle mask
  // {Idx, Idx+1, ...} into the concatenation of V1 and V2.
  unsigned Idx = (NumElts + Imm) % NumElts;
  SmallVector<int, 8> Mask;
  for (unsigned I = 0; I < NumElts; ++I)
    Mask.push_back(Idx + I);

  // Forward Name so the resulting shuffle is named like every other path.
  return CreateShuffleVector(V1, V2, Mask, Name);
}
1057
CreateVectorSplat(unsigned NumElts,Value * V,const Twine & Name)1058 Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
1059 const Twine &Name) {
1060 auto EC = ElementCount::getFixed(NumElts);
1061 return CreateVectorSplat(EC, V, Name);
1062 }
1063
CreateVectorSplat(ElementCount EC,Value * V,const Twine & Name)1064 Value *IRBuilderBase::CreateVectorSplat(ElementCount EC, Value *V,
1065 const Twine &Name) {
1066 assert(EC.isNonZero() && "Cannot splat to an empty vector!");
1067
1068 // First insert it into a poison vector so we can shuffle it.
1069 Type *I32Ty = getInt32Ty();
1070 Value *Poison = PoisonValue::get(VectorType::get(V->getType(), EC));
1071 V = CreateInsertElement(Poison, V, ConstantInt::get(I32Ty, 0),
1072 Name + ".splatinsert");
1073
1074 // Shuffle the value across the desired number of elements.
1075 SmallVector<int, 16> Zeros;
1076 Zeros.resize(EC.getKnownMinValue());
1077 return CreateShuffleVector(V, Zeros, Name + ".splat");
1078 }
1079
/// Extract the sub-integer of type \p ExtractedTy that lives \p Offset bytes
/// into \p From, honoring the target's endianness: shift the wanted bytes to
/// the low end, then truncate.
Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");

  // On big-endian targets the byte at Offset sits at the high end of the
  // value, so the shift distance is measured from the opposite side.
  uint64_t ShAmt = 8 * Offset;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);

  Value *V = From;
  if (ShAmt)
    V = CreateLShr(V, ShAmt, Name + ".shift");
  if (ExtractedTy != IntTy)
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  return V;
}
1102
CreatePreserveArrayAccessIndex(Type * ElTy,Value * Base,unsigned Dimension,unsigned LastIndex,MDNode * DbgInfo)1103 Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
1104 Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
1105 MDNode *DbgInfo) {
1106 assert(isa<PointerType>(Base->getType()) &&
1107 "Invalid Base ptr type for preserve.array.access.index.");
1108 auto *BaseType = Base->getType();
1109
1110 Value *LastIndexV = getInt32(LastIndex);
1111 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1112 SmallVector<Value *, 4> IdxList(Dimension, Zero);
1113 IdxList.push_back(LastIndexV);
1114
1115 Type *ResultType =
1116 GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);
1117
1118 Module *M = BB->getParent()->getParent();
1119 Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
1120 M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});
1121
1122 Value *DimV = getInt32(Dimension);
1123 CallInst *Fn =
1124 CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
1125 if (DbgInfo)
1126 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1127
1128 return Fn;
1129 }
1130
CreatePreserveUnionAccessIndex(Value * Base,unsigned FieldIndex,MDNode * DbgInfo)1131 Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
1132 Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
1133 assert(isa<PointerType>(Base->getType()) &&
1134 "Invalid Base ptr type for preserve.union.access.index.");
1135 auto *BaseType = Base->getType();
1136
1137 Module *M = BB->getParent()->getParent();
1138 Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
1139 M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});
1140
1141 Value *DIIndex = getInt32(FieldIndex);
1142 CallInst *Fn =
1143 CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
1144 if (DbgInfo)
1145 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1146
1147 return Fn;
1148 }
1149
CreatePreserveStructAccessIndex(Type * ElTy,Value * Base,unsigned Index,unsigned FieldIndex,MDNode * DbgInfo)1150 Value *IRBuilderBase::CreatePreserveStructAccessIndex(
1151 Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
1152 MDNode *DbgInfo) {
1153 assert(isa<PointerType>(Base->getType()) &&
1154 "Invalid Base ptr type for preserve.struct.access.index.");
1155 auto *BaseType = Base->getType();
1156
1157 Value *GEPIndex = getInt32(Index);
1158 Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
1159 Type *ResultType =
1160 GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});
1161
1162 Module *M = BB->getParent()->getParent();
1163 Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
1164 M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});
1165
1166 Value *DIIndex = getInt32(FieldIndex);
1167 CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
1168 {Base, GEPIndex, DIIndex});
1169 if (DbgInfo)
1170 Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);
1171
1172 return Fn;
1173 }
1174
CreateAlignmentAssumptionHelper(const DataLayout & DL,Value * PtrValue,Value * AlignValue,Value * OffsetValue)1175 CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(const DataLayout &DL,
1176 Value *PtrValue,
1177 Value *AlignValue,
1178 Value *OffsetValue) {
1179 SmallVector<Value *, 4> Vals({PtrValue, AlignValue});
1180 if (OffsetValue)
1181 Vals.push_back(OffsetValue);
1182 OperandBundleDefT<Value *> AlignOpB("align", Vals);
1183 return CreateAssumption(ConstantInt::getTrue(getContext()), {AlignOpB});
1184 }
1185
CreateAlignmentAssumption(const DataLayout & DL,Value * PtrValue,unsigned Alignment,Value * OffsetValue)1186 CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
1187 Value *PtrValue,
1188 unsigned Alignment,
1189 Value *OffsetValue) {
1190 assert(isa<PointerType>(PtrValue->getType()) &&
1191 "trying to create an alignment assumption on a non-pointer?");
1192 assert(Alignment != 0 && "Invalid Alignment");
1193 auto *PtrTy = cast<PointerType>(PtrValue->getType());
1194 Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());
1195 Value *AlignValue = ConstantInt::get(IntPtrTy, Alignment);
1196 return CreateAlignmentAssumptionHelper(DL, PtrValue, AlignValue, OffsetValue);
1197 }
1198
CreateAlignmentAssumption(const DataLayout & DL,Value * PtrValue,Value * Alignment,Value * OffsetValue)1199 CallInst *IRBuilderBase::CreateAlignmentAssumption(const DataLayout &DL,
1200 Value *PtrValue,
1201 Value *Alignment,
1202 Value *OffsetValue) {
1203 assert(isa<PointerType>(PtrValue->getType()) &&
1204 "trying to create an alignment assumption on a non-pointer?");
1205 return CreateAlignmentAssumptionHelper(DL, PtrValue, Alignment, OffsetValue);
1206 }
1207
~IRBuilderDefaultInserter()1208 IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
~IRBuilderCallbackInserter()1209 IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
~IRBuilderFolder()1210 IRBuilderFolder::~IRBuilderFolder() {}
anchor()1211 void ConstantFolder::anchor() {}
anchor()1212 void NoFolder::anchor() {}
1213