//===- IRBuilder.cpp - Builder for LLVM Instrs ----------------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file implements the IRBuilder class, which is used as a convenient way
// to create LLVM instructions with a consistent and simplified interface.
//
//===----------------------------------------------------------------------===//

#include "llvm/IR/IRBuilder.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/None.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/NoFolder.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/Statepoint.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/MathExtras.h"
#include <cassert>
#include <cstdint>
#include <vector>

using namespace llvm;

/// CreateGlobalString - Make a new global variable whose initializer is an
/// array of i8 filled in with the nul-terminated string value specified. If
/// Name is specified, it is the name of the global variable created.
GlobalVariable *IRBuilderBase::CreateGlobalString(StringRef Str,
                                                  const Twine &Name,
                                                  unsigned AddressSpace,
                                                  Module *M) {
  Constant *StrConstant = ConstantDataArray::getString(Context, Str);
  if (!M)
    M = BB->getParent()->getParent();
  auto *GV = new GlobalVariable(
      *M, StrConstant->getType(), true, GlobalValue::PrivateLinkage,
      StrConstant, Name, nullptr, GlobalVariable::NotThreadLocal, AddressSpace);
  GV->setUnnamedAddr(GlobalValue::UnnamedAddr::Global);
  GV->setAlignment(Align(1));
  return GV;
}
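
// Illustrative use only (not part of the upstream file): with an IRBuilder<>
// `B` whose insertion point sits inside a function, this creates a private,
// unnamed_addr, 1-byte-aligned [4 x i8] constant holding "abc" plus the nul
// terminator, relying on the default AddressSpace/Module arguments declared
// in IRBuilder.h:
//
//   GlobalVariable *GV = B.CreateGlobalString("abc", "str");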

Type *IRBuilderBase::getCurrentFunctionReturnType() const {
  assert(BB && BB->getParent() && "No current function!");
  return BB->getParent()->getReturnType();
}

Value *IRBuilderBase::getCastedInt8PtrValue(Value *Ptr) {
  auto *PT = cast<PointerType>(Ptr->getType());
  if (PT->getElementType()->isIntegerTy(8))
    return Ptr;

  // Otherwise, we need to insert a bitcast.
  return CreateBitCast(Ptr, getInt8PtrTy(PT->getAddressSpace()));
}

static CallInst *createCallHelper(Function *Callee, ArrayRef<Value *> Ops,
                                  IRBuilderBase *Builder,
                                  const Twine &Name = "",
                                  Instruction *FMFSource = nullptr) {
  CallInst *CI = Builder->CreateCall(Callee, Ops, Name);
  if (FMFSource)
    CI->copyFastMathFlags(FMFSource);
  return CI;
}

CallInst *IRBuilderBase::CreateMemSet(Value *Ptr, Value *Val, Value *Size,
                                      MaybeAlign Align, bool isVolatile,
                                      MDNode *TBAATag, MDNode *ScopeTag,
                                      MDNode *NoAliasTag) {
  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt1(isVolatile)};
  Type *Tys[] = { Ptr->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memset, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  if (Align)
    cast<MemSetInst>(CI)->setDestAlignment(Align->value());

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}
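
// Usage sketch (illustrative, with hypothetical values): given an i8* `Dst`
// and an i64 `Len`, emit an llvm.memset of `Len` zero bytes with a known
// 16-byte destination alignment, leaving the metadata parameters at their
// defaults from IRBuilder.h so nothing extra is attached:
//
//   B.CreateMemSet(Dst, B.getInt8(0), Len, MaybeAlign(16));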

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemSet(
    Value *Ptr, Value *Val, Value *Size, Align Alignment, uint32_t ElementSize,
    MDNode *TBAATag, MDNode *ScopeTag, MDNode *NoAliasTag) {

  Ptr = getCastedInt8PtrValue(Ptr);
  Value *Ops[] = {Ptr, Val, Size, getInt32(ElementSize)};
  Type *Tys[] = {Ptr->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memset_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  cast<AtomicMemSetInst>(CI)->setDestAlignment(Alignment);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemCpy(Value *Dst, MaybeAlign DstAlign,
                                      Value *Src, MaybeAlign SrcAlign,
                                      Value *Size, bool isVolatile,
                                      MDNode *TBAATag, MDNode *TBAAStructTag,
                                      MDNode *ScopeTag, MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto* MCI = cast<MemCpyInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemCpyInline(Value *Dst, MaybeAlign DstAlign,
                                            Value *Src, MaybeAlign SrcAlign,
                                            Value *Size) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);
  Value *IsVolatile = getInt1(false);

  Value *Ops[] = {Dst, Src, Size, IsVolatile};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Function *F = BB->getParent();
  Module *M = F->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memcpy_inline, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MCI = cast<MemCpyInlineInst>(CI);
  if (DstAlign)
    MCI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MCI->setSourceAlignment(*SrcAlign);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemCpy(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memcpy_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  auto *AMCI = cast<AtomicMemCpyInst>(CI);
  AMCI->setDestAlignment(DstAlign);
  AMCI->setSourceAlignment(SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateMemMove(Value *Dst, MaybeAlign DstAlign,
                                       Value *Src, MaybeAlign SrcAlign,
                                       Value *Size, bool isVolatile,
                                       MDNode *TBAATag, MDNode *ScopeTag,
                                       MDNode *NoAliasTag) {
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt1(isVolatile)};
  Type *Tys[] = { Dst->getType(), Src->getType(), Size->getType() };
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Intrinsic::memmove, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  auto *MMI = cast<MemMoveInst>(CI);
  if (DstAlign)
    MMI->setDestAlignment(*DstAlign);
  if (SrcAlign)
    MMI->setSourceAlignment(*SrcAlign);

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

CallInst *IRBuilderBase::CreateElementUnorderedAtomicMemMove(
    Value *Dst, Align DstAlign, Value *Src, Align SrcAlign, Value *Size,
    uint32_t ElementSize, MDNode *TBAATag, MDNode *TBAAStructTag,
    MDNode *ScopeTag, MDNode *NoAliasTag) {
  assert(DstAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  assert(SrcAlign >= ElementSize &&
         "Pointer alignment must be at least element size");
  Dst = getCastedInt8PtrValue(Dst);
  Src = getCastedInt8PtrValue(Src);

  Value *Ops[] = {Dst, Src, Size, getInt32(ElementSize)};
  Type *Tys[] = {Dst->getType(), Src->getType(), Size->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(
      M, Intrinsic::memmove_element_unordered_atomic, Tys);

  CallInst *CI = createCallHelper(TheFn, Ops, this);

  // Set the alignment of the pointer args.
  CI->addParamAttr(0, Attribute::getWithAlignment(CI->getContext(), DstAlign));
  CI->addParamAttr(1, Attribute::getWithAlignment(CI->getContext(), SrcAlign));

  // Set the TBAA info if present.
  if (TBAATag)
    CI->setMetadata(LLVMContext::MD_tbaa, TBAATag);

  // Set the TBAA Struct info if present.
  if (TBAAStructTag)
    CI->setMetadata(LLVMContext::MD_tbaa_struct, TBAAStructTag);

  if (ScopeTag)
    CI->setMetadata(LLVMContext::MD_alias_scope, ScopeTag);

  if (NoAliasTag)
    CI->setMetadata(LLVMContext::MD_noalias, NoAliasTag);

  return CI;
}

static CallInst *getReductionIntrinsic(IRBuilderBase *Builder, Intrinsic::ID ID,
                                       Value *Src) {
  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Src};
  Type *Tys[] = { Src->getType() };
  auto Decl = Intrinsic::getDeclaration(M, ID, Tys);
  return createCallHelper(Decl, Ops, Builder);
}

CallInst *IRBuilderBase::CreateFAddReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  Type *Tys[] = {Acc->getType(), Src->getType()};
  auto Decl = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_vector_reduce_v2_fadd, Tys);
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateFMulReduce(Value *Acc, Value *Src) {
  Module *M = GetInsertBlock()->getParent()->getParent();
  Value *Ops[] = {Acc, Src};
  Type *Tys[] = {Acc->getType(), Src->getType()};
  auto Decl = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_vector_reduce_v2_fmul, Tys);
  return createCallHelper(Decl, Ops, this);
}

CallInst *IRBuilderBase::CreateAddReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_add,
                               Src);
}

CallInst *IRBuilderBase::CreateMulReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_mul,
                               Src);
}

CallInst *IRBuilderBase::CreateAndReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_and,
                               Src);
}

CallInst *IRBuilderBase::CreateOrReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_or,
                               Src);
}

CallInst *IRBuilderBase::CreateXorReduce(Value *Src) {
  return getReductionIntrinsic(this, Intrinsic::experimental_vector_reduce_xor,
                               Src);
}

CallInst *IRBuilderBase::CreateIntMaxReduce(Value *Src, bool IsSigned) {
  auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smax
                     : Intrinsic::experimental_vector_reduce_umax;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateIntMinReduce(Value *Src, bool IsSigned) {
  auto ID = IsSigned ? Intrinsic::experimental_vector_reduce_smin
                     : Intrinsic::experimental_vector_reduce_umin;
  return getReductionIntrinsic(this, ID, Src);
}

CallInst *IRBuilderBase::CreateFPMaxReduce(Value *Src, bool NoNaN) {
  auto Rdx = getReductionIntrinsic(
      this, Intrinsic::experimental_vector_reduce_fmax, Src);
  if (NoNaN) {
    FastMathFlags FMF;
    FMF.setNoNaNs();
    Rdx->setFastMathFlags(FMF);
  }
  return Rdx;
}

CallInst *IRBuilderBase::CreateFPMinReduce(Value *Src, bool NoNaN) {
  auto Rdx = getReductionIntrinsic(
      this, Intrinsic::experimental_vector_reduce_fmin, Src);
  if (NoNaN) {
    FastMathFlags FMF;
    FMF.setNoNaNs();
    Rdx->setFastMathFlags(FMF);
  }
  return Rdx;
}

CallInst *IRBuilderBase::CreateLifetimeStart(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.start requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_start, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateLifetimeEnd(Value *Ptr, ConstantInt *Size) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "lifetime.end only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "lifetime.end requires the size to be an i64");
  Value *Ops[] = { Size, Ptr };
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::lifetime_end, {Ptr->getType()});
  return createCallHelper(TheFn, Ops, this);
}
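
// Usage sketch (illustrative, hypothetical values): bracket the live range of
// a 64-byte alloca `Slot`; as handled above, a null size argument would be
// emitted as -1, meaning "unknown size":
//
//   B.CreateLifetimeStart(Slot, B.getInt64(64));
//   // ... uses of Slot ...
//   B.CreateLifetimeEnd(Slot, B.getInt64(64));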

CallInst *IRBuilderBase::CreateInvariantStart(Value *Ptr, ConstantInt *Size) {

  assert(isa<PointerType>(Ptr->getType()) &&
         "invariant.start only applies to pointers.");
  Ptr = getCastedInt8PtrValue(Ptr);
  if (!Size)
    Size = getInt64(-1);
  else
    assert(Size->getType() == getInt64Ty() &&
           "invariant.start requires the size to be an i64");

  Value *Ops[] = {Size, Ptr};
  // Fill in the single overloaded type: memory object type.
  Type *ObjectPtr[1] = {Ptr->getType()};
  Module *M = BB->getParent()->getParent();
  Function *TheFn =
      Intrinsic::getDeclaration(M, Intrinsic::invariant_start, ObjectPtr);
  return createCallHelper(TheFn, Ops, this);
}

CallInst *IRBuilderBase::CreateAssumption(Value *Cond) {
  assert(Cond->getType() == getInt1Ty() &&
         "an assumption condition must be of type i1");

  Value *Ops[] = { Cond };
  Module *M = BB->getParent()->getParent();
  Function *FnAssume = Intrinsic::getDeclaration(M, Intrinsic::assume);
  return createCallHelper(FnAssume, Ops, this);
}

/// Create a call to a Masked Load intrinsic.
/// \p Ptr       - base pointer for the load
/// \p Alignment - alignment of the source location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
/// \p PassThru  - pass-through value that is used to fill the masked-off lanes
///                of the result
/// \p Name      - name of the result variable
CallInst *IRBuilderBase::CreateMaskedLoad(Value *Ptr, Align Alignment,
                                          Value *Mask, Value *PassThru,
                                          const Twine &Name) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  if (!PassThru)
    PassThru = UndefValue::get(DataTy);
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Ptr, getInt32(Alignment.value()), Mask, PassThru};
  return CreateMaskedIntrinsic(Intrinsic::masked_load, Ops,
                               OverloadedTypes, Name);
}
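
// Usage sketch (illustrative, hypothetical values): masked load of a
// <4 x i32> through `VecPtr` (of type <4 x i32>*), reading only the lanes
// enabled in `Mask` and leaving the disabled lanes undef:
//
//   Value *Loaded = B.CreateMaskedLoad(VecPtr, Align(16), Mask,
//                                      /*PassThru=*/nullptr, "mload");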

/// Create a call to a Masked Store intrinsic.
/// \p Val       - data to be stored,
/// \p Ptr       - base pointer for the store
/// \p Alignment - alignment of the destination location
/// \p Mask      - vector of booleans which indicates what vector lanes should
///                be accessed in memory
CallInst *IRBuilderBase::CreateMaskedStore(Value *Val, Value *Ptr,
                                           Align Alignment, Value *Mask) {
  auto *PtrTy = cast<PointerType>(Ptr->getType());
  Type *DataTy = PtrTy->getElementType();
  assert(DataTy->isVectorTy() && "Ptr should point to a vector");
  assert(Mask && "Mask should not be all-ones (null)");
  Type *OverloadedTypes[] = { DataTy, PtrTy };
  Value *Ops[] = {Val, Ptr, getInt32(Alignment.value()), Mask};
  return CreateMaskedIntrinsic(Intrinsic::masked_store, Ops, OverloadedTypes);
}
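
// Usage sketch (illustrative, hypothetical values): store the <4 x i32>
// value `Vec` through `VecPtr`, writing only the lanes whose bit in `Mask`
// is set:
//
//   B.CreateMaskedStore(Vec, VecPtr, Align(16), Mask);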

/// Create a call to a Masked intrinsic, with given intrinsic Id,
/// an array of operands - Ops, and an array of overloaded types -
/// OverloadedTypes.
CallInst *IRBuilderBase::CreateMaskedIntrinsic(Intrinsic::ID Id,
                                               ArrayRef<Value *> Ops,
                                               ArrayRef<Type *> OverloadedTypes,
                                               const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Function *TheFn = Intrinsic::getDeclaration(M, Id, OverloadedTypes);
  return createCallHelper(TheFn, Ops, this, Name);
}

/// Create a call to a Masked Gather intrinsic.
/// \p Ptrs     - vector of pointers for loading
/// \p Align    - alignment for one element
/// \p Mask     - vector of booleans which indicates what vector lanes should
///               be accessed in memory
/// \p PassThru - pass-through value that is used to fill the masked-off lanes
///               of the result
/// \p Name     - name of the result variable
CallInst *IRBuilderBase::CreateMaskedGather(Value *Ptrs, Align Alignment,
                                            Value *Mask, Value *PassThru,
                                            const Twine &Name) {
  auto PtrsTy = cast<VectorType>(Ptrs->getType());
  auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
  unsigned NumElts = PtrsTy->getNumElements();
  auto *DataTy = FixedVectorType::get(PtrTy->getElementType(), NumElts);

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));

  if (!PassThru)
    PassThru = UndefValue::get(DataTy);

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Ptrs, getInt32(Alignment.value()), Mask, PassThru};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_gather, Ops, OverloadedTypes,
                               Name);
}
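
// Usage sketch (illustrative, hypothetical values): gather four i32s from
// the <4 x i32*> value `PtrVec`; as handled above, a null mask defaults to
// all-ones (every lane loaded) and a null pass-through defaults to undef:
//
//   Value *G = B.CreateMaskedGather(PtrVec, Align(4), /*Mask=*/nullptr,
//                                   /*PassThru=*/nullptr, "gather");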

/// Create a call to a Masked Scatter intrinsic.
/// \p Data  - data to be stored,
/// \p Ptrs  - the vector of pointers, where the \p Data elements should be
///            stored
/// \p Align - alignment for one element
/// \p Mask  - vector of booleans which indicates what vector lanes should
///            be accessed in memory
CallInst *IRBuilderBase::CreateMaskedScatter(Value *Data, Value *Ptrs,
                                             Align Alignment, Value *Mask) {
  auto PtrsTy = cast<VectorType>(Ptrs->getType());
  auto DataTy = cast<VectorType>(Data->getType());
  unsigned NumElts = PtrsTy->getNumElements();

#ifndef NDEBUG
  auto PtrTy = cast<PointerType>(PtrsTy->getElementType());
  assert(NumElts == DataTy->getNumElements() &&
         PtrTy->getElementType() == DataTy->getElementType() &&
         "Incompatible pointer and data types");
#endif

  if (!Mask)
    Mask = Constant::getAllOnesValue(
        FixedVectorType::get(Type::getInt1Ty(Context), NumElts));

  Type *OverloadedTypes[] = {DataTy, PtrsTy};
  Value *Ops[] = {Data, Ptrs, getInt32(Alignment.value()), Mask};

  // We specify only one type when we create this intrinsic. Types of other
  // arguments are derived from this type.
  return CreateMaskedIntrinsic(Intrinsic::masked_scatter, Ops, OverloadedTypes);
}
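
// Usage sketch (illustrative, hypothetical values): scatter the lanes of the
// <4 x i32> value `Vec` to the addresses in the <4 x i32*> value `PtrVec`,
// with a null mask again meaning "all lanes" (assuming the defaulted Mask
// parameter declared in IRBuilder.h):
//
//   B.CreateMaskedScatter(Vec, PtrVec, Align(4));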

template <typename T0>
static std::vector<Value *>
getStatepointArgs(IRBuilderBase &B, uint64_t ID, uint32_t NumPatchBytes,
                  Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs) {
  std::vector<Value *> Args;
  Args.push_back(B.getInt64(ID));
  Args.push_back(B.getInt32(NumPatchBytes));
  Args.push_back(ActualCallee);
  Args.push_back(B.getInt32(CallArgs.size()));
  Args.push_back(B.getInt32(Flags));
  Args.insert(Args.end(), CallArgs.begin(), CallArgs.end());
  // GC Transition and Deopt args are now always handled via operand bundle.
  // They will be removed from the signature of gc.statepoint shortly.
  Args.push_back(B.getInt32(0));
  Args.push_back(B.getInt32(0));
  // GC args are now encoded in the gc-live operand bundle
  return Args;
}
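
// For reference, the vector built above always has the fixed layout
//   [ID, NumPatchBytes, ActualCallee, |CallArgs|, Flags, CallArgs...,
//    0 /*transition arg count*/, 0 /*deopt arg count*/]
// with the two trailing zero counts kept only until the gc.statepoint
// signature change mentioned above lands.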

template<typename T1, typename T2, typename T3>
static std::vector<OperandBundleDef>
getStatepointBundles(Optional<ArrayRef<T1>> TransitionArgs,
                     Optional<ArrayRef<T2>> DeoptArgs,
                     ArrayRef<T3> GCArgs) {
  std::vector<OperandBundleDef> Rval;
  if (DeoptArgs) {
    SmallVector<Value*, 16> DeoptValues;
    DeoptValues.insert(DeoptValues.end(), DeoptArgs->begin(), DeoptArgs->end());
    Rval.emplace_back("deopt", DeoptValues);
  }
  if (TransitionArgs) {
    SmallVector<Value*, 16> TransitionValues;
    TransitionValues.insert(TransitionValues.end(),
                            TransitionArgs->begin(), TransitionArgs->end());
    Rval.emplace_back("gc-transition", TransitionValues);
  }
  if (GCArgs.size()) {
    SmallVector<Value*, 16> LiveValues;
    LiveValues.insert(LiveValues.end(), GCArgs.begin(), GCArgs.end());
    Rval.emplace_back("gc-live", LiveValues);
  }
  return Rval;
}

template <typename T0, typename T1, typename T2, typename T3>
static CallInst *CreateGCStatepointCallCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualCallee, uint32_t Flags, ArrayRef<T0> CallArgs,
    Optional<ArrayRef<T1>> TransitionArgs,
    Optional<ArrayRef<T2>> DeoptArgs, ArrayRef<T3> GCArgs,
    const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualCallee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Type *ArgTypes[] = { FuncPtrType };
  Function *FnStatepoint =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_statepoint,
                                ArgTypes);

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualCallee, Flags,
                        CallArgs);

  return Builder->CreateCall(FnStatepoint, Args,
                             getStatepointBundles(TransitionArgs, DeoptArgs,
                                                  GCArgs),
                             Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Value *> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None /* No Transition Args */, DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee, uint32_t Flags,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualCallee, Flags, CallArgs, TransitionArgs,
      DeoptArgs, GCArgs, Name);
}

CallInst *IRBuilderBase::CreateGCStatepointCall(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualCallee,
    ArrayRef<Use> CallArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointCallCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualCallee, uint32_t(StatepointFlags::None),
      CallArgs, None, DeoptArgs, GCArgs, Name);
}

template <typename T0, typename T1, typename T2, typename T3>
static InvokeInst *CreateGCStatepointInvokeCommon(
    IRBuilderBase *Builder, uint64_t ID, uint32_t NumPatchBytes,
    Value *ActualInvokee, BasicBlock *NormalDest, BasicBlock *UnwindDest,
    uint32_t Flags, ArrayRef<T0> InvokeArgs,
    Optional<ArrayRef<T1>> TransitionArgs, Optional<ArrayRef<T2>> DeoptArgs,
    ArrayRef<T3> GCArgs, const Twine &Name) {
  // Extract out the type of the callee.
  auto *FuncPtrType = cast<PointerType>(ActualInvokee->getType());
  assert(isa<FunctionType>(FuncPtrType->getElementType()) &&
         "actual callee must be a callable value");

  Module *M = Builder->GetInsertBlock()->getParent()->getParent();
  // Fill in the one generic type'd argument (the function is also vararg)
  Function *FnStatepoint = Intrinsic::getDeclaration(
      M, Intrinsic::experimental_gc_statepoint, {FuncPtrType});

  std::vector<Value *> Args =
      getStatepointArgs(*Builder, ID, NumPatchBytes, ActualInvokee, Flags,
                        InvokeArgs);

  return Builder->CreateInvoke(FnStatepoint, NormalDest, UnwindDest, Args,
                               getStatepointBundles(TransitionArgs, DeoptArgs,
                                                    GCArgs),
                               Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest,
    ArrayRef<Value *> InvokeArgs, Optional<ArrayRef<Value *>> DeoptArgs,
    ArrayRef<Value *> GCArgs, const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Value *, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None /* No Transition Args*/,
      DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, uint32_t Flags,
    ArrayRef<Use> InvokeArgs, Optional<ArrayRef<Use>> TransitionArgs,
    Optional<ArrayRef<Use>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Use, Use, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest, Flags,
      InvokeArgs, TransitionArgs, DeoptArgs, GCArgs, Name);
}

InvokeInst *IRBuilderBase::CreateGCStatepointInvoke(
    uint64_t ID, uint32_t NumPatchBytes, Value *ActualInvokee,
    BasicBlock *NormalDest, BasicBlock *UnwindDest, ArrayRef<Use> InvokeArgs,
    Optional<ArrayRef<Value *>> DeoptArgs, ArrayRef<Value *> GCArgs,
    const Twine &Name) {
  return CreateGCStatepointInvokeCommon<Use, Value *, Value *, Value *>(
      this, ID, NumPatchBytes, ActualInvokee, NormalDest, UnwindDest,
      uint32_t(StatepointFlags::None), InvokeArgs, None, DeoptArgs, GCArgs,
      Name);
}

CallInst *IRBuilderBase::CreateGCResult(Instruction *Statepoint,
                                        Type *ResultType,
                                        const Twine &Name) {
  Intrinsic::ID ID = Intrinsic::experimental_gc_result;
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCResult = Intrinsic::getDeclaration(M, ID, Types);

  Value *Args[] = {Statepoint};
  return createCallHelper(FnGCResult, Args, this, Name);
}

CallInst *IRBuilderBase::CreateGCRelocate(Instruction *Statepoint,
                                          int BaseOffset,
                                          int DerivedOffset,
                                          Type *ResultType,
                                          const Twine &Name) {
  Module *M = BB->getParent()->getParent();
  Type *Types[] = {ResultType};
  Function *FnGCRelocate =
      Intrinsic::getDeclaration(M, Intrinsic::experimental_gc_relocate, Types);

  Value *Args[] = {Statepoint,
                   getInt32(BaseOffset),
                   getInt32(DerivedOffset)};
  return createCallHelper(FnGCRelocate, Args, this, Name);
}

CallInst *IRBuilderBase::CreateUnaryIntrinsic(Intrinsic::ID ID, Value *V,
                                              Instruction *FMFSource,
                                              const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, {V->getType()});
  return createCallHelper(Fn, {V}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateBinaryIntrinsic(Intrinsic::ID ID, Value *LHS,
                                               Value *RHS,
                                               Instruction *FMFSource,
                                               const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, { LHS->getType() });
  return createCallHelper(Fn, {LHS, RHS}, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateIntrinsic(Intrinsic::ID ID,
                                         ArrayRef<Type *> Types,
                                         ArrayRef<Value *> Args,
                                         Instruction *FMFSource,
                                         const Twine &Name) {
  Module *M = BB->getModule();
  Function *Fn = Intrinsic::getDeclaration(M, ID, Types);
  return createCallHelper(Fn, Args, this, Name, FMFSource);
}

CallInst *IRBuilderBase::CreateConstrainedFPBinOp(
    Intrinsic::ID ID, Value *L, Value *R, Instruction *FMFSource,
    const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *RoundingV = getConstrainedFPRounding(Rounding);
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, RoundingV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateNAryOp(unsigned Opc, ArrayRef<Value *> Ops,
                                   const Twine &Name, MDNode *FPMathTag) {
  if (Instruction::isBinaryOp(Opc)) {
    assert(Ops.size() == 2 && "Invalid number of operands!");
    return CreateBinOp(static_cast<Instruction::BinaryOps>(Opc),
                       Ops[0], Ops[1], Name, FPMathTag);
  }
  if (Instruction::isUnaryOp(Opc)) {
    assert(Ops.size() == 1 && "Invalid number of operands!");
    return CreateUnOp(static_cast<Instruction::UnaryOps>(Opc),
                      Ops[0], Name, FPMathTag);
  }
  llvm_unreachable("Unexpected opcode!");
}

CallInst *IRBuilderBase::CreateConstrainedFPCast(
    Intrinsic::ID ID, Value *V, Type *DestTy,
    Instruction *FMFSource, const Twine &Name, MDNode *FPMathTag,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  Value *ExceptV = getConstrainedFPExcept(Except);

  FastMathFlags UseFMF = FMF;
  if (FMFSource)
    UseFMF = FMFSource->getFastMathFlags();

  CallInst *C;
  bool HasRoundingMD = false;
  switch (ID) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    HasRoundingMD = ROUND_MODE; \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD) {
    Value *RoundingV = getConstrainedFPRounding(Rounding);
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, RoundingV, ExceptV},
                        nullptr, Name);
  } else
    C = CreateIntrinsic(ID, {DestTy, V->getType()}, {V, ExceptV}, nullptr,
                        Name);

  setConstrainedFPCallAttr(C);

  if (isa<FPMathOperator>(C))
    setFPAttrs(C, FPMathTag, UseFMF);
  return C;
}

Value *IRBuilderBase::CreateFCmpHelper(
    CmpInst::Predicate P, Value *LHS, Value *RHS, const Twine &Name,
    MDNode *FPMathTag, bool IsSignaling) {
  if (IsFPConstrained) {
    auto ID = IsSignaling ? Intrinsic::experimental_constrained_fcmps
                          : Intrinsic::experimental_constrained_fcmp;
    return CreateConstrainedFPCmp(ID, P, LHS, RHS, Name);
  }

  if (auto *LC = dyn_cast<Constant>(LHS))
    if (auto *RC = dyn_cast<Constant>(RHS))
      return Insert(Folder.CreateFCmp(P, LC, RC), Name);
  return Insert(setFPAttrs(new FCmpInst(P, LHS, RHS), FPMathTag, FMF), Name);
}

CallInst *IRBuilderBase::CreateConstrainedFPCmp(
    Intrinsic::ID ID, CmpInst::Predicate P, Value *L, Value *R,
    const Twine &Name, Optional<fp::ExceptionBehavior> Except) {
  Value *PredicateV = getConstrainedFPPredicate(P);
  Value *ExceptV = getConstrainedFPExcept(Except);

  CallInst *C = CreateIntrinsic(ID, {L->getType()},
                                {L, R, PredicateV, ExceptV}, nullptr, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

CallInst *IRBuilderBase::CreateConstrainedFPCall(
    Function *Callee, ArrayRef<Value *> Args, const Twine &Name,
    Optional<RoundingMode> Rounding,
    Optional<fp::ExceptionBehavior> Except) {
  llvm::SmallVector<Value *, 6> UseArgs;

  for (auto *OneArg : Args)
    UseArgs.push_back(OneArg);
  bool HasRoundingMD = false;
  switch (Callee->getIntrinsicID()) {
  default:
    break;
#define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC) \
  case Intrinsic::INTRINSIC: \
    HasRoundingMD = ROUND_MODE; \
    break;
#include "llvm/IR/ConstrainedOps.def"
  }
  if (HasRoundingMD)
    UseArgs.push_back(getConstrainedFPRounding(Rounding));
  UseArgs.push_back(getConstrainedFPExcept(Except));

  CallInst *C = CreateCall(Callee, UseArgs, Name);
  setConstrainedFPCallAttr(C);
  return C;
}

Value *IRBuilderBase::CreateSelect(Value *C, Value *True, Value *False,
                                   const Twine &Name, Instruction *MDFrom) {
  if (auto *CC = dyn_cast<Constant>(C))
    if (auto *TC = dyn_cast<Constant>(True))
      if (auto *FC = dyn_cast<Constant>(False))
        return Insert(Folder.CreateSelect(CC, TC, FC), Name);

  SelectInst *Sel = SelectInst::Create(C, True, False);
  if (MDFrom) {
    MDNode *Prof = MDFrom->getMetadata(LLVMContext::MD_prof);
    MDNode *Unpred = MDFrom->getMetadata(LLVMContext::MD_unpredictable);
    Sel = addBranchMetadata(Sel, Prof, Unpred);
  }
  if (isa<FPMathOperator>(Sel))
    setFPAttrs(Sel, nullptr /* MDNode* */, FMF);
  return Insert(Sel, Name);
}

Value *IRBuilderBase::CreatePtrDiff(Value *LHS, Value *RHS,
                                    const Twine &Name) {
  assert(LHS->getType() == RHS->getType() &&
         "Pointer subtraction operand types must match!");
  auto *ArgType = cast<PointerType>(LHS->getType());
  Value *LHS_int = CreatePtrToInt(LHS, Type::getInt64Ty(Context));
  Value *RHS_int = CreatePtrToInt(RHS, Type::getInt64Ty(Context));
  Value *Difference = CreateSub(LHS_int, RHS_int);
  return CreateExactSDiv(Difference,
                         ConstantExpr::getSizeOf(ArgType->getElementType()),
                         Name);
}
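
// Usage sketch (illustrative, hypothetical values): number of i32 elements
// between two i32* values, computed as the ptrtoint difference exact-divided
// by sizeof(i32), matching the sequence built above:
//
//   Value *NumElts = B.CreatePtrDiff(EndPtr, BeginPtr, "count");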

Value *IRBuilderBase::CreateLaunderInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "launder.invariant.group only applies to pointers.");
  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnLaunderInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::launder_invariant_group, {Int8PtrTy});

  assert(FnLaunderInvariantGroup->getReturnType() == Int8PtrTy &&
         FnLaunderInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "LaunderInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnLaunderInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateStripInvariantGroup(Value *Ptr) {
  assert(isa<PointerType>(Ptr->getType()) &&
         "strip.invariant.group only applies to pointers.");

  // FIXME: we could potentially avoid casts to/from i8*.
  auto *PtrType = Ptr->getType();
  auto *Int8PtrTy = getInt8PtrTy(PtrType->getPointerAddressSpace());
  if (PtrType != Int8PtrTy)
    Ptr = CreateBitCast(Ptr, Int8PtrTy);
  Module *M = BB->getParent()->getParent();
  Function *FnStripInvariantGroup = Intrinsic::getDeclaration(
      M, Intrinsic::strip_invariant_group, {Int8PtrTy});

  assert(FnStripInvariantGroup->getReturnType() == Int8PtrTy &&
         FnStripInvariantGroup->getFunctionType()->getParamType(0) ==
             Int8PtrTy &&
         "StripInvariantGroup should take and return the same type");

  CallInst *Fn = CreateCall(FnStripInvariantGroup, {Ptr});

  if (PtrType != Int8PtrTy)
    return CreateBitCast(Fn, PtrType);
  return Fn;
}

Value *IRBuilderBase::CreateVectorSplat(unsigned NumElts, Value *V,
                                        const Twine &Name) {
  assert(NumElts > 0 && "Cannot splat to an empty vector!");

  // First insert it into an undef vector so we can shuffle it.
  Type *I32Ty = getInt32Ty();
  Value *Undef = UndefValue::get(FixedVectorType::get(V->getType(), NumElts));
  V = CreateInsertElement(Undef, V, ConstantInt::get(I32Ty, 0),
                          Name + ".splatinsert");

  // Shuffle the value across the desired number of elements.
  Value *Zeros =
      ConstantAggregateZero::get(FixedVectorType::get(I32Ty, NumElts));
  return CreateShuffleVector(V, Undef, Zeros, Name + ".splat");
}
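
// Usage sketch (illustrative): B.CreateVectorSplat(4, B.getInt32(7), "c")
// materializes a <4 x i32> splat of 7 as exactly the two steps above: an
// insertelement into lane 0 of an undef vector, followed by a zero-mask
// shufflevector that broadcasts that lane.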

Value *IRBuilderBase::CreateExtractInteger(
    const DataLayout &DL, Value *From, IntegerType *ExtractedTy,
    uint64_t Offset, const Twine &Name) {
  auto *IntTy = cast<IntegerType>(From->getType());
  assert(DL.getTypeStoreSize(ExtractedTy) + Offset <=
             DL.getTypeStoreSize(IntTy) &&
         "Element extends past full value");
  uint64_t ShAmt = 8 * Offset;
  Value *V = From;
  if (DL.isBigEndian())
    ShAmt = 8 * (DL.getTypeStoreSize(IntTy) -
                 DL.getTypeStoreSize(ExtractedTy) - Offset);
  if (ShAmt) {
    V = CreateLShr(V, ShAmt, Name + ".shift");
  }
  assert(ExtractedTy->getBitWidth() <= IntTy->getBitWidth() &&
         "Cannot extract to a larger integer!");
  if (ExtractedTy != IntTy) {
    V = CreateTrunc(V, ExtractedTy, Name + ".trunc");
  }
  return V;
}

Value *IRBuilderBase::CreatePreserveArrayAccessIndex(
    Type *ElTy, Value *Base, unsigned Dimension, unsigned LastIndex,
    MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.array.access.index.");
  auto *BaseType = Base->getType();

  Value *LastIndexV = getInt32(LastIndex);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  SmallVector<Value *, 4> IdxList;
  for (unsigned I = 0; I < Dimension; ++I)
    IdxList.push_back(Zero);
  IdxList.push_back(LastIndexV);

  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, IdxList);

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveArrayAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_array_access_index, {ResultType, BaseType});

  Value *DimV = getInt32(Dimension);
  CallInst *Fn =
      CreateCall(FnPreserveArrayAccessIndex, {Base, DimV, LastIndexV});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveUnionAccessIndex(
    Value *Base, unsigned FieldIndex, MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.union.access.index.");
  auto *BaseType = Base->getType();

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveUnionAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_union_access_index, {BaseType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn =
      CreateCall(FnPreserveUnionAccessIndex, {Base, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

Value *IRBuilderBase::CreatePreserveStructAccessIndex(
    Type *ElTy, Value *Base, unsigned Index, unsigned FieldIndex,
    MDNode *DbgInfo) {
  assert(isa<PointerType>(Base->getType()) &&
         "Invalid Base ptr type for preserve.struct.access.index.");
  auto *BaseType = Base->getType();

  Value *GEPIndex = getInt32(Index);
  Constant *Zero = ConstantInt::get(Type::getInt32Ty(Context), 0);
  Type *ResultType =
      GetElementPtrInst::getGEPReturnType(ElTy, Base, {Zero, GEPIndex});

  Module *M = BB->getParent()->getParent();
  Function *FnPreserveStructAccessIndex = Intrinsic::getDeclaration(
      M, Intrinsic::preserve_struct_access_index, {ResultType, BaseType});

  Value *DIIndex = getInt32(FieldIndex);
  CallInst *Fn = CreateCall(FnPreserveStructAccessIndex,
                            {Base, GEPIndex, DIIndex});
  if (DbgInfo)
    Fn->setMetadata(LLVMContext::MD_preserve_access_index, DbgInfo);

  return Fn;
}

CallInst *IRBuilderBase::CreateAlignmentAssumptionHelper(
    const DataLayout &DL, Value *PtrValue, Value *Mask, Type *IntPtrTy,
    Value *OffsetValue, Value **TheCheck) {
  Value *PtrIntValue = CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");

  if (OffsetValue) {
    bool IsOffsetZero = false;
    if (const auto *CI = dyn_cast<ConstantInt>(OffsetValue))
      IsOffsetZero = CI->isZero();

    if (!IsOffsetZero) {
      if (OffsetValue->getType() != IntPtrTy)
        OffsetValue = CreateIntCast(OffsetValue, IntPtrTy, /*isSigned*/ true,
                                    "offsetcast");
      PtrIntValue = CreateSub(PtrIntValue, OffsetValue, "offsetptr");
    }
  }

  Value *Zero = ConstantInt::get(IntPtrTy, 0);
  Value *MaskedPtr = CreateAnd(PtrIntValue, Mask, "maskedptr");
  Value *InvCond = CreateICmpEQ(MaskedPtr, Zero, "maskcond");
  if (TheCheck)
    *TheCheck = InvCond;

  return CreateAssumption(InvCond);
}

CallInst *IRBuilderBase::CreateAlignmentAssumption(
    const DataLayout &DL, Value *PtrValue, unsigned Alignment,
    Value *OffsetValue, Value **TheCheck) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  assert(Alignment != 0 && "Invalid Alignment");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());

  Value *Mask = ConstantInt::get(IntPtrTy, Alignment - 1);
  return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
                                         OffsetValue, TheCheck);
}
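
// Usage sketch (illustrative, hypothetical values): tell the optimizer that
// the pointer `Ptr` is 32-byte aligned under data layout `DL`, with no
// offset and no interest in the generated check (assuming the defaulted
// trailing parameters declared in IRBuilder.h); this lowers to the masked
// icmp-eq feeding llvm.assume built by the helper above:
//
//   B.CreateAlignmentAssumption(DL, Ptr, 32);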

CallInst *IRBuilderBase::CreateAlignmentAssumption(
    const DataLayout &DL, Value *PtrValue, Value *Alignment,
    Value *OffsetValue, Value **TheCheck) {
  assert(isa<PointerType>(PtrValue->getType()) &&
         "trying to create an alignment assumption on a non-pointer?");
  auto *PtrTy = cast<PointerType>(PtrValue->getType());
  Type *IntPtrTy = getIntPtrTy(DL, PtrTy->getAddressSpace());

  if (Alignment->getType() != IntPtrTy)
    Alignment = CreateIntCast(Alignment, IntPtrTy, /*isSigned*/ false,
                              "alignmentcast");

  Value *Mask = CreateSub(Alignment, ConstantInt::get(IntPtrTy, 1), "mask");

  return CreateAlignmentAssumptionHelper(DL, PtrValue, Mask, IntPtrTy,
                                         OffsetValue, TheCheck);
}

IRBuilderDefaultInserter::~IRBuilderDefaultInserter() {}
IRBuilderCallbackInserter::~IRBuilderCallbackInserter() {}
IRBuilderFolder::~IRBuilderFolder() {}
void ConstantFolder::anchor() {}
void NoFolder::anchor() {}