//===---- CGBuiltin.cpp - Emit LLVM Code for builtins ---------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Builtin calls as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGObjCRuntime.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"

using namespace clang;
using namespace CodeGen;
using namespace llvm;

/// getBuiltinLibFunction - Given a builtin id for a function like
/// "__builtin_fabsf", return a Function* for "fabsf".
llvm::Value *CodeGenModule::getBuiltinLibFunction(const FunctionDecl *FD,
                                                  unsigned BuiltinID) {
  assert(Context.BuiltinInfo.isLibFunction(BuiltinID));

  // Get the name, skip over the __builtin_ prefix (if necessary).
  StringRef Name;
  GlobalDecl D(FD);

  // If the builtin has been declared explicitly with an assembler label,
  // use the mangled name. This differs from the plain label on platforms
  // that prefix labels.
  if (FD->hasAttr<AsmLabelAttr>())
    Name = getMangledName(D);
  else
    Name = Context.BuiltinInfo.GetName(BuiltinID) + 10;
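  // (strlen("__builtin_") == 10, so the "+ 10" above skips exactly that
  // prefix, e.g. "__builtin_fabsf" becomes "fabsf".)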

  llvm::FunctionType *Ty =
    cast<llvm::FunctionType>(getTypes().ConvertType(FD->getType()));

  return GetOrCreateLLVMFunction(Name, Ty, D, /*ForVTable=*/false);
}

/// Emit the conversions required to turn the given value into an
/// integer of the given size.
static Value *EmitToInt(CodeGenFunction &CGF, llvm::Value *V,
                        QualType T, llvm::IntegerType *IntType) {
  V = CGF.EmitToMemory(V, T);

  if (V->getType()->isPointerTy())
    return CGF.Builder.CreatePtrToInt(V, IntType);

  assert(V->getType() == IntType);
  return V;
}

static Value *EmitFromInt(CodeGenFunction &CGF, llvm::Value *V,
                          QualType T, llvm::Type *ResultType) {
  V = CGF.EmitFromMemory(V, T);

  if (ResultType->isPointerTy())
    return CGF.Builder.CreateIntToPtr(V, ResultType);

  assert(V->getType() == ResultType);
  return V;
}

/// Utility to insert an atomic instruction based on Intrinsic::ID
/// and the expression node.
static RValue EmitBinaryAtomic(CodeGenFunction &CGF,
                               llvm::AtomicRMWInst::BinOp Kind,
                               const CallExpr *E) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
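
// For example, a call such as __sync_fetch_and_add(&x, 5) on an i32 object is
// lowered through this helper to roughly (illustrative names):
//
//   %old = atomicrmw add i32* @x, i32 5 seq_cst
//
// and %old (the value *before* the operation) is returned to the caller.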

/// Utility to insert an atomic instruction based on Intrinsic::ID and
/// the expression node, where the return value is the result of the
/// operation.
static RValue EmitBinaryAtomicPost(CodeGenFunction &CGF,
                                   llvm::AtomicRMWInst::BinOp Kind,
                                   const CallExpr *E,
                                   Instruction::BinaryOps Op,
                                   bool Invert = false) {
  QualType T = E->getType();
  assert(E->getArg(0)->getType()->isPointerType());
  assert(CGF.getContext().hasSameUnqualifiedType(T,
                                  E->getArg(0)->getType()->getPointeeType()));
  assert(CGF.getContext().hasSameUnqualifiedType(T, E->getArg(1)->getType()));

  llvm::Value *DestPtr = CGF.EmitScalarExpr(E->getArg(0));
  unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

  llvm::IntegerType *IntType =
    llvm::IntegerType::get(CGF.getLLVMContext(),
                           CGF.getContext().getTypeSize(T));
  llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

  llvm::Value *Args[2];
  Args[1] = CGF.EmitScalarExpr(E->getArg(1));
  llvm::Type *ValueType = Args[1]->getType();
  Args[1] = EmitToInt(CGF, Args[1], T, IntType);
  Args[0] = CGF.Builder.CreateBitCast(DestPtr, IntPtrType);

  llvm::Value *Result =
      CGF.Builder.CreateAtomicRMW(Kind, Args[0], Args[1],
                                  llvm::SequentiallyConsistent);
  Result = CGF.Builder.CreateBinOp(Op, Result, Args[1]);
  if (Invert)
    Result = CGF.Builder.CreateBinOp(llvm::Instruction::Xor, Result,
                                     llvm::ConstantInt::get(IntType, -1));
  Result = EmitFromInt(CGF, Result, T, ValueType);
  return RValue::get(Result);
}
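
// For example, __sync_add_and_fetch(&x, 5) is lowered through this helper to
// roughly (illustrative names):
//
//   %old = atomicrmw add i32* @x, i32 5 seq_cst
//   %new = add i32 %old, 5
//
// and %new is returned. For the nand variants, Invert additionally applies an
// xor with -1 so the returned value matches ~(old & val).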

/// EmitFAbs - Emit a call to @llvm.fabs().
static Value *EmitFAbs(CodeGenFunction &CGF, Value *V) {
  Value *F = CGF.CGM.getIntrinsic(Intrinsic::fabs, V->getType());
  llvm::CallInst *Call = CGF.Builder.CreateCall(F, V);
  Call->setDoesNotAccessMemory();
  return Call;
}

static RValue emitLibraryCall(CodeGenFunction &CGF, const FunctionDecl *Fn,
                              const CallExpr *E, llvm::Value *calleeValue) {
  return CGF.EmitCall(E->getCallee()->getType(), calleeValue, E,
                      ReturnValueSlot(), Fn);
}

/// \brief Emit a call to llvm.{sadd,uadd,ssub,usub,smul,umul}.with.overflow.*
/// depending on IntrinsicID.
///
/// \arg CGF The current codegen function.
/// \arg IntrinsicID The ID for the Intrinsic we wish to generate.
/// \arg X The first argument to the llvm.*.with.overflow.*.
/// \arg Y The second argument to the llvm.*.with.overflow.*.
/// \arg Carry The carry returned by the llvm.*.with.overflow.*.
/// \returns The result (i.e. sum/product) returned by the intrinsic.
static llvm::Value *EmitOverflowIntrinsic(CodeGenFunction &CGF,
                                          const llvm::Intrinsic::ID IntrinsicID,
                                          llvm::Value *X, llvm::Value *Y,
                                          llvm::Value *&Carry) {
  // Make sure we have integers of the same width.
  assert(X->getType() == Y->getType() &&
         "Arguments must be the same type. (Did you forget to make sure both "
         "arguments have the same integer width?)");

  llvm::Value *Callee = CGF.CGM.getIntrinsic(IntrinsicID, X->getType());
  llvm::Value *Tmp = CGF.Builder.CreateCall2(Callee, X, Y);
  Carry = CGF.Builder.CreateExtractValue(Tmp, 1);
  return CGF.Builder.CreateExtractValue(Tmp, 0);
}
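
// For example, with IntrinsicID == llvm::Intrinsic::uadd_with_overflow and i32
// operands this emits roughly (illustrative names):
//
//   %pair  = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
//   %carry = extractvalue {i32, i1} %pair, 1
//   %sum   = extractvalue {i32, i1} %pair, 0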

RValue CodeGenFunction::EmitBuiltinExpr(const FunctionDecl *FD,
                                        unsigned BuiltinID, const CallExpr *E,
                                        ReturnValueSlot ReturnValue) {
  // See if we can constant fold this builtin. If so, don't emit it at all.
  Expr::EvalResult Result;
  if (E->EvaluateAsRValue(Result, CGM.getContext()) &&
      !Result.hasSideEffects()) {
    if (Result.Val.isInt())
      return RValue::get(llvm::ConstantInt::get(getLLVMContext(),
                                                Result.Val.getInt()));
    if (Result.Val.isFloat())
      return RValue::get(llvm::ConstantFP::get(getLLVMContext(),
                                               Result.Val.getFloat()));
  }

  switch (BuiltinID) {
  default: break; // Handle intrinsics and libm functions below.
  case Builtin::BI__builtin___CFStringMakeConstantString:
  case Builtin::BI__builtin___NSStringMakeConstantString:
    return RValue::get(CGM.EmitConstantExpr(E, E->getType(), nullptr));
  case Builtin::BI__builtin_stdarg_start:
  case Builtin::BI__builtin_va_start:
  case Builtin::BI__va_start:
  case Builtin::BI__builtin_va_end: {
    Value *ArgValue = (BuiltinID == Builtin::BI__va_start)
                          ? EmitScalarExpr(E->getArg(0))
                          : EmitVAListRef(E->getArg(0));
    llvm::Type *DestType = Int8PtrTy;
    if (ArgValue->getType() != DestType)
      ArgValue = Builder.CreateBitCast(ArgValue, DestType,
                                       ArgValue->getName().data());

    Intrinsic::ID inst = (BuiltinID == Builtin::BI__builtin_va_end) ?
      Intrinsic::vaend : Intrinsic::vastart;
    return RValue::get(Builder.CreateCall(CGM.getIntrinsic(inst), ArgValue));
  }
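  // For example, __builtin_va_start(ap, last) is lowered to roughly
  //   call void @llvm.va_start(i8* %ap1)
  // where %ap1 is the va_list address bitcast to i8*; nothing besides the
  // list pointer itself is passed to the intrinsic.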
  case Builtin::BI__builtin_va_copy: {
    Value *DstPtr = EmitVAListRef(E->getArg(0));
    Value *SrcPtr = EmitVAListRef(E->getArg(1));

    llvm::Type *Type = Int8PtrTy;

    DstPtr = Builder.CreateBitCast(DstPtr, Type);
    SrcPtr = Builder.CreateBitCast(SrcPtr, Type);
    return RValue::get(Builder.CreateCall2(CGM.getIntrinsic(Intrinsic::vacopy),
                                           DstPtr, SrcPtr));
  }
  case Builtin::BI__builtin_abs:
  case Builtin::BI__builtin_labs:
  case Builtin::BI__builtin_llabs: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    Value *NegOp = Builder.CreateNeg(ArgValue, "neg");
    Value *CmpResult =
      Builder.CreateICmpSGE(ArgValue,
                            llvm::Constant::getNullValue(ArgValue->getType()),
                            "abscond");
    Value *Result =
      Builder.CreateSelect(CmpResult, ArgValue, NegOp, "abs");

    return RValue::get(Result);
  }
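  // The lowering above is a branchless select: abs(x) ==> (x >= 0) ? x : -x.
  // Note that the neg is emitted without nsw, so abs(INT_MIN) wraps back to
  // INT_MIN rather than trapping.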
  case Builtin::BI__builtin_fabs:
  case Builtin::BI__builtin_fabsf:
  case Builtin::BI__builtin_fabsl: {
    Value *Arg1 = EmitScalarExpr(E->getArg(0));
    Value *Result = EmitFAbs(*this, Arg1);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_fmod:
  case Builtin::BI__builtin_fmodf:
  case Builtin::BI__builtin_fmodl: {
    Value *Arg1 = EmitScalarExpr(E->getArg(0));
    Value *Arg2 = EmitScalarExpr(E->getArg(1));
    Value *Result = Builder.CreateFRem(Arg1, Arg2, "fmod");
    return RValue::get(Result);
  }

  case Builtin::BI__builtin_conj:
  case Builtin::BI__builtin_conjf:
  case Builtin::BI__builtin_conjl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    Value *Real = ComplexVal.first;
    Value *Imag = ComplexVal.second;
    Value *Zero =
      Imag->getType()->isFPOrFPVectorTy()
        ? llvm::ConstantFP::getZeroValueForNegation(Imag->getType())
        : llvm::Constant::getNullValue(Imag->getType());

    Imag = Builder.CreateFSub(Zero, Imag, "sub");
    return RValue::getComplex(std::make_pair(Real, Imag));
  }
  case Builtin::BI__builtin_creal:
  case Builtin::BI__builtin_crealf:
  case Builtin::BI__builtin_creall:
  case Builtin::BIcreal:
  case Builtin::BIcrealf:
  case Builtin::BIcreall: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.first);
  }

  case Builtin::BI__builtin_cimag:
  case Builtin::BI__builtin_cimagf:
  case Builtin::BI__builtin_cimagl:
  case Builtin::BIcimag:
  case Builtin::BIcimagf:
  case Builtin::BIcimagl: {
    ComplexPairTy ComplexVal = EmitComplexExpr(E->getArg(0));
    return RValue::get(ComplexVal.second);
  }

  case Builtin::BI__builtin_ctzs:
  case Builtin::BI__builtin_ctz:
  case Builtin::BI__builtin_ctzl:
  case Builtin::BI__builtin_ctzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_clzs:
  case Builtin::BI__builtin_clz:
  case Builtin::BI__builtin_clzl:
  case Builtin::BI__builtin_clzll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctlz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *ZeroUndef = Builder.getInt1(getTarget().isCLZForZeroUndef());
    Value *Result = Builder.CreateCall2(F, ArgValue, ZeroUndef);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_ffs:
  case Builtin::BI__builtin_ffsl:
  case Builtin::BI__builtin_ffsll: {
    // ffs(x) -> x ? cttz(x) + 1 : 0
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::cttz, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateAdd(Builder.CreateCall2(F, ArgValue,
                                                       Builder.getTrue()),
                                   llvm::ConstantInt::get(ArgType, 1));
    Value *Zero = llvm::Constant::getNullValue(ArgType);
    Value *IsZero = Builder.CreateICmpEQ(ArgValue, Zero, "iszero");
    Value *Result = Builder.CreateSelect(IsZero, Zero, Tmp, "ffs");
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_parity:
  case Builtin::BI__builtin_parityl:
  case Builtin::BI__builtin_parityll: {
    // parity(x) -> ctpop(x) & 1
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Tmp = Builder.CreateCall(F, ArgValue);
    Value *Result = Builder.CreateAnd(Tmp, llvm::ConstantInt::get(ArgType, 1));
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_popcount:
  case Builtin::BI__builtin_popcountl:
  case Builtin::BI__builtin_popcountll: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));

    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::ctpop, ArgType);

    llvm::Type *ResultType = ConvertType(E->getType());
    Value *Result = Builder.CreateCall(F, ArgValue);
    if (Result->getType() != ResultType)
      Result = Builder.CreateIntCast(Result, ResultType, /*isSigned*/true,
                                     "cast");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_expect: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();

    Value *FnExpect = CGM.getIntrinsic(Intrinsic::expect, ArgType);
    Value *ExpectedValue = EmitScalarExpr(E->getArg(1));

    Value *Result = Builder.CreateCall2(FnExpect, ArgValue, ExpectedValue,
                                        "expval");
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_assume_aligned: {
    Value *PtrValue = EmitScalarExpr(E->getArg(0));
    Value *OffsetValue =
        (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) : nullptr;

    Value *AlignmentValue = EmitScalarExpr(E->getArg(1));
    ConstantInt *AlignmentCI = cast<ConstantInt>(AlignmentValue);
    unsigned Alignment = (unsigned) AlignmentCI->getZExtValue();

    EmitAlignmentAssumption(PtrValue, Alignment, OffsetValue);
    return RValue::get(PtrValue);
  }
  case Builtin::BI__assume:
  case Builtin::BI__builtin_assume: {
    if (E->getArg(0)->HasSideEffects(getContext()))
      return RValue::get(nullptr);

    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    Value *FnAssume = CGM.getIntrinsic(Intrinsic::assume);
    return RValue::get(Builder.CreateCall(FnAssume, ArgValue));
  }
  case Builtin::BI__builtin_bswap16:
  case Builtin::BI__builtin_bswap32:
  case Builtin::BI__builtin_bswap64: {
    Value *ArgValue = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = ArgValue->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::bswap, ArgType);
    return RValue::get(Builder.CreateCall(F, ArgValue));
  }
  case Builtin::BI__builtin_object_size: {
    // We rely on constant folding to deal with expressions with side effects.
    assert(!E->getArg(0)->HasSideEffects(getContext()) &&
           "should have been constant folded");

    // We pass this builtin onto the optimizer so that it can
    // figure out the object size in more complex cases.
    llvm::Type *ResType = ConvertType(E->getType());

    // LLVM only supports 0 and 2, make sure that we pass along that
    // as a boolean.
    Value *Ty = EmitScalarExpr(E->getArg(1));
    ConstantInt *CI = dyn_cast<ConstantInt>(Ty);
    assert(CI);
    uint64_t val = CI->getZExtValue();
    CI = ConstantInt::get(Builder.getInt1Ty(), (val & 0x2) >> 1);
    // FIXME: Get right address space.
    llvm::Type *Tys[] = { ResType, Builder.getInt8PtrTy(0) };
    Value *F = CGM.getIntrinsic(Intrinsic::objectsize, Tys);
    return RValue::get(Builder.CreateCall2(F, EmitScalarExpr(E->getArg(0)),CI));
  }
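  // Of the four __builtin_object_size types, @llvm.objectsize only
  // distinguishes min (types 2/3) from max (types 0/1), which is why only
  // bit 1 of the type argument survives as the i1 above. For example,
  // __builtin_object_size(p, 2) becomes roughly
  //   call i64 @llvm.objectsize.i64.p0i8(i8* %p, i1 true)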
  case Builtin::BI__builtin_prefetch: {
    Value *Locality, *RW, *Address = EmitScalarExpr(E->getArg(0));
    // FIXME: Technically these constants should be of type 'int', yes?
    RW = (E->getNumArgs() > 1) ? EmitScalarExpr(E->getArg(1)) :
      llvm::ConstantInt::get(Int32Ty, 0);
    Locality = (E->getNumArgs() > 2) ? EmitScalarExpr(E->getArg(2)) :
      llvm::ConstantInt::get(Int32Ty, 3);
    Value *Data = llvm::ConstantInt::get(Int32Ty, 1);
    Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
    return RValue::get(Builder.CreateCall4(F, Address, RW, Locality, Data));
  }
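  // For example, __builtin_prefetch(p) fills in the defaults (read, high
  // locality) and becomes roughly
  //   call void @llvm.prefetch(i8* %p, i32 0, i32 3, i32 1)
  // where the trailing 1 selects the data (as opposed to instruction) cache.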
  case Builtin::BI__builtin_readcyclecounter: {
    Value *F = CGM.getIntrinsic(Intrinsic::readcyclecounter);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin___clear_cache: {
    Value *Begin = EmitScalarExpr(E->getArg(0));
    Value *End = EmitScalarExpr(E->getArg(1));
    Value *F = CGM.getIntrinsic(Intrinsic::clear_cache);
    return RValue::get(Builder.CreateCall2(F, Begin, End));
  }
  case Builtin::BI__builtin_trap: {
    Value *F = CGM.getIntrinsic(Intrinsic::trap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__debugbreak: {
    Value *F = CGM.getIntrinsic(Intrinsic::debugtrap);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_unreachable: {
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      SanitizerScope SanScope(this);
      EmitCheck(std::make_pair(static_cast<llvm::Value *>(Builder.getFalse()),
                               SanitizerKind::Unreachable),
                "builtin_unreachable", EmitCheckSourceLocation(E->getExprLoc()),
                None);
    } else
      Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("unreachable.cont"));

    return RValue::get(nullptr);
  }

  case Builtin::BI__builtin_powi:
  case Builtin::BI__builtin_powif:
  case Builtin::BI__builtin_powil: {
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::powi, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BI__builtin_isgreater:
  case Builtin::BI__builtin_isgreaterequal:
  case Builtin::BI__builtin_isless:
  case Builtin::BI__builtin_islessequal:
  case Builtin::BI__builtin_islessgreater:
  case Builtin::BI__builtin_isunordered: {
    // Ordered comparisons: we know the arguments to these are matching scalar
    // floating point values.
    Value *LHS = EmitScalarExpr(E->getArg(0));
    Value *RHS = EmitScalarExpr(E->getArg(1));

    switch (BuiltinID) {
    default: llvm_unreachable("Unknown ordered comparison");
    case Builtin::BI__builtin_isgreater:
      LHS = Builder.CreateFCmpOGT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isgreaterequal:
      LHS = Builder.CreateFCmpOGE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isless:
      LHS = Builder.CreateFCmpOLT(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessequal:
      LHS = Builder.CreateFCmpOLE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_islessgreater:
      LHS = Builder.CreateFCmpONE(LHS, RHS, "cmp");
      break;
    case Builtin::BI__builtin_isunordered:
      LHS = Builder.CreateFCmpUNO(LHS, RHS, "cmp");
      break;
    }
    // ZExt bool to int type.
    return RValue::get(Builder.CreateZExt(LHS, ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_isnan: {
    Value *V = EmitScalarExpr(E->getArg(0));
    V = Builder.CreateFCmpUNO(V, V, "cmp");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }
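  // This works because NaN is the only value that compares unequal to itself:
  // "fcmp uno %v, %v" is true exactly when %v is a NaN.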

  case Builtin::BI__builtin_isinf: {
    // isinf(x) --> fabs(x) == infinity
    Value *V = EmitScalarExpr(E->getArg(0));
    V = EmitFAbs(*this, V);

    V = Builder.CreateFCmpOEQ(V, ConstantFP::getInfinity(V->getType()),"isinf");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  // TODO: BI__builtin_isinf_sign
  //   isinf_sign(x) -> isinf(x) ? (signbit(x) ? -1 : 1) : 0

  case Builtin::BI__builtin_isnormal: {
    // isnormal(x) --> x == x && fabsf(x) < infinity && fabsf(x) >= float_min
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V);
    Value *IsLessThanInf =
      Builder.CreateFCmpULT(Abs, ConstantFP::getInfinity(V->getType()),"isinf");
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(0)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(Abs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    V = Builder.CreateAnd(Eq, IsLessThanInf, "and");
    V = Builder.CreateAnd(V, IsNormal, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_isfinite: {
    // isfinite(x) --> x == x && fabs(x) != infinity;
    Value *V = EmitScalarExpr(E->getArg(0));
    Value *Eq = Builder.CreateFCmpOEQ(V, V, "iseq");

    Value *Abs = EmitFAbs(*this, V);
    Value *IsNotInf =
      Builder.CreateFCmpUNE(Abs, ConstantFP::getInfinity(V->getType()),"isinf");

    V = Builder.CreateAnd(Eq, IsNotInf, "and");
    return RValue::get(Builder.CreateZExt(V, ConvertType(E->getType())));
  }

  case Builtin::BI__builtin_fpclassify: {
    Value *V = EmitScalarExpr(E->getArg(5));
    llvm::Type *Ty = ConvertType(E->getArg(5)->getType());

    // Create Result
    BasicBlock *Begin = Builder.GetInsertBlock();
    BasicBlock *End = createBasicBlock("fpclassify_end", this->CurFn);
    Builder.SetInsertPoint(End);
    PHINode *Result =
      Builder.CreatePHI(ConvertType(E->getArg(0)->getType()), 4,
                        "fpclassify_result");

    // if (V==0) return FP_ZERO
    Builder.SetInsertPoint(Begin);
    Value *IsZero = Builder.CreateFCmpOEQ(V, Constant::getNullValue(Ty),
                                          "iszero");
    Value *ZeroLiteral = EmitScalarExpr(E->getArg(4));
    BasicBlock *NotZero = createBasicBlock("fpclassify_not_zero", this->CurFn);
    Builder.CreateCondBr(IsZero, End, NotZero);
    Result->addIncoming(ZeroLiteral, Begin);

    // if (V != V) return FP_NAN
    Builder.SetInsertPoint(NotZero);
    Value *IsNan = Builder.CreateFCmpUNO(V, V, "cmp");
    Value *NanLiteral = EmitScalarExpr(E->getArg(0));
    BasicBlock *NotNan = createBasicBlock("fpclassify_not_nan", this->CurFn);
    Builder.CreateCondBr(IsNan, End, NotNan);
    Result->addIncoming(NanLiteral, NotZero);

    // if (fabs(V) == infinity) return FP_INFINITY
    Builder.SetInsertPoint(NotNan);
    Value *VAbs = EmitFAbs(*this, V);
    Value *IsInf =
      Builder.CreateFCmpOEQ(VAbs, ConstantFP::getInfinity(V->getType()),
                            "isinf");
    Value *InfLiteral = EmitScalarExpr(E->getArg(1));
    BasicBlock *NotInf = createBasicBlock("fpclassify_not_inf", this->CurFn);
    Builder.CreateCondBr(IsInf, End, NotInf);
    Result->addIncoming(InfLiteral, NotNan);

    // if (fabs(V) >= MIN_NORMAL) return FP_NORMAL else FP_SUBNORMAL
    Builder.SetInsertPoint(NotInf);
    APFloat Smallest = APFloat::getSmallestNormalized(
        getContext().getFloatTypeSemantics(E->getArg(5)->getType()));
    Value *IsNormal =
      Builder.CreateFCmpUGE(VAbs, ConstantFP::get(V->getContext(), Smallest),
                            "isnormal");
    Value *NormalResult =
      Builder.CreateSelect(IsNormal, EmitScalarExpr(E->getArg(2)),
                           EmitScalarExpr(E->getArg(3)));
    Builder.CreateBr(End);
    Result->addIncoming(NormalResult, NotInf);

    // return Result
    Builder.SetInsertPoint(End);
    return RValue::get(Result);
  }

  case Builtin::BIalloca:
  case Builtin::BI_alloca:
  case Builtin::BI__builtin_alloca: {
    Value *Size = EmitScalarExpr(E->getArg(0));
    return RValue::get(Builder.CreateAlloca(Builder.getInt8Ty(), Size));
  }
  case Builtin::BIbzero:
  case Builtin::BI__builtin_bzero: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *SizeVal = EmitScalarExpr(E->getArg(1));
    Builder.CreateMemSet(Dest.first, Builder.getInt8(0), SizeVal,
                         Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemcpy:
  case Builtin::BI__builtin_memcpy: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin___memcpy_chk: {
    // fold __builtin_memcpy_chk(x, y, cst1, cst2) to memcpy iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemCpy(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BI__builtin_objc_memmove_collectable: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *SrcAddr = EmitScalarExpr(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    CGM.getObjCRuntime().EmitGCMemmoveCollectable(*this,
                                                  Address, SrcAddr, SizeVal);
    return RValue::get(Address);
  }

  case Builtin::BI__builtin___memmove_chk: {
    // fold __builtin_memmove_chk(x, y, cst1, cst2) to memmove iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }

  case Builtin::BImemmove:
  case Builtin::BI__builtin_memmove: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    std::pair<llvm::Value*, unsigned> Src =
        EmitPointerWithAlignment(E->getArg(1));
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    unsigned Align = std::min(Dest.second, Src.second);
    Builder.CreateMemMove(Dest.first, Src.first, SizeVal, Align, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BImemset:
  case Builtin::BI__builtin_memset: {
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = EmitScalarExpr(E->getArg(2));
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin___memset_chk: {
    // fold __builtin_memset_chk(x, y, cst1, cst2) to memset iff cst1<=cst2.
    llvm::APSInt Size, DstSize;
    if (!E->getArg(2)->EvaluateAsInt(Size, CGM.getContext()) ||
        !E->getArg(3)->EvaluateAsInt(DstSize, CGM.getContext()))
      break;
    if (Size.ugt(DstSize))
      break;
    std::pair<llvm::Value*, unsigned> Dest =
        EmitPointerWithAlignment(E->getArg(0));
    Value *ByteVal = Builder.CreateTrunc(EmitScalarExpr(E->getArg(1)),
                                         Builder.getInt8Ty());
    Value *SizeVal = llvm::ConstantInt::get(Builder.getContext(), Size);
    Builder.CreateMemSet(Dest.first, ByteVal, SizeVal, Dest.second, false);
    return RValue::get(Dest.first);
  }
  case Builtin::BI__builtin_dwarf_cfa: {
    // The offset in bytes from the first argument to the CFA.
    //
    // Why on earth is this in the frontend? Is there any reason at
    // all that the backend can't reasonably determine this while
    // lowering llvm.eh.dwarf.cfa()?
    //
    // TODO: If there's a satisfactory reason, add a target hook for
    // this instead of hard-coding 0, which is correct for most targets.
    int32_t Offset = 0;

    Value *F = CGM.getIntrinsic(Intrinsic::eh_dwarf_cfa);
    return RValue::get(Builder.CreateCall(F,
                                      llvm::ConstantInt::get(Int32Ty, Offset)));
  }
  case Builtin::BI__builtin_return_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::returnaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_frame_address: {
    Value *Depth = EmitScalarExpr(E->getArg(0));
    Depth = Builder.CreateIntCast(Depth, Int32Ty, false);
    Value *F = CGM.getIntrinsic(Intrinsic::frameaddress);
    return RValue::get(Builder.CreateCall(F, Depth));
  }
  case Builtin::BI__builtin_extract_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().decodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_frob_return_addr: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    Value *Result = getTargetHooks().encodeReturnAddress(*this, Address);
    return RValue::get(Result);
  }
  case Builtin::BI__builtin_dwarf_sp_column: {
    llvm::IntegerType *Ty
      = cast<llvm::IntegerType>(ConvertType(E->getType()));
    int Column = getTargetHooks().getDwarfEHStackPointer(CGM);
    if (Column == -1) {
      CGM.ErrorUnsupported(E, "__builtin_dwarf_sp_column");
      return RValue::get(llvm::UndefValue::get(Ty));
    }
    return RValue::get(llvm::ConstantInt::get(Ty, Column, true));
  }
  case Builtin::BI__builtin_init_dwarf_reg_size_table: {
    Value *Address = EmitScalarExpr(E->getArg(0));
    if (getTargetHooks().initDwarfEHRegSizeTable(*this, Address))
      CGM.ErrorUnsupported(E, "__builtin_init_dwarf_reg_size_table");
    return RValue::get(llvm::UndefValue::get(ConvertType(E->getType())));
  }
  case Builtin::BI__builtin_eh_return: {
    Value *Int = EmitScalarExpr(E->getArg(0));
    Value *Ptr = EmitScalarExpr(E->getArg(1));

    llvm::IntegerType *IntTy = cast<llvm::IntegerType>(Int->getType());
    assert((IntTy->getBitWidth() == 32 || IntTy->getBitWidth() == 64) &&
           "LLVM's __builtin_eh_return only supports 32- and 64-bit variants");
    Value *F = CGM.getIntrinsic(IntTy->getBitWidth() == 32
                                  ? Intrinsic::eh_return_i32
                                  : Intrinsic::eh_return_i64);
    Builder.CreateCall2(F, Int, Ptr);
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("builtin_eh_return.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__builtin_unwind_init: {
    Value *F = CGM.getIntrinsic(Intrinsic::eh_unwind_init);
    return RValue::get(Builder.CreateCall(F));
  }
  case Builtin::BI__builtin_extend_pointer: {
    // Extends a pointer to the size of an _Unwind_Word, which is
    // uint64_t on all platforms. Generally this gets poked into a
    // register and eventually used as an address, so if the
    // addressing registers are wider than pointers and the platform
    // doesn't implicitly ignore high-order bits when doing
    // addressing, we need to make sure we zext / sext based on
    // the platform's expectations.
    //
    // See: http://gcc.gnu.org/ml/gcc-bugs/2002-02/msg00237.html

    // Cast the pointer to intptr_t.
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    Value *Result = Builder.CreatePtrToInt(Ptr, IntPtrTy, "extend.cast");

    // If that's 64 bits, we're done.
    if (IntPtrTy->getBitWidth() == 64)
      return RValue::get(Result);

    // Otherwise, ask the codegen data what to do.
    if (getTargetHooks().extendPointerWithSExt())
      return RValue::get(Builder.CreateSExt(Result, Int64Ty, "extend.sext"));
    else
      return RValue::get(Builder.CreateZExt(Result, Int64Ty, "extend.zext"));
  }
  case Builtin::BI__builtin_setjmp: {
    // Buffer is a void**.
    Value *Buf = EmitScalarExpr(E->getArg(0));

    // Store the frame pointer to the setjmp buffer.
    Value *FrameAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::frameaddress),
                         ConstantInt::get(Int32Ty, 0));
    Builder.CreateStore(FrameAddr, Buf);

    // Store the stack pointer to the setjmp buffer.
    Value *StackAddr =
      Builder.CreateCall(CGM.getIntrinsic(Intrinsic::stacksave));
    Value *StackSaveSlot =
      Builder.CreateGEP(Buf, ConstantInt::get(Int32Ty, 2));
    Builder.CreateStore(StackAddr, StackSaveSlot);

    // Call LLVM's EH setjmp, which is lightweight.
    Value *F = CGM.getIntrinsic(Intrinsic::eh_sjlj_setjmp);
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);
    return RValue::get(Builder.CreateCall(F, Buf));
  }
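  // Note the jmp_buf layout llvm.eh.sjlj.setjmp expects: word 0 holds the
  // frame address stored above and word 2 holds the stack pointer, while
  // word 1 is left for the target to fill in (typically with the resume
  // address).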
  case Builtin::BI__builtin_longjmp: {
    Value *Buf = EmitScalarExpr(E->getArg(0));
    Buf = Builder.CreateBitCast(Buf, Int8PtrTy);

    // Call LLVM's EH longjmp, which is lightweight.
    Builder.CreateCall(CGM.getIntrinsic(Intrinsic::eh_sjlj_longjmp), Buf);

    // longjmp doesn't return; mark this as unreachable.
    Builder.CreateUnreachable();

    // We do need to preserve an insertion point.
    EmitBlock(createBasicBlock("longjmp.cont"));

    return RValue::get(nullptr);
  }
  case Builtin::BI__sync_fetch_and_add:
  case Builtin::BI__sync_fetch_and_sub:
  case Builtin::BI__sync_fetch_and_or:
  case Builtin::BI__sync_fetch_and_and:
  case Builtin::BI__sync_fetch_and_xor:
  case Builtin::BI__sync_fetch_and_nand:
  case Builtin::BI__sync_add_and_fetch:
  case Builtin::BI__sync_sub_and_fetch:
  case Builtin::BI__sync_and_and_fetch:
  case Builtin::BI__sync_or_and_fetch:
  case Builtin::BI__sync_xor_and_fetch:
  case Builtin::BI__sync_nand_and_fetch:
  case Builtin::BI__sync_val_compare_and_swap:
  case Builtin::BI__sync_bool_compare_and_swap:
  case Builtin::BI__sync_lock_test_and_set:
  case Builtin::BI__sync_lock_release:
  case Builtin::BI__sync_swap:
    llvm_unreachable("Shouldn't make it through sema");
  case Builtin::BI__sync_fetch_and_add_1:
  case Builtin::BI__sync_fetch_and_add_2:
  case Builtin::BI__sync_fetch_and_add_4:
  case Builtin::BI__sync_fetch_and_add_8:
  case Builtin::BI__sync_fetch_and_add_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Add, E);
  case Builtin::BI__sync_fetch_and_sub_1:
  case Builtin::BI__sync_fetch_and_sub_2:
  case Builtin::BI__sync_fetch_and_sub_4:
  case Builtin::BI__sync_fetch_and_sub_8:
  case Builtin::BI__sync_fetch_and_sub_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Sub, E);
  case Builtin::BI__sync_fetch_and_or_1:
  case Builtin::BI__sync_fetch_and_or_2:
  case Builtin::BI__sync_fetch_and_or_4:
  case Builtin::BI__sync_fetch_and_or_8:
  case Builtin::BI__sync_fetch_and_or_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Or, E);
  case Builtin::BI__sync_fetch_and_and_1:
  case Builtin::BI__sync_fetch_and_and_2:
  case Builtin::BI__sync_fetch_and_and_4:
  case Builtin::BI__sync_fetch_and_and_8:
  case Builtin::BI__sync_fetch_and_and_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::And, E);
  case Builtin::BI__sync_fetch_and_xor_1:
  case Builtin::BI__sync_fetch_and_xor_2:
  case Builtin::BI__sync_fetch_and_xor_4:
  case Builtin::BI__sync_fetch_and_xor_8:
  case Builtin::BI__sync_fetch_and_xor_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xor, E);
  case Builtin::BI__sync_fetch_and_nand_1:
  case Builtin::BI__sync_fetch_and_nand_2:
  case Builtin::BI__sync_fetch_and_nand_4:
  case Builtin::BI__sync_fetch_and_nand_8:
  case Builtin::BI__sync_fetch_and_nand_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Nand, E);

  // Clang extensions: not overloaded yet.
  case Builtin::BI__sync_fetch_and_min:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Min, E);
  case Builtin::BI__sync_fetch_and_max:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Max, E);
  case Builtin::BI__sync_fetch_and_umin:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMin, E);
  case Builtin::BI__sync_fetch_and_umax:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::UMax, E);

  case Builtin::BI__sync_add_and_fetch_1:
  case Builtin::BI__sync_add_and_fetch_2:
  case Builtin::BI__sync_add_and_fetch_4:
  case Builtin::BI__sync_add_and_fetch_8:
  case Builtin::BI__sync_add_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Add, E,
                                llvm::Instruction::Add);
  case Builtin::BI__sync_sub_and_fetch_1:
  case Builtin::BI__sync_sub_and_fetch_2:
  case Builtin::BI__sync_sub_and_fetch_4:
  case Builtin::BI__sync_sub_and_fetch_8:
  case Builtin::BI__sync_sub_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Sub, E,
                                llvm::Instruction::Sub);
  case Builtin::BI__sync_and_and_fetch_1:
  case Builtin::BI__sync_and_and_fetch_2:
  case Builtin::BI__sync_and_and_fetch_4:
  case Builtin::BI__sync_and_and_fetch_8:
  case Builtin::BI__sync_and_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::And, E,
                                llvm::Instruction::And);
  case Builtin::BI__sync_or_and_fetch_1:
  case Builtin::BI__sync_or_and_fetch_2:
  case Builtin::BI__sync_or_and_fetch_4:
  case Builtin::BI__sync_or_and_fetch_8:
  case Builtin::BI__sync_or_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Or, E,
                                llvm::Instruction::Or);
  case Builtin::BI__sync_xor_and_fetch_1:
  case Builtin::BI__sync_xor_and_fetch_2:
  case Builtin::BI__sync_xor_and_fetch_4:
  case Builtin::BI__sync_xor_and_fetch_8:
  case Builtin::BI__sync_xor_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Xor, E,
                                llvm::Instruction::Xor);
  case Builtin::BI__sync_nand_and_fetch_1:
  case Builtin::BI__sync_nand_and_fetch_2:
  case Builtin::BI__sync_nand_and_fetch_4:
  case Builtin::BI__sync_nand_and_fetch_8:
  case Builtin::BI__sync_nand_and_fetch_16:
    return EmitBinaryAtomicPost(*this, llvm::AtomicRMWInst::Nand, E,
                                llvm::Instruction::And, true);

  case Builtin::BI__sync_val_compare_and_swap_1:
  case Builtin::BI__sync_val_compare_and_swap_2:
  case Builtin::BI__sync_val_compare_and_swap_4:
  case Builtin::BI__sync_val_compare_and_swap_8:
  case Builtin::BI__sync_val_compare_and_swap_16: {
    QualType T = E->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitScalarExpr(E->getArg(1));
    llvm::Type *ValueType = Args[1]->getType();
    Args[1] = EmitToInt(*this, Args[1], T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Result = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                                llvm::SequentiallyConsistent,
                                                llvm::SequentiallyConsistent);
    Result = Builder.CreateExtractValue(Result, 0);
    Result = EmitFromInt(*this, Result, T, ValueType);
    return RValue::get(Result);
  }
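  // The _val_ variant above returns the original memory contents
  // (extractvalue index 0 of the cmpxchg pair); the _bool_ variant below
  // instead returns the i1 success flag (index 1), zero-extended.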

  case Builtin::BI__sync_bool_compare_and_swap_1:
  case Builtin::BI__sync_bool_compare_and_swap_2:
  case Builtin::BI__sync_bool_compare_and_swap_4:
  case Builtin::BI__sync_bool_compare_and_swap_8:
  case Builtin::BI__sync_bool_compare_and_swap_16: {
    QualType T = E->getArg(1)->getType();
    llvm::Value *DestPtr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = DestPtr->getType()->getPointerAddressSpace();

    llvm::IntegerType *IntType =
      llvm::IntegerType::get(getLLVMContext(),
                             getContext().getTypeSize(T));
    llvm::Type *IntPtrType = IntType->getPointerTo(AddrSpace);

    Value *Args[3];
    Args[0] = Builder.CreateBitCast(DestPtr, IntPtrType);
    Args[1] = EmitToInt(*this, EmitScalarExpr(E->getArg(1)), T, IntType);
    Args[2] = EmitToInt(*this, EmitScalarExpr(E->getArg(2)), T, IntType);

    Value *Pair = Builder.CreateAtomicCmpXchg(Args[0], Args[1], Args[2],
                                              llvm::SequentiallyConsistent,
                                              llvm::SequentiallyConsistent);
    Value *Result = Builder.CreateExtractValue(Pair, 1);
    // zext bool to int.
    Result = Builder.CreateZExt(Result, ConvertType(E->getType()));
    return RValue::get(Result);
  }

  case Builtin::BI__sync_swap_1:
  case Builtin::BI__sync_swap_2:
  case Builtin::BI__sync_swap_4:
  case Builtin::BI__sync_swap_8:
  case Builtin::BI__sync_swap_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_test_and_set_1:
  case Builtin::BI__sync_lock_test_and_set_2:
  case Builtin::BI__sync_lock_test_and_set_4:
  case Builtin::BI__sync_lock_test_and_set_8:
  case Builtin::BI__sync_lock_test_and_set_16:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);

  case Builtin::BI__sync_lock_release_1:
  case Builtin::BI__sync_lock_release_2:
  case Builtin::BI__sync_lock_release_4:
  case Builtin::BI__sync_lock_release_8:
  case Builtin::BI__sync_lock_release_16: {
    Value *Ptr = EmitScalarExpr(E->getArg(0));
    QualType ElTy = E->getArg(0)->getType()->getPointeeType();
    CharUnits StoreSize = getContext().getTypeSizeInChars(ElTy);
    llvm::Type *ITy = llvm::IntegerType::get(getLLVMContext(),
                                             StoreSize.getQuantity() * 8);
    Ptr = Builder.CreateBitCast(Ptr, ITy->getPointerTo());
    llvm::StoreInst *Store =
      Builder.CreateStore(llvm::Constant::getNullValue(ITy), Ptr);
    Store->setAlignment(StoreSize.getQuantity());
    Store->setAtomic(llvm::Release);
    return RValue::get(nullptr);
  }

  case Builtin::BI__sync_synchronize: {
    // We assume this is supposed to correspond to a C++0x-style
    // sequentially-consistent fence (i.e. this is only usable for
    // synchronization, not device I/O or anything like that). This intrinsic
    // is really badly designed in the sense that in theory, there isn't
    // any way to safely use it... but in practice, it mostly works
    // to use it with non-atomic loads and stores to get acquire/release
    // semantics.
    Builder.CreateFence(llvm::SequentiallyConsistent);
    return RValue::get(nullptr);
  }

  case Builtin::BI__c11_atomic_is_lock_free:
  case Builtin::BI__atomic_is_lock_free: {
    // Call "bool __atomic_is_lock_free(size_t size, void *ptr)". For the
    // __c11 builtin, ptr is 0 (indicating a properly-aligned object), since
    // _Atomic(T) is always properly-aligned.
    const char *LibCallName = "__atomic_is_lock_free";
    CallArgList Args;
    Args.add(RValue::get(EmitScalarExpr(E->getArg(0))),
             getContext().getSizeType());
    if (BuiltinID == Builtin::BI__atomic_is_lock_free)
      Args.add(RValue::get(EmitScalarExpr(E->getArg(1))),
               getContext().VoidPtrTy);
    else
      Args.add(RValue::get(llvm::Constant::getNullValue(VoidPtrTy)),
               getContext().VoidPtrTy);
    const CGFunctionInfo &FuncInfo =
        CGM.getTypes().arrangeFreeFunctionCall(E->getType(), Args,
                                               FunctionType::ExtInfo(),
                                               RequiredArgs::All);
    llvm::FunctionType *FTy = CGM.getTypes().GetFunctionType(FuncInfo);
    llvm::Constant *Func = CGM.CreateRuntimeFunction(FTy, LibCallName);
    return EmitCall(FuncInfo, Func, ReturnValueSlot(), Args);
  }
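  // The call emitted above has the shape of the libatomic entry point:
  //   bool __atomic_is_lock_free(size_t size, void *ptr);
  // with ptr == NULL for the __c11 builtin, per the comment at the top of
  // this case.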

  case Builtin::BI__atomic_test_and_set: {
    // Look at the argument type to determine whether this is a volatile
    // operation. The parameter type is always volatile.
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(1);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      AtomicRMWInst *Result = nullptr;
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Monotonic);
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Acquire);
        break;
      case 3:  // memory_order_release
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::Release);
        break;
      case 4:  // memory_order_acq_rel
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::AcquireRelease);
        break;
      case 5:  // memory_order_seq_cst
        Result = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                         Ptr, NewVal,
                                         llvm::SequentiallyConsistent);
        break;
      }
      Result->setVolatile(Volatile);
      return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[5] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("acquire", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("acqrel", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[5] = {
      llvm::Monotonic, llvm::Acquire, llvm::Release,
      llvm::AcquireRelease, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    Builder.SetInsertPoint(ContBB);
    PHINode *Result = Builder.CreatePHI(Int8Ty, 5, "was_set");

    for (unsigned i = 0; i < 5; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      AtomicRMWInst *RMW = Builder.CreateAtomicRMW(llvm::AtomicRMWInst::Xchg,
                                                   Ptr, NewVal, Orders[i]);
      RMW->setVolatile(Volatile);
      Result->addIncoming(RMW, BBs[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(1), BBs[1]);
    SI->addCase(Builder.getInt32(2), BBs[1]);
    SI->addCase(Builder.getInt32(3), BBs[2]);
    SI->addCase(Builder.getInt32(4), BBs[3]);
    SI->addCase(Builder.getInt32(5), BBs[4]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(Builder.CreateIsNotNull(Result, "tobool"));
  }
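  // When the ordering is not a compile-time constant, the switch built above
  // maps the C11 memory_order values 0..5 onto the five LLVM orderings, with
  // memory_order_consume (1) conservatively treated as acquire (both route
  // to BBs[1]).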

  case Builtin::BI__atomic_clear: {
    QualType PtrTy = E->getArg(0)->IgnoreImpCasts()->getType();
    bool Volatile =
        PtrTy->castAs<PointerType>()->getPointeeType().isVolatileQualified();

    Value *Ptr = EmitScalarExpr(E->getArg(0));
    unsigned AddrSpace = Ptr->getType()->getPointerAddressSpace();
    Ptr = Builder.CreateBitCast(Ptr, Int8Ty->getPointerTo(AddrSpace));
    Value *NewVal = Builder.getInt8(0);
    Value *Order = EmitScalarExpr(E->getArg(1));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        Store->setOrdering(llvm::Monotonic);
        break;
      case 3:  // memory_order_release
        Store->setOrdering(llvm::Release);
        break;
      case 5:  // memory_order_seq_cst
        Store->setOrdering(llvm::SequentiallyConsistent);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    llvm::BasicBlock *BBs[3] = {
      createBasicBlock("monotonic", CurFn),
      createBasicBlock("release", CurFn),
      createBasicBlock("seqcst", CurFn)
    };
    llvm::AtomicOrdering Orders[3] = {
      llvm::Monotonic, llvm::Release, llvm::SequentiallyConsistent
    };

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, BBs[0]);

    for (unsigned i = 0; i < 3; ++i) {
      Builder.SetInsertPoint(BBs[i]);
      StoreInst *Store = Builder.CreateStore(NewVal, Ptr, Volatile);
      Store->setAlignment(1);
      Store->setOrdering(Orders[i]);
      Builder.CreateBr(ContBB);
    }

    SI->addCase(Builder.getInt32(0), BBs[0]);
    SI->addCase(Builder.getInt32(3), BBs[1]);
    SI->addCase(Builder.getInt32(5), BBs[2]);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }

  case Builtin::BI__atomic_thread_fence:
  case Builtin::BI__atomic_signal_fence:
  case Builtin::BI__c11_atomic_thread_fence:
  case Builtin::BI__c11_atomic_signal_fence: {
    llvm::SynchronizationScope Scope;
    if (BuiltinID == Builtin::BI__atomic_signal_fence ||
        BuiltinID == Builtin::BI__c11_atomic_signal_fence)
      Scope = llvm::SingleThread;
    else
      Scope = llvm::CrossThread;
    Value *Order = EmitScalarExpr(E->getArg(0));
    if (isa<llvm::ConstantInt>(Order)) {
      int ord = cast<llvm::ConstantInt>(Order)->getZExtValue();
      switch (ord) {
      case 0:  // memory_order_relaxed
      default: // invalid order
        break;
      case 1:  // memory_order_consume
      case 2:  // memory_order_acquire
        Builder.CreateFence(llvm::Acquire, Scope);
        break;
      case 3:  // memory_order_release
        Builder.CreateFence(llvm::Release, Scope);
        break;
      case 4:  // memory_order_acq_rel
        Builder.CreateFence(llvm::AcquireRelease, Scope);
        break;
      case 5:  // memory_order_seq_cst
        Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
        break;
      }
      return RValue::get(nullptr);
    }

    llvm::BasicBlock *AcquireBB, *ReleaseBB, *AcqRelBB, *SeqCstBB;
    AcquireBB = createBasicBlock("acquire", CurFn);
    ReleaseBB = createBasicBlock("release", CurFn);
    AcqRelBB = createBasicBlock("acqrel", CurFn);
    SeqCstBB = createBasicBlock("seqcst", CurFn);
    llvm::BasicBlock *ContBB = createBasicBlock("atomic.continue", CurFn);

    Order = Builder.CreateIntCast(Order, Builder.getInt32Ty(), false);
    llvm::SwitchInst *SI = Builder.CreateSwitch(Order, ContBB);

    Builder.SetInsertPoint(AcquireBB);
    Builder.CreateFence(llvm::Acquire, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(1), AcquireBB);
    SI->addCase(Builder.getInt32(2), AcquireBB);

    Builder.SetInsertPoint(ReleaseBB);
    Builder.CreateFence(llvm::Release, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(3), ReleaseBB);

    Builder.SetInsertPoint(AcqRelBB);
    Builder.CreateFence(llvm::AcquireRelease, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(4), AcqRelBB);

    Builder.SetInsertPoint(SeqCstBB);
    Builder.CreateFence(llvm::SequentiallyConsistent, Scope);
    Builder.CreateBr(ContBB);
    SI->addCase(Builder.getInt32(5), SeqCstBB);

    Builder.SetInsertPoint(ContBB);
    return RValue::get(nullptr);
  }

  // Library functions with special handling.
  case Builtin::BIsqrt:
  case Builtin::BIsqrtf:
  case Builtin::BIsqrtl: {
    // Transform a call to sqrt* into a @llvm.sqrt.* intrinsic call, but only
    // in finite- or unsafe-math mode (the intrinsic has different semantics
    // for handling negative numbers compared to the library function, so
    // -fmath-errno=0 is not enough).
    if (!FD->hasAttr<ConstAttr>())
      break;
    if (!(CGM.getCodeGenOpts().UnsafeFPMath ||
          CGM.getCodeGenOpts().NoNaNsFPMath))
      break;
    Value *Arg0 = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = Arg0->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::sqrt, ArgType);
    return RValue::get(Builder.CreateCall(F, Arg0));
  }

  case Builtin::BIpow:
  case Builtin::BIpowf:
  case Builtin::BIpowl: {
    // Transform a call to pow* into a @llvm.pow.* intrinsic call.
    if (!FD->hasAttr<ConstAttr>())
      break;
    Value *Base = EmitScalarExpr(E->getArg(0));
    Value *Exponent = EmitScalarExpr(E->getArg(1));
    llvm::Type *ArgType = Base->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::pow, ArgType);
    return RValue::get(Builder.CreateCall2(F, Base, Exponent));
  }

  case Builtin::BIfma:
  case Builtin::BIfmaf:
  case Builtin::BIfmal:
  case Builtin::BI__builtin_fma:
  case Builtin::BI__builtin_fmaf:
  case Builtin::BI__builtin_fmal: {
    // Rewrite fma to intrinsic.
    Value *FirstArg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgType = FirstArg->getType();
    Value *F = CGM.getIntrinsic(Intrinsic::fma, ArgType);
    return RValue::get(Builder.CreateCall3(F, FirstArg,
                                           EmitScalarExpr(E->getArg(1)),
                                           EmitScalarExpr(E->getArg(2))));
  }

  case Builtin::BI__builtin_signbit:
  case Builtin::BI__builtin_signbitf:
  case Builtin::BI__builtin_signbitl: {
    LLVMContext &C = CGM.getLLVMContext();

    Value *Arg = EmitScalarExpr(E->getArg(0));
    llvm::Type *ArgTy = Arg->getType();
    int ArgWidth = ArgTy->getPrimitiveSizeInBits();
    llvm::Type *ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
    Value *BCArg = Builder.CreateBitCast(Arg, ArgIntTy);
    if (ArgTy->isPPC_FP128Ty()) {
      // The higher-order double comes first, and so we need to truncate the
      // pair to extract the overall sign. The order of the pair is the same
      // in both little- and big-endian modes.
      ArgWidth >>= 1;
      ArgIntTy = llvm::IntegerType::get(C, ArgWidth);
      BCArg = Builder.CreateTrunc(BCArg, ArgIntTy);
    }
    Value *ZeroCmp = llvm::Constant::getNullValue(ArgIntTy);
    Value *Result = Builder.CreateICmpSLT(BCArg, ZeroCmp);
    return RValue::get(Builder.CreateZExt(Result, ConvertType(E->getType())));
  }
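  // For IEEE types this relies on the sign occupying the most significant
  // bit: after the bitcast, "icmp slt %bits, 0" is true exactly when the
  // sign bit is set.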
  case Builtin::BI__builtin_annotation: {
    llvm::Value *AnnVal = EmitScalarExpr(E->getArg(0));
    llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::annotation,
                                      AnnVal->getType());

    // Get the annotation string, go through casts. Sema requires this to be a
    // non-wide string literal, potentially cast, so the cast<> is safe.
    const Expr *AnnotationStrExpr = E->getArg(1)->IgnoreParenCasts();
    StringRef Str = cast<StringLiteral>(AnnotationStrExpr)->getString();
    return RValue::get(EmitAnnotationCall(F, AnnVal, Str, E->getExprLoc()));
  }
  case Builtin::BI__builtin_addcb:
  case Builtin::BI__builtin_addcs:
  case Builtin::BI__builtin_addc:
  case Builtin::BI__builtin_addcl:
  case Builtin::BI__builtin_addcll:
  case Builtin::BI__builtin_subcb:
  case Builtin::BI__builtin_subcs:
  case Builtin::BI__builtin_subc:
  case Builtin::BI__builtin_subcl:
  case Builtin::BI__builtin_subcll: {
    // We translate all of these builtins from expressions of the form:
    //   int x = ..., y = ..., carryin = ..., carryout, result;
    //   result = __builtin_addc(x, y, carryin, &carryout);
    //
    // to LLVM IR of the form:
    //
    //   %tmp1 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %x, i32 %y)
    //   %tmpsum1 = extractvalue {i32, i1} %tmp1, 0
    //   %carry1 = extractvalue {i32, i1} %tmp1, 1
    //   %tmp2 = call {i32, i1} @llvm.uadd.with.overflow.i32(i32 %tmpsum1,
    //                                                       i32 %carryin)
    //   %result = extractvalue {i32, i1} %tmp2, 0
    //   %carry2 = extractvalue {i32, i1} %tmp2, 1
    //   %tmp3 = or i1 %carry1, %carry2
    //   %tmp4 = zext i1 %tmp3 to i32
    //   store i32 %tmp4, i32* %carryout

    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    llvm::Value *Carryin = EmitScalarExpr(E->getArg(2));
    std::pair<llvm::Value*, unsigned> CarryOutPtr =
      EmitPointerWithAlignment(E->getArg(3));

    // Decide if we are lowering to a uadd.with.overflow or usub.with.overflow.
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown multiprecision builtin id.");
    case Builtin::BI__builtin_addcb:
    case Builtin::BI__builtin_addcs:
    case Builtin::BI__builtin_addc:
    case Builtin::BI__builtin_addcl:
    case Builtin::BI__builtin_addcll:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_subcb:
    case Builtin::BI__builtin_subcs:
    case Builtin::BI__builtin_subc:
    case Builtin::BI__builtin_subcl:
    case Builtin::BI__builtin_subcll:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    }

    // Construct our resulting LLVM IR expression.
    llvm::Value *Carry1;
    llvm::Value *Sum1 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              X, Y, Carry1);
    llvm::Value *Carry2;
    llvm::Value *Sum2 = EmitOverflowIntrinsic(*this, IntrinsicId,
                                              Sum1, Carryin, Carry2);
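    // Assuming the incoming carry is 0 or 1 (as these builtins expect), at
    // most one of the two partial operations can overflow, so OR-ing the two
    // carry bits is exact.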
    llvm::Value *CarryOut = Builder.CreateZExt(Builder.CreateOr(Carry1, Carry2),
                                               X->getType());
    llvm::StoreInst *CarryOutStore = Builder.CreateStore(CarryOut,
                                                         CarryOutPtr.first);
    CarryOutStore->setAlignment(CarryOutPtr.second);
    return RValue::get(Sum2);
  }
  case Builtin::BI__builtin_uadd_overflow:
  case Builtin::BI__builtin_uaddl_overflow:
  case Builtin::BI__builtin_uaddll_overflow:
  case Builtin::BI__builtin_usub_overflow:
  case Builtin::BI__builtin_usubl_overflow:
  case Builtin::BI__builtin_usubll_overflow:
  case Builtin::BI__builtin_umul_overflow:
  case Builtin::BI__builtin_umull_overflow:
  case Builtin::BI__builtin_umulll_overflow:
  case Builtin::BI__builtin_sadd_overflow:
  case Builtin::BI__builtin_saddl_overflow:
  case Builtin::BI__builtin_saddll_overflow:
  case Builtin::BI__builtin_ssub_overflow:
  case Builtin::BI__builtin_ssubl_overflow:
  case Builtin::BI__builtin_ssubll_overflow:
  case Builtin::BI__builtin_smul_overflow:
  case Builtin::BI__builtin_smull_overflow:
  case Builtin::BI__builtin_smulll_overflow: {
    // We translate all of these builtins directly to the relevant LLVM IR
    // intrinsic.
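    // For example (a sketch), bool ovf = __builtin_sadd_overflow(a, b, &sum);
    // becomes roughly:
    //   %pair = call {i32, i1} @llvm.sadd.with.overflow.i32(i32 %a, i32 %b)
    //   %sum  = extractvalue {i32, i1} %pair, 0   ; stored through the pointer
    //   %ovf  = extractvalue {i32, i1} %pair, 1   ; returned
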
    // Scalarize our inputs.
    llvm::Value *X = EmitScalarExpr(E->getArg(0));
    llvm::Value *Y = EmitScalarExpr(E->getArg(1));
    std::pair<llvm::Value *, unsigned> SumOutPtr =
      EmitPointerWithAlignment(E->getArg(2));

    // Decide which of the overflow intrinsics we are lowering to:
    llvm::Intrinsic::ID IntrinsicId;
    switch (BuiltinID) {
    default: llvm_unreachable("Unknown security overflow builtin id.");
    case Builtin::BI__builtin_uadd_overflow:
    case Builtin::BI__builtin_uaddl_overflow:
    case Builtin::BI__builtin_uaddll_overflow:
      IntrinsicId = llvm::Intrinsic::uadd_with_overflow;
      break;
    case Builtin::BI__builtin_usub_overflow:
    case Builtin::BI__builtin_usubl_overflow:
    case Builtin::BI__builtin_usubll_overflow:
      IntrinsicId = llvm::Intrinsic::usub_with_overflow;
      break;
    case Builtin::BI__builtin_umul_overflow:
    case Builtin::BI__builtin_umull_overflow:
    case Builtin::BI__builtin_umulll_overflow:
      IntrinsicId = llvm::Intrinsic::umul_with_overflow;
      break;
    case Builtin::BI__builtin_sadd_overflow:
    case Builtin::BI__builtin_saddl_overflow:
    case Builtin::BI__builtin_saddll_overflow:
      IntrinsicId = llvm::Intrinsic::sadd_with_overflow;
      break;
    case Builtin::BI__builtin_ssub_overflow:
    case Builtin::BI__builtin_ssubl_overflow:
    case Builtin::BI__builtin_ssubll_overflow:
      IntrinsicId = llvm::Intrinsic::ssub_with_overflow;
      break;
    case Builtin::BI__builtin_smul_overflow:
    case Builtin::BI__builtin_smull_overflow:
    case Builtin::BI__builtin_smulll_overflow:
      IntrinsicId = llvm::Intrinsic::smul_with_overflow;
      break;
    }

    llvm::Value *Carry;
    llvm::Value *Sum = EmitOverflowIntrinsic(*this, IntrinsicId, X, Y, Carry);
    llvm::StoreInst *SumOutStore = Builder.CreateStore(Sum, SumOutPtr.first);
    SumOutStore->setAlignment(SumOutPtr.second);

    return RValue::get(Carry);
  }
  case Builtin::BI__builtin_addressof:
    return RValue::get(EmitLValue(E->getArg(0)).getAddress());
  case Builtin::BI__builtin_operator_new:
    return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
                                    E->getArg(0), false);
  case Builtin::BI__builtin_operator_delete:
    return EmitBuiltinNewDeleteCall(FD->getType()->castAs<FunctionProtoType>(),
                                    E->getArg(0), true);
  case Builtin::BI__noop:
    // __noop always evaluates to an integer literal zero.
    return RValue::get(ConstantInt::get(IntTy, 0));
  case Builtin::BI__builtin_call_with_static_chain: {
    const CallExpr *Call = cast<CallExpr>(E->getArg(0));
    const Expr *Chain = E->getArg(1);
    return EmitCall(Call->getCallee()->getType(),
                    EmitScalarExpr(Call->getCallee()), Call, ReturnValue,
                    Call->getCalleeDecl(), EmitScalarExpr(Chain));
  }
  case Builtin::BI_InterlockedExchange:
  case Builtin::BI_InterlockedExchangePointer:
    return EmitBinaryAtomic(*this, llvm::AtomicRMWInst::Xchg, E);
  case Builtin::BI_InterlockedCompareExchangePointer: {
    llvm::Type *RTy;
    llvm::IntegerType *IntType =
      IntegerType::get(getLLVMContext(),
                       getContext().getTypeSize(E->getType()));
    llvm::Type *IntPtrType = IntType->getPointerTo();

    llvm::Value *Destination =
      Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), IntPtrType);

    llvm::Value *Exchange = EmitScalarExpr(E->getArg(1));
    RTy = Exchange->getType();
    Exchange = Builder.CreatePtrToInt(Exchange, IntType);

    llvm::Value *Comparand =
      Builder.CreatePtrToInt(EmitScalarExpr(E->getArg(2)), IntType);

    auto Result = Builder.CreateAtomicCmpXchg(Destination, Comparand, Exchange,
                                              SequentiallyConsistent,
                                              SequentiallyConsistent);
    Result->setVolatile(true);

    return RValue::get(Builder.CreateIntToPtr(Builder.CreateExtractValue(Result,
                                                                         0),
                                              RTy));
  }
  case Builtin::BI_InterlockedCompareExchange: {
    AtomicCmpXchgInst *CXI = Builder.CreateAtomicCmpXchg(
        EmitScalarExpr(E->getArg(0)),
        EmitScalarExpr(E->getArg(2)),
        EmitScalarExpr(E->getArg(1)),
        SequentiallyConsistent,
        SequentiallyConsistent);
    CXI->setVolatile(true);
    return RValue::get(Builder.CreateExtractValue(CXI, 0));
  }
  case Builtin::BI_InterlockedIncrement: {
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Add,
        EmitScalarExpr(E->getArg(0)),
        ConstantInt::get(Int32Ty, 1),
        llvm::SequentiallyConsistent);
    RMWI->setVolatile(true);
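    // atomicrmw returns the value the memory held *before* the operation,
    // while _InterlockedIncrement returns the new value, hence the extra add.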
    return RValue::get(Builder.CreateAdd(RMWI, ConstantInt::get(Int32Ty, 1)));
  }
  case Builtin::BI_InterlockedDecrement: {
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Sub,
        EmitScalarExpr(E->getArg(0)),
        ConstantInt::get(Int32Ty, 1),
        llvm::SequentiallyConsistent);
    RMWI->setVolatile(true);
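    // As above: atomicrmw yields the old value, so subtract one more to
    // return the decremented value.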
    return RValue::get(Builder.CreateSub(RMWI, ConstantInt::get(Int32Ty, 1)));
  }
  case Builtin::BI_InterlockedExchangeAdd: {
    AtomicRMWInst *RMWI = Builder.CreateAtomicRMW(
        AtomicRMWInst::Add,
        EmitScalarExpr(E->getArg(0)),
        EmitScalarExpr(E->getArg(1)),
        llvm::SequentiallyConsistent);
    RMWI->setVolatile(true);
    return RValue::get(RMWI);
  }
  case Builtin::BI__readfsdword: {
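    // On x86, LLVM address space 257 designates the FS segment, so this is a
    // volatile 4-byte load at the given FS-relative offset.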
    Value *IntToPtr =
      Builder.CreateIntToPtr(EmitScalarExpr(E->getArg(0)),
                             llvm::PointerType::get(CGM.Int32Ty, 257));
    LoadInst *Load =
        Builder.CreateAlignedLoad(IntToPtr, /*Align=*/4, /*isVolatile=*/true);
    return RValue::get(Load);
  }
  }

  // If this is an alias for a lib function (e.g. __builtin_sin), emit
  // the call using the normal call path, but using the unmangled
  // version of the function name.
  if (getContext().BuiltinInfo.isLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E,
                           CGM.getBuiltinLibFunction(FD, BuiltinID));

  // If this is a predefined lib function (e.g. malloc), emit the call
  // using exactly the normal call path.
  if (getContext().BuiltinInfo.isPredefinedLibFunction(BuiltinID))
    return emitLibraryCall(*this, FD, E, EmitScalarExpr(E->getCallee()));

  // See if we have a target specific intrinsic.
  const char *Name = getContext().BuiltinInfo.GetName(BuiltinID);
  Intrinsic::ID IntrinsicID = Intrinsic::not_intrinsic;
  if (const char *Prefix =
          llvm::Triple::getArchTypePrefix(getTarget().getTriple().getArch())) {
    IntrinsicID = Intrinsic::getIntrinsicForGCCBuiltin(Prefix, Name);
    // NOTE: we don't need to perform a compatibility flag check here since
    // these intrinsics are declared in Builtins*.def via LANGBUILTIN, which
    // filters the MS builtins via ALL_MS_LANGUAGES, so they have already been
    // filtered out earlier.
    if (IntrinsicID == Intrinsic::not_intrinsic)
      IntrinsicID = Intrinsic::getIntrinsicForMSBuiltin(Prefix, Name);
  }

  if (IntrinsicID != Intrinsic::not_intrinsic) {
    SmallVector<Value*, 16> Args;

    // Find out if any arguments are required to be integer constant
    // expressions.
    unsigned ICEArguments = 0;
    ASTContext::GetBuiltinTypeError Error;
    getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
    assert(Error == ASTContext::GE_None && "Should not codegen an error");

    Function *F = CGM.getIntrinsic(IntrinsicID);
    llvm::FunctionType *FTy = F->getFunctionType();

    for (unsigned i = 0, e = E->getNumArgs(); i != e; ++i) {
      Value *ArgValue;
      // If this is a normal argument, just emit it as a scalar.
      if ((ICEArguments & (1 << i)) == 0) {
        ArgValue = EmitScalarExpr(E->getArg(i));
      } else {
        // If this is required to be a constant, constant fold it so that we
        // know that the generated intrinsic gets a ConstantInt.
        llvm::APSInt Result;
        bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
        assert(IsConst && "Constant arg isn't actually constant?");
        (void)IsConst;
        ArgValue = llvm::ConstantInt::get(getLLVMContext(), Result);
      }

      // If the intrinsic arg type is different from the builtin arg type
      // we need to do a bit cast.
      llvm::Type *PTy = FTy->getParamType(i);
      if (PTy != ArgValue->getType()) {
        assert(ArgValue->getType()->canLosslesslyBitCastTo(PTy) &&
               "Must be able to losslessly bit cast to param");
        ArgValue = Builder.CreateBitCast(ArgValue, PTy);
      }

      Args.push_back(ArgValue);
    }

    Value *V = Builder.CreateCall(F, Args);
    QualType BuiltinRetType = E->getType();

    llvm::Type *RetTy = VoidTy;
    if (!BuiltinRetType->isVoidType())
      RetTy = ConvertType(BuiltinRetType);

    if (RetTy != V->getType()) {
      assert(V->getType()->canLosslesslyBitCastTo(RetTy) &&
             "Must be able to losslessly bit cast result type");
      V = Builder.CreateBitCast(V, RetTy);
    }

    return RValue::get(V);
  }

  // See if we have a target specific builtin that needs to be lowered.
  if (Value *V = EmitTargetBuiltinExpr(BuiltinID, E))
    return RValue::get(V);

  ErrorUnsupported(E, "builtin function");

  // Unknown builtin, for now just dump it out and return undef.
  return GetUndefRValue(E->getType());
}

Value *CodeGenFunction::EmitTargetBuiltinExpr(unsigned BuiltinID,
                                              const CallExpr *E) {
  switch (getTarget().getTriple().getArch()) {
  case llvm::Triple::arm:
  case llvm::Triple::armeb:
  case llvm::Triple::thumb:
  case llvm::Triple::thumbeb:
    return EmitARMBuiltinExpr(BuiltinID, E);
  case llvm::Triple::aarch64:
  case llvm::Triple::aarch64_be:
    return EmitAArch64BuiltinExpr(BuiltinID, E);
  case llvm::Triple::x86:
  case llvm::Triple::x86_64:
    return EmitX86BuiltinExpr(BuiltinID, E);
  case llvm::Triple::ppc:
  case llvm::Triple::ppc64:
  case llvm::Triple::ppc64le:
    return EmitPPCBuiltinExpr(BuiltinID, E);
  case llvm::Triple::r600:
  case llvm::Triple::amdgcn:
    return EmitR600BuiltinExpr(BuiltinID, E);
  default:
    return nullptr;
  }
}

static llvm::VectorType *GetNeonType(CodeGenFunction *CGF,
                                     NeonTypeFlags TypeFlags,
                                     bool V1Ty = false) {
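  // For example, Int8 yields <8 x i8> for a 64-bit (D-register) type and
  // <16 x i8> for a 128-bit (Q-register) type; V1Ty forces a single-element
  // vector, which some scalar intrinsics use.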
  int IsQuad = TypeFlags.isQuad();
  switch (TypeFlags.getEltType()) {
  case NeonTypeFlags::Int8:
  case NeonTypeFlags::Poly8:
    return llvm::VectorType::get(CGF->Int8Ty, V1Ty ? 1 : (8 << IsQuad));
  case NeonTypeFlags::Int16:
  case NeonTypeFlags::Poly16:
  case NeonTypeFlags::Float16:
    return llvm::VectorType::get(CGF->Int16Ty, V1Ty ? 1 : (4 << IsQuad));
  case NeonTypeFlags::Int32:
    return llvm::VectorType::get(CGF->Int32Ty, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Int64:
  case NeonTypeFlags::Poly64:
    return llvm::VectorType::get(CGF->Int64Ty, V1Ty ? 1 : (1 << IsQuad));
  case NeonTypeFlags::Poly128:
    // FIXME: i128 and f128 aren't fully supported in Clang and LLVM; much of
    // the i128/f128 API is missing, so we use v16i8 to represent poly128 and
    // rely on pattern matching.
    return llvm::VectorType::get(CGF->Int8Ty, 16);
  case NeonTypeFlags::Float32:
    return llvm::VectorType::get(CGF->FloatTy, V1Ty ? 1 : (2 << IsQuad));
  case NeonTypeFlags::Float64:
    return llvm::VectorType::get(CGF->DoubleTy, V1Ty ? 1 : (1 << IsQuad));
  }
  llvm_unreachable("Unknown vector element type!");
}

Value *CodeGenFunction::EmitNeonSplat(Value *V, Constant *C) {
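  // Broadcast lane C of V to every element: the splat mask repeats the lane
  // index, so the shuffle reads the same input element nElts times.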
  unsigned nElts = cast<llvm::VectorType>(V->getType())->getNumElements();
  Value *SV = llvm::ConstantVector::getSplat(nElts, C);
  return Builder.CreateShuffleVector(V, V, SV, "lane");
}

Value *CodeGenFunction::EmitNeonCall(Function *F, SmallVectorImpl<Value*> &Ops,
                                     const char *name,
                                     unsigned shift, bool rightshift) {
  unsigned j = 0;
  for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
       ai != ae; ++ai, ++j)
    if (shift > 0 && shift == j)
      Ops[j] = EmitNeonShiftVector(Ops[j], ai->getType(), rightshift);
    else
      Ops[j] = Builder.CreateBitCast(Ops[j], ai->getType(), name);

  return Builder.CreateCall(F, Ops, name);
}

Value *CodeGenFunction::EmitNeonShiftVector(Value *V, llvm::Type *Ty,
                                            bool neg) {
  int SV = cast<ConstantInt>(V)->getSExtValue();

  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
  llvm::Constant *C = ConstantInt::get(VTy->getElementType(), neg ? -SV : SV);
  return llvm::ConstantVector::getSplat(VTy->getNumElements(), C);
}

/// \brief Right-shift a vector by a constant.
Value *CodeGenFunction::EmitNeonRShiftImm(Value *Vec, Value *Shift,
                                          llvm::Type *Ty, bool usgn,
                                          const char *name) {
  llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);

  int ShiftAmt = cast<ConstantInt>(Shift)->getSExtValue();
  int EltSize = VTy->getScalarSizeInBits();

  Vec = Builder.CreateBitCast(Vec, Ty);

  // lshr/ashr are undefined when the shift amount is equal to the vector
  // element size.
  if (ShiftAmt == EltSize) {
    if (usgn) {
      // Right-shifting an unsigned value by its size yields 0.
      llvm::Constant *Zero = ConstantInt::get(VTy->getElementType(), 0);
      return llvm::ConstantVector::getSplat(VTy->getNumElements(), Zero);
    } else {
      // Right-shifting a signed value by its size is equivalent
      // to a shift of size-1.
      --ShiftAmt;
      Shift = ConstantInt::get(VTy->getElementType(), ShiftAmt);
    }
  }

  Shift = EmitNeonShiftVector(Shift, Ty, false);
  if (usgn)
    return Builder.CreateLShr(Vec, Shift, name);
  else
    return Builder.CreateAShr(Vec, Shift, name);
}

/// EmitPointerWithAlignment - Given an expression with a pointer type, find
/// the alignment of the type referenced by the pointer. Skip over implicit
/// casts.
std::pair<llvm::Value*, unsigned>
CodeGenFunction::EmitPointerWithAlignment(const Expr *Addr) {
  assert(Addr->getType()->isPointerType());
  Addr = Addr->IgnoreParens();
  if (const ImplicitCastExpr *ICE = dyn_cast<ImplicitCastExpr>(Addr)) {
    if ((ICE->getCastKind() == CK_BitCast || ICE->getCastKind() == CK_NoOp) &&
        ICE->getSubExpr()->getType()->isPointerType()) {
      std::pair<llvm::Value*, unsigned> Ptr =
        EmitPointerWithAlignment(ICE->getSubExpr());
      Ptr.first = Builder.CreateBitCast(Ptr.first,
                                        ConvertType(Addr->getType()));
      return Ptr;
    } else if (ICE->getCastKind() == CK_ArrayToPointerDecay) {
      LValue LV = EmitLValue(ICE->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = ICE->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }
  if (const UnaryOperator *UO = dyn_cast<UnaryOperator>(Addr)) {
    if (UO->getOpcode() == UO_AddrOf) {
      LValue LV = EmitLValue(UO->getSubExpr());
      unsigned Align = LV.getAlignment().getQuantity();
      if (!Align) {
        // FIXME: Once LValues are fixed to always set alignment,
        // zap this code.
        QualType PtTy = UO->getSubExpr()->getType();
        if (!PtTy->isIncompleteType())
          Align = getContext().getTypeAlignInChars(PtTy).getQuantity();
        else
          Align = 1;
      }
      return std::make_pair(LV.getAddress(), Align);
    }
  }

  unsigned Align = 1;
  QualType PtTy = Addr->getType()->getPointeeType();
  if (!PtTy->isIncompleteType())
    Align = getContext().getTypeAlignInChars(PtTy).getQuantity();

  return std::make_pair(EmitScalarExpr(Addr), Align);
}

enum {
  AddRetType = (1 << 0),
  Add1ArgType = (1 << 1),
  Add2ArgTypes = (1 << 2),

  VectorizeRetType = (1 << 3),
  VectorizeArgTypes = (1 << 4),

  InventFloatType = (1 << 5),
  UnsignedAlts = (1 << 6),

  Use64BitVectors = (1 << 7),
  Use128BitVectors = (1 << 8),

  Vectorize1ArgType = Add1ArgType | VectorizeArgTypes,
  VectorRet = AddRetType | VectorizeRetType,
  VectorRetGetArgs01 =
      AddRetType | Add2ArgTypes | VectorizeRetType | VectorizeArgTypes,
  FpCmpzModifiers =
      AddRetType | VectorizeRetType | Add1ArgType | InventFloatType
};

struct NeonIntrinsicInfo {
  unsigned BuiltinID;
  unsigned LLVMIntrinsic;
  unsigned AltLLVMIntrinsic;
  const char *NameHint;
  unsigned TypeModifier;

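  // Comparing an entry against a bare BuiltinID lets std::lower_bound do a
  // binary search over tables sorted by BuiltinID (see findNeonIntrinsicInMap
  // below).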
  bool operator<(unsigned RHSBuiltinID) const {
    return BuiltinID < RHSBuiltinID;
  }
};

#define NEONMAP0(NameBase) \
  { NEON::BI__builtin_neon_ ## NameBase, 0, 0, #NameBase, 0 }

#define NEONMAP1(NameBase, LLVMIntrinsic, TypeModifier) \
  { NEON::BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, 0, #NameBase, TypeModifier }

#define NEONMAP2(NameBase, LLVMIntrinsic, AltLLVMIntrinsic, TypeModifier) \
  { NEON::BI__builtin_neon_ ## NameBase, \
    Intrinsic::LLVMIntrinsic, Intrinsic::AltLLVMIntrinsic, \
    #NameBase, TypeModifier }
static NeonIntrinsicInfo ARMSIMDIntrinsicMap[] = {
  NEONMAP2(vabd_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vabdq_v, arm_neon_vabdu, arm_neon_vabds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vabs_v, arm_neon_vabs, 0),
  NEONMAP1(vabsq_v, arm_neon_vabs, 0),
  NEONMAP0(vaddhn_v),
  NEONMAP1(vaesdq_v, arm_neon_aesd, 0),
  NEONMAP1(vaeseq_v, arm_neon_aese, 0),
  NEONMAP1(vaesimcq_v, arm_neon_aesimc, 0),
  NEONMAP1(vaesmcq_v, arm_neon_aesmc, 0),
  NEONMAP1(vbsl_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vbslq_v, arm_neon_vbsl, AddRetType),
  NEONMAP1(vcage_v, arm_neon_vacge, 0),
  NEONMAP1(vcageq_v, arm_neon_vacge, 0),
  NEONMAP1(vcagt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcagtq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcale_v, arm_neon_vacge, 0),
  NEONMAP1(vcaleq_v, arm_neon_vacge, 0),
  NEONMAP1(vcalt_v, arm_neon_vacgt, 0),
  NEONMAP1(vcaltq_v, arm_neon_vacgt, 0),
  NEONMAP1(vcls_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclsq_v, arm_neon_vcls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_v, arm_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, arm_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvt_s32_v),
  NEONMAP0(vcvt_s64_v),
  NEONMAP0(vcvt_u32_v),
  NEONMAP0(vcvt_u64_v),
  NEONMAP1(vcvta_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvta_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvta_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_s32_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_s64_v, arm_neon_vcvtas, 0),
  NEONMAP1(vcvtaq_u32_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtaq_u64_v, arm_neon_vcvtau, 0),
  NEONMAP1(vcvtm_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtm_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtm_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_s32_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_s64_v, arm_neon_vcvtms, 0),
  NEONMAP1(vcvtmq_u32_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtmq_u64_v, arm_neon_vcvtmu, 0),
  NEONMAP1(vcvtn_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtn_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtn_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_s32_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_s64_v, arm_neon_vcvtns, 0),
  NEONMAP1(vcvtnq_u32_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtnq_u64_v, arm_neon_vcvtnu, 0),
  NEONMAP1(vcvtp_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtp_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtp_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_s32_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_s64_v, arm_neon_vcvtps, 0),
  NEONMAP1(vcvtpq_u32_v, arm_neon_vcvtpu, 0),
  NEONMAP1(vcvtpq_u64_v, arm_neon_vcvtpu, 0),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f32_v, arm_neon_vcvtfxu2fp, arm_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s32_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, arm_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u32_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, arm_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_s32_v),
  NEONMAP0(vcvtq_s64_v),
  NEONMAP0(vcvtq_u32_v),
  NEONMAP0(vcvtq_u64_v),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP2(vhadd_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, arm_neon_vhaddu, arm_neon_vhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, arm_neon_vhsubu, arm_neon_vhsubs, Add1ArgType | UnsignedAlts),
  NEONMAP0(vld1_dup_v),
  NEONMAP1(vld1_v, arm_neon_vld1, 0),
  NEONMAP0(vld1q_dup_v),
  NEONMAP1(vld1q_v, arm_neon_vld1, 0),
  NEONMAP1(vld2_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2_v, arm_neon_vld2, 0),
  NEONMAP1(vld2q_lane_v, arm_neon_vld2lane, 0),
  NEONMAP1(vld2q_v, arm_neon_vld2, 0),
  NEONMAP1(vld3_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3_v, arm_neon_vld3, 0),
  NEONMAP1(vld3q_lane_v, arm_neon_vld3lane, 0),
  NEONMAP1(vld3q_v, arm_neon_vld3, 0),
  NEONMAP1(vld4_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4_v, arm_neon_vld4, 0),
  NEONMAP1(vld4q_lane_v, arm_neon_vld4lane, 0),
  NEONMAP1(vld4q_v, arm_neon_vld4, 0),
  NEONMAP2(vmax_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vmaxnm_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP1(vmaxnmq_v, arm_neon_vmaxnm, Add1ArgType),
  NEONMAP2(vmaxq_v, arm_neon_vmaxu, arm_neon_vmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vmin_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vminnm_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP1(vminnmq_v, arm_neon_vminnm, Add1ArgType),
  NEONMAP2(vminq_v, arm_neon_vminu, arm_neon_vmins, Add1ArgType | UnsignedAlts),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP0(vmull_v),
  NEONMAP1(vmulq_v, arm_neon_vmulp, Add1ArgType),
  NEONMAP2(vpadal_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP2(vpadalq_v, arm_neon_vpadalu, arm_neon_vpadals, UnsignedAlts),
  NEONMAP1(vpadd_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpaddl_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP2(vpaddlq_v, arm_neon_vpaddlu, arm_neon_vpaddls, UnsignedAlts),
  NEONMAP1(vpaddq_v, arm_neon_vpadd, Add1ArgType),
  NEONMAP2(vpmax_v, arm_neon_vpmaxu, arm_neon_vpmaxs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vpmin_v, arm_neon_vpminu, arm_neon_vpmins, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqabs_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, arm_neon_vqabs, Add1ArgType),
  NEONMAP2(vqadd_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, arm_neon_vqaddu, arm_neon_vqadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, arm_neon_vqdmull, arm_neon_vqadds, 0),
  NEONMAP2(vqdmlsl_v, arm_neon_vqdmull, arm_neon_vqsubs, 0),
  NEONMAP1(vqdmulh_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, arm_neon_vqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, arm_neon_vqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, arm_neon_vqmovnu, arm_neon_vqmovns, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, arm_neon_vqmovnsu, Add1ArgType),
  NEONMAP1(vqneg_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, arm_neon_vqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, arm_neon_vqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, arm_neon_vqrshiftu, arm_neon_vqrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshl_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, arm_neon_vqshiftu, arm_neon_vqshifts, UnsignedAlts),
  NEONMAP2(vqshlq_v, arm_neon_vqshiftu, arm_neon_vqshifts, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP1(vqshluq_n_v, arm_neon_vqshiftsu, 0),
  NEONMAP2(vqsub_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, arm_neon_vqsubu, arm_neon_vqsubs, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, arm_neon_vraddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP2(vrecpeq_v, arm_neon_vrecpe, arm_neon_vrecpe, 0),
  NEONMAP1(vrecps_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, arm_neon_vrecps, Add1ArgType),
  NEONMAP2(vrhadd_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, arm_neon_vrhaddu, arm_neon_vrhadds, Add1ArgType | UnsignedAlts),
  NEONMAP1(vrnd_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrnda_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndaq_v, arm_neon_vrinta, Add1ArgType),
  NEONMAP1(vrndm_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndmq_v, arm_neon_vrintm, Add1ArgType),
  NEONMAP1(vrndn_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndnq_v, arm_neon_vrintn, Add1ArgType),
  NEONMAP1(vrndp_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndpq_v, arm_neon_vrintp, Add1ArgType),
  NEONMAP1(vrndq_v, arm_neon_vrintz, Add1ArgType),
  NEONMAP1(vrndx_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP1(vrndxq_v, arm_neon_vrintx, Add1ArgType),
  NEONMAP2(vrshl_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, arm_neon_vrshiftu, arm_neon_vrshifts, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, arm_neon_vrshiftu, arm_neon_vrshifts, UnsignedAlts),
  NEONMAP2(vrsqrte_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP2(vrsqrteq_v, arm_neon_vrsqrte, arm_neon_vrsqrte, 0),
  NEONMAP1(vrsqrts_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, arm_neon_vrsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, arm_neon_vrsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, arm_neon_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, arm_neon_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, arm_neon_sha256h2, 0),
  NEONMAP1(vsha256hq_v, arm_neon_sha256h, 0),
  NEONMAP1(vsha256su0q_v, arm_neon_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, arm_neon_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, arm_neon_vshiftu, arm_neon_vshifts, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP1(vst1_v, arm_neon_vst1, 0),
  NEONMAP1(vst1q_v, arm_neon_vst1, 0),
  NEONMAP1(vst2_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2_v, arm_neon_vst2, 0),
  NEONMAP1(vst2q_lane_v, arm_neon_vst2lane, 0),
  NEONMAP1(vst2q_v, arm_neon_vst2, 0),
  NEONMAP1(vst3_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3_v, arm_neon_vst3, 0),
  NEONMAP1(vst3q_lane_v, arm_neon_vst3lane, 0),
  NEONMAP1(vst3q_v, arm_neon_vst3, 0),
  NEONMAP1(vst4_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4_v, arm_neon_vst4, 0),
  NEONMAP1(vst4q_lane_v, arm_neon_vst4lane, 0),
  NEONMAP1(vst4q_v, arm_neon_vst4, 0),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtrn_v),
  NEONMAP0(vtrnq_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v),
  NEONMAP0(vuzp_v),
  NEONMAP0(vuzpq_v),
  NEONMAP0(vzip_v),
  NEONMAP0(vzipq_v)
};

static NeonIntrinsicInfo AArch64SIMDIntrinsicMap[] = {
  NEONMAP1(vabs_v, aarch64_neon_abs, 0),
  NEONMAP1(vabsq_v, aarch64_neon_abs, 0),
  NEONMAP0(vaddhn_v),
  NEONMAP1(vaesdq_v, aarch64_crypto_aesd, 0),
  NEONMAP1(vaeseq_v, aarch64_crypto_aese, 0),
  NEONMAP1(vaesimcq_v, aarch64_crypto_aesimc, 0),
  NEONMAP1(vaesmcq_v, aarch64_crypto_aesmc, 0),
  NEONMAP1(vcage_v, aarch64_neon_facge, 0),
  NEONMAP1(vcageq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcagt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcagtq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcale_v, aarch64_neon_facge, 0),
  NEONMAP1(vcaleq_v, aarch64_neon_facge, 0),
  NEONMAP1(vcalt_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcaltq_v, aarch64_neon_facgt, 0),
  NEONMAP1(vcls_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclsq_v, aarch64_neon_cls, Add1ArgType),
  NEONMAP1(vclz_v, ctlz, Add1ArgType),
  NEONMAP1(vclzq_v, ctlz, Add1ArgType),
  NEONMAP1(vcnt_v, ctpop, Add1ArgType),
  NEONMAP1(vcntq_v, ctpop, Add1ArgType),
  NEONMAP1(vcvt_f16_v, aarch64_neon_vcvtfp2hf, 0),
  NEONMAP1(vcvt_f32_f16, aarch64_neon_vcvthf2fp, 0),
  NEONMAP0(vcvt_f32_v),
  NEONMAP2(vcvt_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvt_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvt_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvt_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvt_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP0(vcvtq_f32_v),
  NEONMAP2(vcvtq_n_f32_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP2(vcvtq_n_f64_v, aarch64_neon_vcvtfxu2fp, aarch64_neon_vcvtfxs2fp, 0),
  NEONMAP1(vcvtq_n_s32_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_s64_v, aarch64_neon_vcvtfp2fxs, 0),
  NEONMAP1(vcvtq_n_u32_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtq_n_u64_v, aarch64_neon_vcvtfp2fxu, 0),
  NEONMAP1(vcvtx_f32_v, aarch64_neon_fcvtxn, AddRetType | Add1ArgType),
  NEONMAP0(vext_v),
  NEONMAP0(vextq_v),
  NEONMAP0(vfma_v),
  NEONMAP0(vfmaq_v),
  NEONMAP2(vhadd_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhaddq_v, aarch64_neon_uhadd, aarch64_neon_shadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsub_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vhsubq_v, aarch64_neon_uhsub, aarch64_neon_shsub, Add1ArgType | UnsignedAlts),
  NEONMAP0(vmovl_v),
  NEONMAP0(vmovn_v),
  NEONMAP1(vmul_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vmulq_v, aarch64_neon_pmul, Add1ArgType),
  NEONMAP1(vpadd_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP2(vpaddl_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP2(vpaddlq_v, aarch64_neon_uaddlp, aarch64_neon_saddlp, UnsignedAlts),
  NEONMAP1(vpaddq_v, aarch64_neon_addp, Add1ArgType),
  NEONMAP1(vqabs_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsq_v, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP2(vqadd_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqaddq_v, aarch64_neon_uqadd, aarch64_neon_sqadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqdmlal_v, aarch64_neon_sqdmull, aarch64_neon_sqadd, 0),
  NEONMAP2(vqdmlsl_v, aarch64_neon_sqdmull, aarch64_neon_sqsub, 0),
  NEONMAP1(vqdmulh_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmulhq_v, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmull_v, aarch64_neon_sqdmull, Add1ArgType),
  NEONMAP2(vqmovn_v, aarch64_neon_uqxtn, aarch64_neon_sqxtn, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqmovun_v, aarch64_neon_sqxtun, Add1ArgType),
  NEONMAP1(vqneg_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegq_v, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulh_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrdmulhq_v, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP2(vqrshl_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqrshlq_v, aarch64_neon_uqrshl, aarch64_neon_sqrshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshl_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshl_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqshlq_n_v, aarch64_neon_uqshl, aarch64_neon_sqshl, UnsignedAlts),
  NEONMAP2(vqshlq_v, aarch64_neon_uqshl, aarch64_neon_sqshl, Add1ArgType | UnsignedAlts),
  NEONMAP1(vqshlu_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP1(vqshluq_n_v, aarch64_neon_sqshlu, 0),
  NEONMAP2(vqsub_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP2(vqsubq_v, aarch64_neon_uqsub, aarch64_neon_sqsub, Add1ArgType | UnsignedAlts),
  NEONMAP1(vraddhn_v, aarch64_neon_raddhn, Add1ArgType),
  NEONMAP2(vrecpe_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP2(vrecpeq_v, aarch64_neon_frecpe, aarch64_neon_urecpe, 0),
  NEONMAP1(vrecps_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP1(vrecpsq_v, aarch64_neon_frecps, Add1ArgType),
  NEONMAP2(vrhadd_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrhaddq_v, aarch64_neon_urhadd, aarch64_neon_srhadd, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshl_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshlq_v, aarch64_neon_urshl, aarch64_neon_srshl, Add1ArgType | UnsignedAlts),
  NEONMAP2(vrshr_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrshrq_n_v, aarch64_neon_urshl, aarch64_neon_srshl, UnsignedAlts),
  NEONMAP2(vrsqrte_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP2(vrsqrteq_v, aarch64_neon_frsqrte, aarch64_neon_ursqrte, 0),
  NEONMAP1(vrsqrts_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtsq_v, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsubhn_v, aarch64_neon_rsubhn, Add1ArgType),
  NEONMAP1(vsha1su0q_v, aarch64_crypto_sha1su0, 0),
  NEONMAP1(vsha1su1q_v, aarch64_crypto_sha1su1, 0),
  NEONMAP1(vsha256h2q_v, aarch64_crypto_sha256h2, 0),
  NEONMAP1(vsha256hq_v, aarch64_crypto_sha256h, 0),
  NEONMAP1(vsha256su0q_v, aarch64_crypto_sha256su0, 0),
  NEONMAP1(vsha256su1q_v, aarch64_crypto_sha256su1, 0),
  NEONMAP0(vshl_n_v),
  NEONMAP2(vshl_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshll_n_v),
  NEONMAP0(vshlq_n_v),
  NEONMAP2(vshlq_v, aarch64_neon_ushl, aarch64_neon_sshl, Add1ArgType | UnsignedAlts),
  NEONMAP0(vshr_n_v),
  NEONMAP0(vshrn_n_v),
  NEONMAP0(vshrq_n_v),
  NEONMAP0(vsubhn_v),
  NEONMAP0(vtst_v),
  NEONMAP0(vtstq_v)
};

static NeonIntrinsicInfo AArch64SISDIntrinsicMap[] = {
  NEONMAP1(vabdd_f64, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabds_f32, aarch64_sisd_fabd, Add1ArgType),
  NEONMAP1(vabsd_s64, aarch64_neon_abs, Add1ArgType),
  NEONMAP1(vaddlv_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlv_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_s32, aarch64_neon_saddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddlvq_u32, aarch64_neon_uaddlv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddv_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f32, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_f64, aarch64_neon_faddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s32, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_s64, aarch64_neon_saddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u32, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vaddvq_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vcaged_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcages_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcagtd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcagts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcaled_f64, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcales_f32, aarch64_neon_facge, AddRetType | Add1ArgType),
  NEONMAP1(vcaltd_f64, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcalts_f32, aarch64_neon_facgt, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_s64_f64, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtad_u64_f64, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_s32_f32, aarch64_neon_fcvtas, AddRetType | Add1ArgType),
  NEONMAP1(vcvtas_u32_f32, aarch64_neon_fcvtau, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_s64, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_f64_u64, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_s64_f64, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvtd_n_u64_f64, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_s64_f64, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtmd_u64_f64, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_s32_f32, aarch64_neon_fcvtms, AddRetType | Add1ArgType),
  NEONMAP1(vcvtms_u32_f32, aarch64_neon_fcvtmu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_s64_f64, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtnd_u64_f64, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_s32_f32, aarch64_neon_fcvtns, AddRetType | Add1ArgType),
  NEONMAP1(vcvtns_u32_f32, aarch64_neon_fcvtnu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_s64_f64, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtpd_u64_f64, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_s32_f32, aarch64_neon_fcvtps, AddRetType | Add1ArgType),
  NEONMAP1(vcvtps_u32_f32, aarch64_neon_fcvtpu, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_s32, aarch64_neon_vcvtfxs2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_f32_u32, aarch64_neon_vcvtfxu2fp, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_s32_f32, aarch64_neon_vcvtfp2fxs, AddRetType | Add1ArgType),
  NEONMAP1(vcvts_n_u32_f32, aarch64_neon_vcvtfp2fxu, AddRetType | Add1ArgType),
  NEONMAP1(vcvtxd_f32_f64, aarch64_sisd_fcvtxn, 0),
  NEONMAP1(vmaxnmv_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxnmvq_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxv_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_s32, aarch64_neon_smaxv, AddRetType | Add1ArgType),
  NEONMAP1(vmaxvq_u32, aarch64_neon_umaxv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmv_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminnmvq_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminv_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_s32, aarch64_neon_sminv, AddRetType | Add1ArgType),
  NEONMAP1(vminvq_u32, aarch64_neon_uminv, AddRetType | Add1ArgType),
  NEONMAP1(vmull_p64, aarch64_neon_pmull64, 0),
  NEONMAP1(vmulxd_f64, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vmulxs_f32, aarch64_neon_fmulx, Add1ArgType),
  NEONMAP1(vpaddd_s64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpaddd_u64, aarch64_neon_uaddv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnmqd_f64, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxnms_f32, aarch64_neon_fmaxnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxqd_f64, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpmaxs_f32, aarch64_neon_fmaxv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnmqd_f64, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminnms_f32, aarch64_neon_fminnmv, AddRetType | Add1ArgType),
  NEONMAP1(vpminqd_f64, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vpmins_f32, aarch64_neon_fminv, AddRetType | Add1ArgType),
  NEONMAP1(vqabsb_s8, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabsd_s64, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqabsh_s16, aarch64_neon_sqabs, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqabss_s32, aarch64_neon_sqabs, Add1ArgType),
  NEONMAP1(vqaddb_s8, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddb_u8, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddd_s64, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqaddd_u64, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqaddh_s16, aarch64_neon_sqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqaddh_u16, aarch64_neon_uqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqadds_s32, aarch64_neon_sqadd, Add1ArgType),
  NEONMAP1(vqadds_u32, aarch64_neon_uqadd, Add1ArgType),
  NEONMAP1(vqdmulhh_s16, aarch64_neon_sqdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqdmulhs_s32, aarch64_neon_sqdmulh, Add1ArgType),
  NEONMAP1(vqdmullh_s16, aarch64_neon_sqdmull, VectorRet | Use128BitVectors),
  NEONMAP1(vqdmulls_s32, aarch64_neon_sqdmulls_scalar, 0),
  NEONMAP1(vqmovnd_s64, aarch64_neon_scalar_sqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnd_u64, aarch64_neon_scalar_uqxtn, AddRetType | Add1ArgType),
  NEONMAP1(vqmovnh_s16, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovnh_u16, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_s32, aarch64_neon_sqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovns_u32, aarch64_neon_uqxtn, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovund_s64, aarch64_neon_scalar_sqxtun, AddRetType | Add1ArgType),
  NEONMAP1(vqmovunh_s16, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqmovuns_s32, aarch64_neon_sqxtun, VectorRet | Use64BitVectors),
  NEONMAP1(vqnegb_s8, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegd_s64, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqnegh_s16, aarch64_neon_sqneg, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqnegs_s32, aarch64_neon_sqneg, Add1ArgType),
  NEONMAP1(vqrdmulhh_s16, aarch64_neon_sqrdmulh, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrdmulhs_s32, aarch64_neon_sqrdmulh, Add1ArgType),
  NEONMAP1(vqrshlb_s8, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlb_u8, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshld_s64, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshld_u64, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshlh_s16, aarch64_neon_sqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshlh_u16, aarch64_neon_uqrshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqrshls_s32, aarch64_neon_sqrshl, Add1ArgType),
  NEONMAP1(vqrshls_u32, aarch64_neon_uqrshl, Add1ArgType),
  NEONMAP1(vqrshrnd_n_s64, aarch64_neon_sqrshrn, AddRetType),
  NEONMAP1(vqrshrnd_n_u64, aarch64_neon_uqrshrn, AddRetType),
  NEONMAP1(vqrshrnh_n_s16, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrnh_n_u16, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_s32, aarch64_neon_sqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrns_n_u32, aarch64_neon_uqrshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshrund_n_s64, aarch64_neon_sqrshrun, AddRetType),
  NEONMAP1(vqrshrunh_n_s16, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqrshruns_n_s32, aarch64_neon_sqrshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshlb_n_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_n_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_s8, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlb_u8, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshld_s64, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshld_u64, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlh_n_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_n_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_s16, aarch64_neon_sqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlh_u16, aarch64_neon_uqshl, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshls_n_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_n_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshls_s32, aarch64_neon_sqshl, Add1ArgType),
  NEONMAP1(vqshls_u32, aarch64_neon_uqshl, Add1ArgType),
  NEONMAP1(vqshlub_n_s8, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshluh_n_s16, aarch64_neon_sqshlu, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqshlus_n_s32, aarch64_neon_sqshlu, Add1ArgType),
  NEONMAP1(vqshrnd_n_s64, aarch64_neon_sqshrn, AddRetType),
  NEONMAP1(vqshrnd_n_u64, aarch64_neon_uqshrn, AddRetType),
  NEONMAP1(vqshrnh_n_s16, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrnh_n_u16, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_s32, aarch64_neon_sqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrns_n_u32, aarch64_neon_uqshrn, VectorRet | Use64BitVectors),
  NEONMAP1(vqshrund_n_s64, aarch64_neon_sqshrun, AddRetType),
  NEONMAP1(vqshrunh_n_s16, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqshruns_n_s32, aarch64_neon_sqshrun, VectorRet | Use64BitVectors),
  NEONMAP1(vqsubb_s8, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubb_u8, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubd_s64, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubd_u64, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vqsubh_s16, aarch64_neon_sqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubh_u16, aarch64_neon_uqsub, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vqsubs_s32, aarch64_neon_sqsub, Add1ArgType),
  NEONMAP1(vqsubs_u32, aarch64_neon_uqsub, Add1ArgType),
  NEONMAP1(vrecped_f64, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpes_f32, aarch64_neon_frecpe, Add1ArgType),
  NEONMAP1(vrecpxd_f64, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrecpxs_f32, aarch64_neon_frecpx, Add1ArgType),
  NEONMAP1(vrshld_s64, aarch64_neon_srshl, Add1ArgType),
  NEONMAP1(vrshld_u64, aarch64_neon_urshl, Add1ArgType),
  NEONMAP1(vrsqrted_f64, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtes_f32, aarch64_neon_frsqrte, Add1ArgType),
  NEONMAP1(vrsqrtsd_f64, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vrsqrtss_f32, aarch64_neon_frsqrts, Add1ArgType),
  NEONMAP1(vsha1cq_u32, aarch64_crypto_sha1c, 0),
  NEONMAP1(vsha1h_u32, aarch64_crypto_sha1h, 0),
  NEONMAP1(vsha1mq_u32, aarch64_crypto_sha1m, 0),
  NEONMAP1(vsha1pq_u32, aarch64_crypto_sha1p, 0),
  NEONMAP1(vshld_s64, aarch64_neon_sshl, Add1ArgType),
  NEONMAP1(vshld_u64, aarch64_neon_ushl, Add1ArgType),
  NEONMAP1(vslid_n_s64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vslid_n_u64, aarch64_neon_vsli, Vectorize1ArgType),
  NEONMAP1(vsqaddb_u8, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqaddd_u64, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsqaddh_u16, aarch64_neon_usqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vsqadds_u32, aarch64_neon_usqadd, Add1ArgType),
  NEONMAP1(vsrid_n_s64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vsrid_n_u64, aarch64_neon_vsri, Vectorize1ArgType),
  NEONMAP1(vuqaddb_s8, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqaddd_s64, aarch64_neon_suqadd, Add1ArgType),
  NEONMAP1(vuqaddh_s16, aarch64_neon_suqadd, Vectorize1ArgType | Use64BitVectors),
  NEONMAP1(vuqadds_s32, aarch64_neon_suqadd, Add1ArgType)
};

#undef NEONMAP0
#undef NEONMAP1
#undef NEONMAP2

static bool NEONSIMDIntrinsicsProvenSorted = false;

static bool AArch64SIMDIntrinsicsProvenSorted = false;
static bool AArch64SISDIntrinsicsProvenSorted = false;

static const NeonIntrinsicInfo *
findNeonIntrinsicInMap(ArrayRef<NeonIntrinsicInfo> IntrinsicMap,
                       unsigned BuiltinID, bool &MapProvenSorted) {

#ifndef NDEBUG
  if (!MapProvenSorted) {
    // FIXME: use std::is_sorted once C++11 is allowed
    for (unsigned i = 0; i < IntrinsicMap.size() - 1; ++i)
      assert(IntrinsicMap[i].BuiltinID <= IntrinsicMap[i + 1].BuiltinID);
    MapProvenSorted = true;
  }
#endif

  const NeonIntrinsicInfo *Builtin =
      std::lower_bound(IntrinsicMap.begin(), IntrinsicMap.end(), BuiltinID);

  if (Builtin != IntrinsicMap.end() && Builtin->BuiltinID == BuiltinID)
    return Builtin;

  return nullptr;
}

Function *CodeGenFunction::LookupNeonLLVMIntrinsic(unsigned IntrinsicID,
                                                   unsigned Modifier,
                                                   llvm::Type *ArgType,
                                                   const CallExpr *E) {
  int VectorSize = 0;
  if (Modifier & Use64BitVectors)
    VectorSize = 64;
  else if (Modifier & Use128BitVectors)
    VectorSize = 128;

  // Return type.
  SmallVector<llvm::Type *, 3> Tys;
  if (Modifier & AddRetType) {
    llvm::Type *Ty = ConvertType(E->getCallReturnType());
    if (Modifier & VectorizeRetType)
      Ty = llvm::VectorType::get(
          Ty, VectorSize ? VectorSize / Ty->getPrimitiveSizeInBits() : 1);

    Tys.push_back(Ty);
  }
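  // Worked example: vqdmullh_s16 is mapped with VectorRet | Use128BitVectors,
  // so its i32 return type is vectorized to 128/32 = 4 elements, overloading
  // the intrinsic on <4 x i32>.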

  // Arguments.
  if (Modifier & VectorizeArgTypes) {
    int Elts = VectorSize ? VectorSize / ArgType->getPrimitiveSizeInBits() : 1;
    ArgType = llvm::VectorType::get(ArgType, Elts);
  }

  if (Modifier & (Add1ArgType | Add2ArgTypes))
    Tys.push_back(ArgType);

  if (Modifier & Add2ArgTypes)
    Tys.push_back(ArgType);

  if (Modifier & InventFloatType)
    Tys.push_back(FloatTy);

  return CGM.getIntrinsic(IntrinsicID, Tys);
}
2562
EmitCommonNeonSISDBuiltinExpr(CodeGenFunction & CGF,const NeonIntrinsicInfo & SISDInfo,SmallVectorImpl<Value * > & Ops,const CallExpr * E)2563 static Value *EmitCommonNeonSISDBuiltinExpr(CodeGenFunction &CGF,
2564 const NeonIntrinsicInfo &SISDInfo,
2565 SmallVectorImpl<Value *> &Ops,
2566 const CallExpr *E) {
2567 unsigned BuiltinID = SISDInfo.BuiltinID;
2568 unsigned int Int = SISDInfo.LLVMIntrinsic;
2569 unsigned Modifier = SISDInfo.TypeModifier;
2570 const char *s = SISDInfo.NameHint;
2571
2572 switch (BuiltinID) {
2573 case NEON::BI__builtin_neon_vcled_s64:
2574 case NEON::BI__builtin_neon_vcled_u64:
2575 case NEON::BI__builtin_neon_vcles_f32:
2576 case NEON::BI__builtin_neon_vcled_f64:
2577 case NEON::BI__builtin_neon_vcltd_s64:
2578 case NEON::BI__builtin_neon_vcltd_u64:
2579 case NEON::BI__builtin_neon_vclts_f32:
2580 case NEON::BI__builtin_neon_vcltd_f64:
2581 case NEON::BI__builtin_neon_vcales_f32:
2582 case NEON::BI__builtin_neon_vcaled_f64:
2583 case NEON::BI__builtin_neon_vcalts_f32:
2584 case NEON::BI__builtin_neon_vcaltd_f64:
2585 // Only one direction of comparisons actually exists; cmle is actually a
2586 // cmge with swapped operands. The table gives us the right intrinsic, but
2587 // we still need to do the swap here.
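// e.g. (illustrative) vcled_s64(a, b) is emitted as the corresponding
// cmge-style intrinsic with operands (b, a).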
2588 std::swap(Ops[0], Ops[1]);
2589 break;
2590 }
2591
2592 assert(Int && "Generic code assumes a valid intrinsic");
2593
2594 // Determine the type(s) of this overloaded AArch64 intrinsic.
2595 const Expr *Arg = E->getArg(0);
2596 llvm::Type *ArgTy = CGF.ConvertType(Arg->getType());
2597 Function *F = CGF.LookupNeonLLVMIntrinsic(Int, Modifier, ArgTy, E);
2598
2599 int j = 0;
2600 ConstantInt *C0 = ConstantInt::get(CGF.SizeTy, 0);
2601 for (Function::const_arg_iterator ai = F->arg_begin(), ae = F->arg_end();
2602 ai != ae; ++ai, ++j) {
2603 llvm::Type *ArgTy = ai->getType();
2604 if (Ops[j]->getType()->getPrimitiveSizeInBits() ==
2605 ArgTy->getPrimitiveSizeInBits())
2606 continue;
2607
2608 assert(ArgTy->isVectorTy() && !Ops[j]->getType()->isVectorTy());
2609 // The constant argument to an _n_ intrinsic always has Int32Ty, so truncate
2610 // it before inserting.
2611 Ops[j] =
2612 CGF.Builder.CreateTruncOrBitCast(Ops[j], ArgTy->getVectorElementType());
2613 Ops[j] =
2614 CGF.Builder.CreateInsertElement(UndefValue::get(ArgTy), Ops[j], C0);
2615 }
2616
2617 Value *Result = CGF.EmitNeonCall(F, Ops, s);
2618 llvm::Type *ResultType = CGF.ConvertType(E->getType());
2619 if (ResultType->getPrimitiveSizeInBits() <
2620 Result->getType()->getPrimitiveSizeInBits())
2621 return CGF.Builder.CreateExtractElement(Result, C0);
2622
2623 return CGF.Builder.CreateBitCast(Result, ResultType, s);
2624 }
2625
2626 Value *CodeGenFunction::EmitCommonNeonBuiltinExpr(
2627 unsigned BuiltinID, unsigned LLVMIntrinsic, unsigned AltLLVMIntrinsic,
2628 const char *NameHint, unsigned Modifier, const CallExpr *E,
2629 SmallVectorImpl<llvm::Value *> &Ops, llvm::Value *Align) {
2630 // Get the last argument, which specifies the vector type.
2631 llvm::APSInt NeonTypeConst;
2632 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
2633 if (!Arg->isIntegerConstantExpr(NeonTypeConst, getContext()))
2634 return nullptr;
2635
2636 // Determine the type of this overloaded NEON intrinsic.
2637 NeonTypeFlags Type(NeonTypeConst.getZExtValue());
2638 bool Usgn = Type.isUnsigned();
2639 bool Quad = Type.isQuad();
2640
2641 llvm::VectorType *VTy = GetNeonType(this, Type);
2642 llvm::Type *Ty = VTy;
2643 if (!Ty)
2644 return nullptr;
2645
2646 unsigned Int = LLVMIntrinsic;
2647 if ((Modifier & UnsignedAlts) && !Usgn)
2648 Int = AltLLVMIntrinsic;
2649
2650 switch (BuiltinID) {
2651 default: break;
2652 case NEON::BI__builtin_neon_vabs_v:
2653 case NEON::BI__builtin_neon_vabsq_v:
2654 if (VTy->getElementType()->isFloatingPointTy())
2655 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::fabs, Ty), Ops, "vabs");
2656 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vabs");
2657 case NEON::BI__builtin_neon_vaddhn_v: {
2658 llvm::VectorType *SrcTy =
2659 llvm::VectorType::getExtendedElementVectorType(VTy);
2660
2661 // %sum = add <4 x i32> %lhs, %rhs
2662 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
2663 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
2664 Ops[0] = Builder.CreateAdd(Ops[0], Ops[1], "vaddhn");
2665
2666 // %high = lshr <4 x i32> %sum, <i32 16, i32 16, i32 16, i32 16>
2667 Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
2668 SrcTy->getScalarSizeInBits() / 2);
2669 ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
2670 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vaddhn");
2671
2672 // %res = trunc <4 x i32> %high to <4 x i16>
2673 return Builder.CreateTrunc(Ops[0], VTy, "vaddhn");
2674 }
2675 case NEON::BI__builtin_neon_vcale_v:
2676 case NEON::BI__builtin_neon_vcaleq_v:
2677 case NEON::BI__builtin_neon_vcalt_v:
2678 case NEON::BI__builtin_neon_vcaltq_v:
2679 std::swap(Ops[0], Ops[1]);
2680 case NEON::BI__builtin_neon_vcage_v:
2681 case NEON::BI__builtin_neon_vcageq_v:
2682 case NEON::BI__builtin_neon_vcagt_v:
2683 case NEON::BI__builtin_neon_vcagtq_v: {
2684 llvm::Type *VecFlt = llvm::VectorType::get(
2685 VTy->getScalarSizeInBits() == 32 ? FloatTy : DoubleTy,
2686 VTy->getNumElements());
2687 llvm::Type *Tys[] = { VTy, VecFlt };
2688 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
2689 return EmitNeonCall(F, Ops, NameHint);
2690 }
2691 case NEON::BI__builtin_neon_vclz_v:
2692 case NEON::BI__builtin_neon_vclzq_v:
2693 // We generate a target-independent intrinsic, which needs a second
2694 // argument specifying whether or not clz of zero is undefined; on ARM it isn't.
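// e.g. on ARM this lowers to llvm.ctlz.v4i32(%x, i1 false), since the ARM
// CLZ instruction is well-defined for zero (it returns the bit width).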
2695 Ops.push_back(Builder.getInt1(getTarget().isCLZForZeroUndef()));
2696 break;
2697 case NEON::BI__builtin_neon_vcvt_f32_v:
2698 case NEON::BI__builtin_neon_vcvtq_f32_v:
2699 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2700 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float32, false, Quad));
2701 return Usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
2702 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
2703 case NEON::BI__builtin_neon_vcvt_n_f32_v:
2704 case NEON::BI__builtin_neon_vcvt_n_f64_v:
2705 case NEON::BI__builtin_neon_vcvtq_n_f32_v:
2706 case NEON::BI__builtin_neon_vcvtq_n_f64_v: {
2707 bool Double =
2708 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2709 llvm::Type *FloatTy =
2710 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2711 : NeonTypeFlags::Float32,
2712 false, Quad));
2713 llvm::Type *Tys[2] = { FloatTy, Ty };
2714 Int = Usgn ? LLVMIntrinsic : AltLLVMIntrinsic;
2715 Function *F = CGM.getIntrinsic(Int, Tys);
2716 return EmitNeonCall(F, Ops, "vcvt_n");
2717 }
2718 case NEON::BI__builtin_neon_vcvt_n_s32_v:
2719 case NEON::BI__builtin_neon_vcvt_n_u32_v:
2720 case NEON::BI__builtin_neon_vcvt_n_s64_v:
2721 case NEON::BI__builtin_neon_vcvt_n_u64_v:
2722 case NEON::BI__builtin_neon_vcvtq_n_s32_v:
2723 case NEON::BI__builtin_neon_vcvtq_n_u32_v:
2724 case NEON::BI__builtin_neon_vcvtq_n_s64_v:
2725 case NEON::BI__builtin_neon_vcvtq_n_u64_v: {
2726 bool Double =
2727 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2728 llvm::Type *FloatTy =
2729 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2730 : NeonTypeFlags::Float32,
2731 false, Quad));
2732 llvm::Type *Tys[2] = { Ty, FloatTy };
2733 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Tys);
2734 return EmitNeonCall(F, Ops, "vcvt_n");
2735 }
2736 case NEON::BI__builtin_neon_vcvt_s32_v:
2737 case NEON::BI__builtin_neon_vcvt_u32_v:
2738 case NEON::BI__builtin_neon_vcvt_s64_v:
2739 case NEON::BI__builtin_neon_vcvt_u64_v:
2740 case NEON::BI__builtin_neon_vcvtq_s32_v:
2741 case NEON::BI__builtin_neon_vcvtq_u32_v:
2742 case NEON::BI__builtin_neon_vcvtq_s64_v:
2743 case NEON::BI__builtin_neon_vcvtq_u64_v: {
2744 bool Double =
2745 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2746 llvm::Type *FloatTy =
2747 GetNeonType(this, NeonTypeFlags(Double ? NeonTypeFlags::Float64
2748 : NeonTypeFlags::Float32,
2749 false, Quad));
2750 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
2751 return Usgn ? Builder.CreateFPToUI(Ops[0], Ty, "vcvt")
2752 : Builder.CreateFPToSI(Ops[0], Ty, "vcvt");
2753 }
2754 case NEON::BI__builtin_neon_vcvta_s32_v:
2755 case NEON::BI__builtin_neon_vcvta_s64_v:
2756 case NEON::BI__builtin_neon_vcvta_u32_v:
2757 case NEON::BI__builtin_neon_vcvta_u64_v:
2758 case NEON::BI__builtin_neon_vcvtaq_s32_v:
2759 case NEON::BI__builtin_neon_vcvtaq_s64_v:
2760 case NEON::BI__builtin_neon_vcvtaq_u32_v:
2761 case NEON::BI__builtin_neon_vcvtaq_u64_v:
2762 case NEON::BI__builtin_neon_vcvtn_s32_v:
2763 case NEON::BI__builtin_neon_vcvtn_s64_v:
2764 case NEON::BI__builtin_neon_vcvtn_u32_v:
2765 case NEON::BI__builtin_neon_vcvtn_u64_v:
2766 case NEON::BI__builtin_neon_vcvtnq_s32_v:
2767 case NEON::BI__builtin_neon_vcvtnq_s64_v:
2768 case NEON::BI__builtin_neon_vcvtnq_u32_v:
2769 case NEON::BI__builtin_neon_vcvtnq_u64_v:
2770 case NEON::BI__builtin_neon_vcvtp_s32_v:
2771 case NEON::BI__builtin_neon_vcvtp_s64_v:
2772 case NEON::BI__builtin_neon_vcvtp_u32_v:
2773 case NEON::BI__builtin_neon_vcvtp_u64_v:
2774 case NEON::BI__builtin_neon_vcvtpq_s32_v:
2775 case NEON::BI__builtin_neon_vcvtpq_s64_v:
2776 case NEON::BI__builtin_neon_vcvtpq_u32_v:
2777 case NEON::BI__builtin_neon_vcvtpq_u64_v:
2778 case NEON::BI__builtin_neon_vcvtm_s32_v:
2779 case NEON::BI__builtin_neon_vcvtm_s64_v:
2780 case NEON::BI__builtin_neon_vcvtm_u32_v:
2781 case NEON::BI__builtin_neon_vcvtm_u64_v:
2782 case NEON::BI__builtin_neon_vcvtmq_s32_v:
2783 case NEON::BI__builtin_neon_vcvtmq_s64_v:
2784 case NEON::BI__builtin_neon_vcvtmq_u32_v:
2785 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
2786 bool Double =
2787 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
2788 llvm::Type *InTy =
2789 GetNeonType(this,
2790 NeonTypeFlags(Double ? NeonTypeFlags::Float64
2791 : NeonTypeFlags::Float32, false, Quad));
2792 llvm::Type *Tys[2] = { Ty, InTy };
2793 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Tys), Ops, NameHint);
2794 }
2795 case NEON::BI__builtin_neon_vext_v:
2796 case NEON::BI__builtin_neon_vextq_v: {
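// vext extracts a vector from a pair of concatenated inputs, e.g.
// vext(a, b, 1) on <4 x i16> uses shuffle mask <1, 2, 3, 4> and yields
// <a[1], a[2], a[3], b[0]>.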
2797 int CV = cast<ConstantInt>(Ops[2])->getSExtValue();
2798 SmallVector<Constant*, 16> Indices;
2799 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
2800 Indices.push_back(ConstantInt::get(Int32Ty, i+CV));
2801
2802 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2803 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2804 Value *SV = llvm::ConstantVector::get(Indices);
2805 return Builder.CreateShuffleVector(Ops[0], Ops[1], SV, "vext");
2806 }
2807 case NEON::BI__builtin_neon_vfma_v:
2808 case NEON::BI__builtin_neon_vfmaq_v: {
2809 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
2810 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2811 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2812 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2813
2814 // The NEON intrinsic puts the accumulator first, unlike LLVM's fma.
2815 return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
2816 }
2817 case NEON::BI__builtin_neon_vld1_v:
2818 case NEON::BI__builtin_neon_vld1q_v:
2819 Ops.push_back(Align);
2820 return EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty), Ops, "vld1");
2821 case NEON::BI__builtin_neon_vld2_v:
2822 case NEON::BI__builtin_neon_vld2q_v:
2823 case NEON::BI__builtin_neon_vld3_v:
2824 case NEON::BI__builtin_neon_vld3q_v:
2825 case NEON::BI__builtin_neon_vld4_v:
2826 case NEON::BI__builtin_neon_vld4q_v: {
2827 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
2828 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, NameHint);
2829 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2830 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2831 return Builder.CreateStore(Ops[1], Ops[0]);
2832 }
2833 case NEON::BI__builtin_neon_vld1_dup_v:
2834 case NEON::BI__builtin_neon_vld1q_dup_v: {
2835 Value *V = UndefValue::get(Ty);
2836 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
2837 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2838 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
2839 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
2840 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
2841 Ops[0] = Builder.CreateInsertElement(V, Ld, CI);
2842 return EmitNeonSplat(Ops[0], CI);
2843 }
2844 case NEON::BI__builtin_neon_vld2_lane_v:
2845 case NEON::BI__builtin_neon_vld2q_lane_v:
2846 case NEON::BI__builtin_neon_vld3_lane_v:
2847 case NEON::BI__builtin_neon_vld3q_lane_v:
2848 case NEON::BI__builtin_neon_vld4_lane_v:
2849 case NEON::BI__builtin_neon_vld4q_lane_v: {
2850 Function *F = CGM.getIntrinsic(LLVMIntrinsic, Ty);
2851 for (unsigned I = 2; I < Ops.size() - 1; ++I)
2852 Ops[I] = Builder.CreateBitCast(Ops[I], Ty);
2853 Ops.push_back(Align);
2854 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), NameHint);
2855 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
2856 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
2857 return Builder.CreateStore(Ops[1], Ops[0]);
2858 }
2859 case NEON::BI__builtin_neon_vmovl_v: {
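// Lengthening move: reinterpret the input as the narrow element type, then
// sign- or zero-extend, e.g. vmovl_s16 bitcasts to <4 x i16> and sexts to
// <4 x i32>.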
2860 llvm::Type *DTy =llvm::VectorType::getTruncatedElementVectorType(VTy);
2861 Ops[0] = Builder.CreateBitCast(Ops[0], DTy);
2862 if (Usgn)
2863 return Builder.CreateZExt(Ops[0], Ty, "vmovl");
2864 return Builder.CreateSExt(Ops[0], Ty, "vmovl");
2865 }
2866 case NEON::BI__builtin_neon_vmovn_v: {
2867 llvm::Type *QTy = llvm::VectorType::getExtendedElementVectorType(VTy);
2868 Ops[0] = Builder.CreateBitCast(Ops[0], QTy);
2869 return Builder.CreateTrunc(Ops[0], Ty, "vmovn");
2870 }
2871 case NEON::BI__builtin_neon_vmull_v:
2872 // FIXME: the integer vmull operations could be emitted in terms of pure
2873 // LLVM IR (two exts followed by a mul). Unfortunately, LLVM has a habit of
2874 // hoisting the exts out of loops, and until GlobalISel can see through such
2875 // movement this leads to bad CodeGen. So we need an intrinsic for now.
2877 Int = Usgn ? Intrinsic::arm_neon_vmullu : Intrinsic::arm_neon_vmulls;
2878 Int = Type.isPoly() ? (unsigned)Intrinsic::arm_neon_vmullp : Int;
2879 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
2880 case NEON::BI__builtin_neon_vpadal_v:
2881 case NEON::BI__builtin_neon_vpadalq_v: {
2882 // The source operand type has twice as many elements, each half the size.
2883 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
2884 llvm::Type *EltTy =
2885 llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
2886 llvm::Type *NarrowTy =
2887 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
2888 llvm::Type *Tys[2] = { Ty, NarrowTy };
2889 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, NameHint);
2890 }
2891 case NEON::BI__builtin_neon_vpaddl_v:
2892 case NEON::BI__builtin_neon_vpaddlq_v: {
2893 // The source operand type has twice as many elements, each half the size.
2894 unsigned EltBits = VTy->getElementType()->getPrimitiveSizeInBits();
2895 llvm::Type *EltTy = llvm::IntegerType::get(getLLVMContext(), EltBits / 2);
2896 llvm::Type *NarrowTy =
2897 llvm::VectorType::get(EltTy, VTy->getNumElements() * 2);
2898 llvm::Type *Tys[2] = { Ty, NarrowTy };
2899 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vpaddl");
2900 }
2901 case NEON::BI__builtin_neon_vqdmlal_v:
2902 case NEON::BI__builtin_neon_vqdmlsl_v: {
2903 SmallVector<Value *, 2> MulOps(Ops.begin() + 1, Ops.end());
2904 Value *Mul = EmitNeonCall(CGM.getIntrinsic(LLVMIntrinsic, Ty),
2905 MulOps, "vqdmlal");
2906
2907 SmallVector<Value *, 2> AccumOps;
2908 AccumOps.push_back(Ops[0]);
2909 AccumOps.push_back(Mul);
2910 return EmitNeonCall(CGM.getIntrinsic(AltLLVMIntrinsic, Ty),
2911 AccumOps, NameHint);
2912 }
2913 case NEON::BI__builtin_neon_vqshl_n_v:
2914 case NEON::BI__builtin_neon_vqshlq_n_v:
2915 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshl_n",
2916 1, false);
2917 case NEON::BI__builtin_neon_vqshlu_n_v:
2918 case NEON::BI__builtin_neon_vqshluq_n_v:
2919 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshlu_n",
2920 1, false);
2921 case NEON::BI__builtin_neon_vrecpe_v:
2922 case NEON::BI__builtin_neon_vrecpeq_v:
2923 case NEON::BI__builtin_neon_vrsqrte_v:
2924 case NEON::BI__builtin_neon_vrsqrteq_v:
2925 Int = Ty->isFPOrFPVectorTy() ? LLVMIntrinsic : AltLLVMIntrinsic;
2926 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, NameHint);
2927
2928 case NEON::BI__builtin_neon_vrshr_n_v:
2929 case NEON::BI__builtin_neon_vrshrq_n_v:
2930 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshr_n",
2931 1, true);
2932 case NEON::BI__builtin_neon_vshl_n_v:
2933 case NEON::BI__builtin_neon_vshlq_n_v:
2934 Ops[1] = EmitNeonShiftVector(Ops[1], Ty, false);
2935 return Builder.CreateShl(Builder.CreateBitCast(Ops[0],Ty), Ops[1],
2936 "vshl_n");
2937 case NEON::BI__builtin_neon_vshll_n_v: {
2938 llvm::Type *SrcTy = llvm::VectorType::getTruncatedElementVectorType(VTy);
2939 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
2940 if (Usgn)
2941 Ops[0] = Builder.CreateZExt(Ops[0], VTy);
2942 else
2943 Ops[0] = Builder.CreateSExt(Ops[0], VTy);
2944 Ops[1] = EmitNeonShiftVector(Ops[1], VTy, false);
2945 return Builder.CreateShl(Ops[0], Ops[1], "vshll_n");
2946 }
2947 case NEON::BI__builtin_neon_vshrn_n_v: {
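// Narrowing shift: shift in the wide type, then truncate, e.g. for a
// <4 x i16> result, ashr/lshr the <4 x i32> source by the immediate and
// trunc to <4 x i16>.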
2948 llvm::Type *SrcTy = llvm::VectorType::getExtendedElementVectorType(VTy);
2949 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
2950 Ops[1] = EmitNeonShiftVector(Ops[1], SrcTy, false);
2951 if (Usgn)
2952 Ops[0] = Builder.CreateLShr(Ops[0], Ops[1]);
2953 else
2954 Ops[0] = Builder.CreateAShr(Ops[0], Ops[1]);
2955 return Builder.CreateTrunc(Ops[0], Ty, "vshrn_n");
2956 }
2957 case NEON::BI__builtin_neon_vshr_n_v:
2958 case NEON::BI__builtin_neon_vshrq_n_v:
2959 return EmitNeonRShiftImm(Ops[0], Ops[1], Ty, Usgn, "vshr_n");
2960 case NEON::BI__builtin_neon_vst1_v:
2961 case NEON::BI__builtin_neon_vst1q_v:
2962 case NEON::BI__builtin_neon_vst2_v:
2963 case NEON::BI__builtin_neon_vst2q_v:
2964 case NEON::BI__builtin_neon_vst3_v:
2965 case NEON::BI__builtin_neon_vst3q_v:
2966 case NEON::BI__builtin_neon_vst4_v:
2967 case NEON::BI__builtin_neon_vst4q_v:
2968 case NEON::BI__builtin_neon_vst2_lane_v:
2969 case NEON::BI__builtin_neon_vst2q_lane_v:
2970 case NEON::BI__builtin_neon_vst3_lane_v:
2971 case NEON::BI__builtin_neon_vst3q_lane_v:
2972 case NEON::BI__builtin_neon_vst4_lane_v:
2973 case NEON::BI__builtin_neon_vst4q_lane_v:
2974 Ops.push_back(Align);
2975 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "");
2976 case NEON::BI__builtin_neon_vsubhn_v: {
2977 llvm::VectorType *SrcTy =
2978 llvm::VectorType::getExtendedElementVectorType(VTy);
2979
2980 // %diff = sub <4 x i32> %lhs, %rhs
2981 Ops[0] = Builder.CreateBitCast(Ops[0], SrcTy);
2982 Ops[1] = Builder.CreateBitCast(Ops[1], SrcTy);
2983 Ops[0] = Builder.CreateSub(Ops[0], Ops[1], "vsubhn");
2984
2985 // %high = lshr <4 x i32> %diff, <i32 16, i32 16, i32 16, i32 16>
2986 Constant *ShiftAmt = ConstantInt::get(SrcTy->getElementType(),
2987 SrcTy->getScalarSizeInBits() / 2);
2988 ShiftAmt = ConstantVector::getSplat(VTy->getNumElements(), ShiftAmt);
2989 Ops[0] = Builder.CreateLShr(Ops[0], ShiftAmt, "vsubhn");
2990
2991 // %res = trunc <4 x i32> %high to <4 x i16>
2992 return Builder.CreateTrunc(Ops[0], VTy, "vsubhn");
2993 }
2994 case NEON::BI__builtin_neon_vtrn_v:
2995 case NEON::BI__builtin_neon_vtrnq_v: {
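// Transpose pairs of elements, e.g. for <4 x i16> inputs the two results
// use shuffle masks <0, 4, 2, 6> and <1, 5, 3, 7>.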
2996 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
2997 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
2998 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
2999 Value *SV = nullptr;
3000
3001 for (unsigned vi = 0; vi != 2; ++vi) {
3002 SmallVector<Constant*, 16> Indices;
3003 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
3004 Indices.push_back(Builder.getInt32(i+vi));
3005 Indices.push_back(Builder.getInt32(i+e+vi));
3006 }
3007 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
3008 SV = llvm::ConstantVector::get(Indices);
3009 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
3010 SV = Builder.CreateStore(SV, Addr);
3011 }
3012 return SV;
3013 }
3014 case NEON::BI__builtin_neon_vtst_v:
3015 case NEON::BI__builtin_neon_vtstq_v: {
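// vtst(a, b) computes (a & b) != 0 per lane and sign-extends the i1
// result, e.g. a lane becomes all-ones when the tested bits are nonzero.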
3016 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3017 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3018 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
3019 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
3020 ConstantAggregateZero::get(Ty));
3021 return Builder.CreateSExt(Ops[0], Ty, "vtst");
3022 }
3023 case NEON::BI__builtin_neon_vuzp_v:
3024 case NEON::BI__builtin_neon_vuzpq_v: {
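// Unzip (de-interleave) the inputs, e.g. for <4 x i16> inputs the two
// results use shuffle masks <0, 2, 4, 6> and <1, 3, 5, 7>.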
3025 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
3026 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3027 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3028 Value *SV = nullptr;
3029
3030 for (unsigned vi = 0; vi != 2; ++vi) {
3031 SmallVector<Constant*, 16> Indices;
3032 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
3033 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
3034
3035 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
3036 SV = llvm::ConstantVector::get(Indices);
3037 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
3038 SV = Builder.CreateStore(SV, Addr);
3039 }
3040 return SV;
3041 }
3042 case NEON::BI__builtin_neon_vzip_v:
3043 case NEON::BI__builtin_neon_vzipq_v: {
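// Zip (interleave) the inputs, e.g. for <4 x i16> inputs the two results
// use shuffle masks <0, 4, 1, 5> and <2, 6, 3, 7>.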
3044 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
3045 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3046 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
3047 Value *SV = nullptr;
3048
3049 for (unsigned vi = 0; vi != 2; ++vi) {
3050 SmallVector<Constant*, 16> Indices;
3051 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
3052 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
3053 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
3054 }
3055 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
3056 SV = llvm::ConstantVector::get(Indices);
3057 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
3058 SV = Builder.CreateStore(SV, Addr);
3059 }
3060 return SV;
3061 }
3062 }
3063
3064 assert(Int && "Expected valid intrinsic number");
3065
3066 // Determine the type(s) of this overloaded NEON intrinsic.
3067 Function *F = LookupNeonLLVMIntrinsic(Int, Modifier, Ty, E);
3068
3069 Value *Result = EmitNeonCall(F, Ops, NameHint);
3070 llvm::Type *ResultType = ConvertType(E->getType());
3071 // Cast the one-element vector result of an AArch64 intrinsic back to
3072 // the scalar type expected by the builtin.
3073 return Builder.CreateBitCast(Result, ResultType, NameHint);
3074 }
3075
3076 Value *CodeGenFunction::EmitAArch64CompareBuiltinExpr(
3077 Value *Op, llvm::Type *Ty, const CmpInst::Predicate Fp,
3078 const CmpInst::Predicate Ip, const Twine &Name) {
3079 llvm::Type *OTy = Op->getType();
3080
3081 // FIXME: this is utterly horrific. We should not be looking at previous
3082 // codegen context to find out what needs doing. Unfortunately TableGen
3083 // currently gives us exactly the same calls for vceqz_f32 and vceqz_s32
3084 // (etc).
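// e.g. vceqz_f32 wants an fcmp oeq against +0.0 while vceqz_s32 wants an
// icmp eq against 0, yet both arrive here as structurally identical calls;
// peeking through the bitcast recovers the original element type.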
3085 if (BitCastInst *BI = dyn_cast<BitCastInst>(Op))
3086 OTy = BI->getOperand(0)->getType();
3087
3088 Op = Builder.CreateBitCast(Op, OTy);
3089 if (OTy->getScalarType()->isFloatingPointTy()) {
3090 Op = Builder.CreateFCmp(Fp, Op, Constant::getNullValue(OTy));
3091 } else {
3092 Op = Builder.CreateICmp(Ip, Op, Constant::getNullValue(OTy));
3093 }
3094 return Builder.CreateSExt(Op, Ty, Name);
3095 }
3096
3097 static Value *packTBLDVectorList(CodeGenFunction &CGF, ArrayRef<Value *> Ops,
3098 Value *ExtOp, Value *IndexOp,
3099 llvm::Type *ResTy, unsigned IntID,
3100 const char *Name) {
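// Pack a list of 64-bit table registers into the 128-bit operands expected
// by the AArch64 TBL/TBX intrinsics. For example (sketch), two <8 x i8>
// tables are concatenated into one <16 x i8> operand by a shufflevector
// with mask <0, 1, ..., 15>.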
3101 SmallVector<Value *, 2> TblOps;
3102 if (ExtOp)
3103 TblOps.push_back(ExtOp);
3104
3105 // Build a vector containing sequential numbers like (0, 1, 2, ..., 15).
3106 SmallVector<Constant*, 16> Indices;
3107 llvm::VectorType *TblTy = cast<llvm::VectorType>(Ops[0]->getType());
3108 for (unsigned i = 0, e = TblTy->getNumElements(); i != e; ++i) {
3109 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i));
3110 Indices.push_back(ConstantInt::get(CGF.Int32Ty, 2*i+1));
3111 }
3112 Value *SV = llvm::ConstantVector::get(Indices);
3113
3114 int PairPos = 0, End = Ops.size() - 1;
3115 while (PairPos < End) {
3116 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
3117 Ops[PairPos+1], SV, Name));
3118 PairPos += 2;
3119 }
3120
3121 // If there's an odd number of 64-bit lookup tables, fill the high 64 bits
3122 // of the last 128-bit lookup table with zero.
3123 if (PairPos == End) {
3124 Value *ZeroTbl = ConstantAggregateZero::get(TblTy);
3125 TblOps.push_back(CGF.Builder.CreateShuffleVector(Ops[PairPos],
3126 ZeroTbl, SV, Name));
3127 }
3128
3129 Function *TblF;
3130 TblOps.push_back(IndexOp);
3131 TblF = CGF.CGM.getIntrinsic(IntID, ResTy);
3132
3133 return CGF.EmitNeonCall(TblF, TblOps, Name);
3134 }
3135
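// Map the ARM hint builtins onto the llvm.arm.hint intrinsic, e.g. __wfe()
// becomes a call to @llvm.arm.hint(i32 2).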
3136 Value *CodeGenFunction::GetValueForARMHint(unsigned BuiltinID) {
3137 switch (BuiltinID) {
3138 default:
3139 return nullptr;
3140 case ARM::BI__builtin_arm_nop:
3141 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3142 llvm::ConstantInt::get(Int32Ty, 0));
3143 case ARM::BI__builtin_arm_yield:
3144 case ARM::BI__yield:
3145 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3146 llvm::ConstantInt::get(Int32Ty, 1));
3147 case ARM::BI__builtin_arm_wfe:
3148 case ARM::BI__wfe:
3149 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3150 llvm::ConstantInt::get(Int32Ty, 2));
3151 case ARM::BI__builtin_arm_wfi:
3152 case ARM::BI__wfi:
3153 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3154 llvm::ConstantInt::get(Int32Ty, 3));
3155 case ARM::BI__builtin_arm_sev:
3156 case ARM::BI__sev:
3157 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3158 llvm::ConstantInt::get(Int32Ty, 4));
3159 case ARM::BI__builtin_arm_sevl:
3160 case ARM::BI__sevl:
3161 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_hint),
3162 llvm::ConstantInt::get(Int32Ty, 5));
3163 }
3164 }
3165
3166 Value *CodeGenFunction::EmitARMBuiltinExpr(unsigned BuiltinID,
3167 const CallExpr *E) {
3168 if (auto Hint = GetValueForARMHint(BuiltinID))
3169 return Hint;
3170
3171 if (BuiltinID == ARM::BI__emit) {
3172 bool IsThumb = getTarget().getTriple().getArch() == llvm::Triple::thumb;
3173 llvm::FunctionType *FTy =
3174 llvm::FunctionType::get(VoidTy, /*Variadic=*/false);
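// __emit places its constant argument directly into the instruction
// stream via inline asm, e.g. (illustrative) __emit(0xbf00) in Thumb mode
// produces ".inst.n 0xbf00", the T1 NOP encoding.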
3175
3176 APSInt Value;
3177 if (!E->getArg(0)->EvaluateAsInt(Value, CGM.getContext()))
3178 llvm_unreachable("Sema will ensure that the parameter is constant");
3179
3180 uint64_t ZExtValue = Value.zextOrTrunc(IsThumb ? 16 : 32).getZExtValue();
3181
3182 llvm::InlineAsm *Emit =
3183 IsThumb ? InlineAsm::get(FTy, ".inst.n 0x" + utohexstr(ZExtValue), "",
3184 /*SideEffects=*/true)
3185 : InlineAsm::get(FTy, ".inst 0x" + utohexstr(ZExtValue), "",
3186 /*SideEffects=*/true);
3187
3188 return Builder.CreateCall(Emit);
3189 }
3190
3191 if (BuiltinID == ARM::BI__builtin_arm_dbg) {
3192 Value *Option = EmitScalarExpr(E->getArg(0));
3193 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_dbg), Option);
3194 }
3195
3196 if (BuiltinID == ARM::BI__builtin_arm_prefetch) {
3197 Value *Address = EmitScalarExpr(E->getArg(0));
3198 Value *RW = EmitScalarExpr(E->getArg(1));
3199 Value *IsData = EmitScalarExpr(E->getArg(2));
3200
3201 // The locality argument is not supported on the ARM target.
3202 Value *Locality = llvm::ConstantInt::get(Int32Ty, 3);
3203
3204 Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
3205 return Builder.CreateCall4(F, Address, RW, Locality, IsData);
3206 }
3207
3208 if (BuiltinID == ARM::BI__builtin_arm_rbit) {
3209 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_rbit),
3210 EmitScalarExpr(E->getArg(0)),
3211 "rbit");
3212 }
3213
3214 if (BuiltinID == ARM::BI__clear_cache) {
3215 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
3216 const FunctionDecl *FD = E->getDirectCallee();
3217 SmallVector<Value*, 2> Ops;
3218 for (unsigned i = 0; i < 2; i++)
3219 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3220 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
3221 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
3222 StringRef Name = FD->getName();
3223 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
3224 }
3225
3226 if (BuiltinID == ARM::BI__builtin_arm_ldrexd ||
3227 ((BuiltinID == ARM::BI__builtin_arm_ldrex ||
3228 BuiltinID == ARM::BI__builtin_arm_ldaex) &&
3229 getContext().getTypeSize(E->getType()) == 64) ||
3230 BuiltinID == ARM::BI__ldrexd) {
3231 Function *F;
3232
3233 switch (BuiltinID) {
3234 default: llvm_unreachable("unexpected builtin");
3235 case ARM::BI__builtin_arm_ldaex:
3236 F = CGM.getIntrinsic(Intrinsic::arm_ldaexd);
3237 break;
3238 case ARM::BI__builtin_arm_ldrexd:
3239 case ARM::BI__builtin_arm_ldrex:
3240 case ARM::BI__ldrexd:
3241 F = CGM.getIntrinsic(Intrinsic::arm_ldrexd);
3242 break;
3243 }
3244
3245 Value *LdPtr = EmitScalarExpr(E->getArg(0));
3246 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
3247 "ldrexd");
3248
3249 Value *Val0 = Builder.CreateExtractValue(Val, 1);
3250 Value *Val1 = Builder.CreateExtractValue(Val, 0);
3251 Val0 = Builder.CreateZExt(Val0, Int64Ty);
3252 Val1 = Builder.CreateZExt(Val1, Int64Ty);
3253
3254 Value *ShiftCst = llvm::ConstantInt::get(Int64Ty, 32);
3255 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
3256 Val = Builder.CreateOr(Val, Val1);
3257 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
3258 }
3259
3260 if (BuiltinID == ARM::BI__builtin_arm_ldrex ||
3261 BuiltinID == ARM::BI__builtin_arm_ldaex) {
3262 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
3263
3264 QualType Ty = E->getType();
3265 llvm::Type *RealResTy = ConvertType(Ty);
3266 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
3267 getContext().getTypeSize(Ty));
3268 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
3269
3270 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_ldaex
3271 ? Intrinsic::arm_ldaex
3272 : Intrinsic::arm_ldrex,
3273 LoadAddr->getType());
3274 Value *Val = Builder.CreateCall(F, LoadAddr, "ldrex");
3275
3276 if (RealResTy->isPointerTy())
3277 return Builder.CreateIntToPtr(Val, RealResTy);
3278 else {
3279 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
3280 return Builder.CreateBitCast(Val, RealResTy);
3281 }
3282 }
3283
3284 if (BuiltinID == ARM::BI__builtin_arm_strexd ||
3285 ((BuiltinID == ARM::BI__builtin_arm_stlex ||
3286 BuiltinID == ARM::BI__builtin_arm_strex) &&
3287 getContext().getTypeSize(E->getArg(0)->getType()) == 64)) {
3288 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
3289 ? Intrinsic::arm_stlexd
3290 : Intrinsic::arm_strexd);
3291 llvm::Type *STy = llvm::StructType::get(Int32Ty, Int32Ty, nullptr);
3292
3293 Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
3294 Value *Val = EmitScalarExpr(E->getArg(0));
3295 Builder.CreateStore(Val, Tmp);
3296
3297 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
3298 Val = Builder.CreateLoad(LdPtr);
3299
3300 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
3301 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
3302 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), Int8PtrTy);
3303 return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "strexd");
3304 }
3305
3306 if (BuiltinID == ARM::BI__builtin_arm_strex ||
3307 BuiltinID == ARM::BI__builtin_arm_stlex) {
3308 Value *StoreVal = EmitScalarExpr(E->getArg(0));
3309 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
3310
3311 QualType Ty = E->getArg(0)->getType();
3312 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
3313 getContext().getTypeSize(Ty));
3314 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
3315
3316 if (StoreVal->getType()->isPointerTy())
3317 StoreVal = Builder.CreatePtrToInt(StoreVal, Int32Ty);
3318 else {
3319 StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
3320 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int32Ty);
3321 }
3322
3323 Function *F = CGM.getIntrinsic(BuiltinID == ARM::BI__builtin_arm_stlex
3324 ? Intrinsic::arm_stlex
3325 : Intrinsic::arm_strex,
3326 StoreAddr->getType());
3327 return Builder.CreateCall2(F, StoreVal, StoreAddr, "strex");
3328 }
3329
3330 if (BuiltinID == ARM::BI__builtin_arm_clrex) {
3331 Function *F = CGM.getIntrinsic(Intrinsic::arm_clrex);
3332 return Builder.CreateCall(F);
3333 }
3334
3335 // CRC32
3336 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
3337 switch (BuiltinID) {
3338 case ARM::BI__builtin_arm_crc32b:
3339 CRCIntrinsicID = Intrinsic::arm_crc32b; break;
3340 case ARM::BI__builtin_arm_crc32cb:
3341 CRCIntrinsicID = Intrinsic::arm_crc32cb; break;
3342 case ARM::BI__builtin_arm_crc32h:
3343 CRCIntrinsicID = Intrinsic::arm_crc32h; break;
3344 case ARM::BI__builtin_arm_crc32ch:
3345 CRCIntrinsicID = Intrinsic::arm_crc32ch; break;
3346 case ARM::BI__builtin_arm_crc32w:
3347 case ARM::BI__builtin_arm_crc32d:
3348 CRCIntrinsicID = Intrinsic::arm_crc32w; break;
3349 case ARM::BI__builtin_arm_crc32cw:
3350 case ARM::BI__builtin_arm_crc32cd:
3351 CRCIntrinsicID = Intrinsic::arm_crc32cw; break;
3352 }
3353
3354 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
3355 Value *Arg0 = EmitScalarExpr(E->getArg(0));
3356 Value *Arg1 = EmitScalarExpr(E->getArg(1));
3357
3358 // crc32{c,}d intrinsics are implemented as two calls to crc32{c,}w
3359 // intrinsics, hence we need different codegen for these cases.
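// e.g. crc32d(crc, x) is emitted as
// crc32w(crc32w(crc, trunc(x)), trunc(x >> 32)).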
3360 if (BuiltinID == ARM::BI__builtin_arm_crc32d ||
3361 BuiltinID == ARM::BI__builtin_arm_crc32cd) {
3362 Value *C1 = llvm::ConstantInt::get(Int64Ty, 32);
3363 Value *Arg1a = Builder.CreateTruncOrBitCast(Arg1, Int32Ty);
3364 Value *Arg1b = Builder.CreateLShr(Arg1, C1);
3365 Arg1b = Builder.CreateTruncOrBitCast(Arg1b, Int32Ty);
3366
3367 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3368 Value *Res = Builder.CreateCall2(F, Arg0, Arg1a);
3369 return Builder.CreateCall2(F, Res, Arg1b);
3370 } else {
3371 Arg1 = Builder.CreateZExtOrBitCast(Arg1, Int32Ty);
3372
3373 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
3374 return Builder.CreateCall2(F, Arg0, Arg1);
3375 }
3376 }
3377
3378 SmallVector<Value*, 4> Ops;
3379 llvm::Value *Align = nullptr;
3380 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++) {
3381 if (i == 0) {
3382 switch (BuiltinID) {
3383 case NEON::BI__builtin_neon_vld1_v:
3384 case NEON::BI__builtin_neon_vld1q_v:
3385 case NEON::BI__builtin_neon_vld1q_lane_v:
3386 case NEON::BI__builtin_neon_vld1_lane_v:
3387 case NEON::BI__builtin_neon_vld1_dup_v:
3388 case NEON::BI__builtin_neon_vld1q_dup_v:
3389 case NEON::BI__builtin_neon_vst1_v:
3390 case NEON::BI__builtin_neon_vst1q_v:
3391 case NEON::BI__builtin_neon_vst1q_lane_v:
3392 case NEON::BI__builtin_neon_vst1_lane_v:
3393 case NEON::BI__builtin_neon_vst2_v:
3394 case NEON::BI__builtin_neon_vst2q_v:
3395 case NEON::BI__builtin_neon_vst2_lane_v:
3396 case NEON::BI__builtin_neon_vst2q_lane_v:
3397 case NEON::BI__builtin_neon_vst3_v:
3398 case NEON::BI__builtin_neon_vst3q_v:
3399 case NEON::BI__builtin_neon_vst3_lane_v:
3400 case NEON::BI__builtin_neon_vst3q_lane_v:
3401 case NEON::BI__builtin_neon_vst4_v:
3402 case NEON::BI__builtin_neon_vst4q_v:
3403 case NEON::BI__builtin_neon_vst4_lane_v:
3404 case NEON::BI__builtin_neon_vst4q_lane_v:
3405 // Get the alignment for the argument in addition to the value;
3406 // we'll use it later.
3407 std::pair<llvm::Value*, unsigned> Src =
3408 EmitPointerWithAlignment(E->getArg(0));
3409 Ops.push_back(Src.first);
3410 Align = Builder.getInt32(Src.second);
3411 continue;
3412 }
3413 }
3414 if (i == 1) {
3415 switch (BuiltinID) {
3416 case NEON::BI__builtin_neon_vld2_v:
3417 case NEON::BI__builtin_neon_vld2q_v:
3418 case NEON::BI__builtin_neon_vld3_v:
3419 case NEON::BI__builtin_neon_vld3q_v:
3420 case NEON::BI__builtin_neon_vld4_v:
3421 case NEON::BI__builtin_neon_vld4q_v:
3422 case NEON::BI__builtin_neon_vld2_lane_v:
3423 case NEON::BI__builtin_neon_vld2q_lane_v:
3424 case NEON::BI__builtin_neon_vld3_lane_v:
3425 case NEON::BI__builtin_neon_vld3q_lane_v:
3426 case NEON::BI__builtin_neon_vld4_lane_v:
3427 case NEON::BI__builtin_neon_vld4q_lane_v:
3428 case NEON::BI__builtin_neon_vld2_dup_v:
3429 case NEON::BI__builtin_neon_vld3_dup_v:
3430 case NEON::BI__builtin_neon_vld4_dup_v:
3431 // Get the alignment for the argument in addition to the value;
3432 // we'll use it later.
3433 std::pair<llvm::Value*, unsigned> Src =
3434 EmitPointerWithAlignment(E->getArg(1));
3435 Ops.push_back(Src.first);
3436 Align = Builder.getInt32(Src.second);
3437 continue;
3438 }
3439 }
3440 Ops.push_back(EmitScalarExpr(E->getArg(i)));
3441 }
3442
3443 switch (BuiltinID) {
3444 default: break;
3445 // vget_lane and vset_lane are not overloaded and do not have an extra
3446 // argument that specifies the vector type.
3447 case NEON::BI__builtin_neon_vget_lane_i8:
3448 case NEON::BI__builtin_neon_vget_lane_i16:
3449 case NEON::BI__builtin_neon_vget_lane_i32:
3450 case NEON::BI__builtin_neon_vget_lane_i64:
3451 case NEON::BI__builtin_neon_vget_lane_f32:
3452 case NEON::BI__builtin_neon_vgetq_lane_i8:
3453 case NEON::BI__builtin_neon_vgetq_lane_i16:
3454 case NEON::BI__builtin_neon_vgetq_lane_i32:
3455 case NEON::BI__builtin_neon_vgetq_lane_i64:
3456 case NEON::BI__builtin_neon_vgetq_lane_f32:
3457 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
3458 "vget_lane");
3459 case NEON::BI__builtin_neon_vset_lane_i8:
3460 case NEON::BI__builtin_neon_vset_lane_i16:
3461 case NEON::BI__builtin_neon_vset_lane_i32:
3462 case NEON::BI__builtin_neon_vset_lane_i64:
3463 case NEON::BI__builtin_neon_vset_lane_f32:
3464 case NEON::BI__builtin_neon_vsetq_lane_i8:
3465 case NEON::BI__builtin_neon_vsetq_lane_i16:
3466 case NEON::BI__builtin_neon_vsetq_lane_i32:
3467 case NEON::BI__builtin_neon_vsetq_lane_i64:
3468 case NEON::BI__builtin_neon_vsetq_lane_f32:
3469 Ops.push_back(EmitScalarExpr(E->getArg(2)));
3470 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
3471
3472 // Non-polymorphic crypto instructions are likewise not overloaded.
3473 case NEON::BI__builtin_neon_vsha1h_u32:
3474 Ops.push_back(EmitScalarExpr(E->getArg(0)));
3475 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1h), Ops,
3476 "vsha1h");
3477 case NEON::BI__builtin_neon_vsha1cq_u32:
3478 Ops.push_back(EmitScalarExpr(E->getArg(2)));
3479 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1c), Ops,
3480 "vsha1c");
3481 case NEON::BI__builtin_neon_vsha1pq_u32:
3482 Ops.push_back(EmitScalarExpr(E->getArg(2)));
3483 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1p), Ops,
3484 "vsha1p");
3485 case NEON::BI__builtin_neon_vsha1mq_u32:
3486 Ops.push_back(EmitScalarExpr(E->getArg(2)));
3487 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_sha1m), Ops,
3488 "vsha1m");
3489 }
3490
3491 // Get the last argument, which specifies the vector type.
3492 llvm::APSInt Result;
3493 const Expr *Arg = E->getArg(E->getNumArgs()-1);
3494 if (!Arg->isIntegerConstantExpr(Result, getContext()))
3495 return nullptr;
3496
3497 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f ||
3498 BuiltinID == ARM::BI__builtin_arm_vcvtr_d) {
3499 // Determine the overloaded type of this builtin.
3500 llvm::Type *Ty;
3501 if (BuiltinID == ARM::BI__builtin_arm_vcvtr_f)
3502 Ty = FloatTy;
3503 else
3504 Ty = DoubleTy;
3505
3506 // Determine whether this is an unsigned conversion or not.
3507 bool usgn = Result.getZExtValue() == 1;
3508 unsigned Int = usgn ? Intrinsic::arm_vcvtru : Intrinsic::arm_vcvtr;
3509
3510 // Call the appropriate intrinsic.
3511 Function *F = CGM.getIntrinsic(Int, Ty);
3512 return Builder.CreateCall(F, Ops, "vcvtr");
3513 }
3514
3515 // Determine the type of this overloaded NEON intrinsic.
3516 NeonTypeFlags Type(Result.getZExtValue());
3517 bool usgn = Type.isUnsigned();
3518 bool rightShift = false;
3519
3520 llvm::VectorType *VTy = GetNeonType(this, Type);
3521 llvm::Type *Ty = VTy;
3522 if (!Ty)
3523 return nullptr;
3524
3525 // Many NEON builtins have identical semantics and uses in ARM and
3526 // AArch64. Emit these in a single function.
3527 auto IntrinsicMap = makeArrayRef(ARMSIMDIntrinsicMap);
3528 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
3529 IntrinsicMap, BuiltinID, NEONSIMDIntrinsicsProvenSorted);
3530 if (Builtin)
3531 return EmitCommonNeonBuiltinExpr(
3532 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
3533 Builtin->NameHint, Builtin->TypeModifier, E, Ops, Align);
3534
3535 unsigned Int;
3536 switch (BuiltinID) {
3537 default: return nullptr;
3538 case NEON::BI__builtin_neon_vld1q_lane_v:
3539 // Handle 64-bit integer elements as a special case. Use shuffles of
3540 // one-element vectors to avoid poor code for i64 in the backend.
3541 if (VTy->getElementType()->isIntegerTy(64)) {
3542 // Extract the other lane.
3543 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3544 int Lane = cast<ConstantInt>(Ops[2])->getZExtValue();
3545 Value *SV = llvm::ConstantVector::get(ConstantInt::get(Int32Ty, 1-Lane));
3546 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
3547 // Load the value as a one-element vector.
3548 Ty = llvm::VectorType::get(VTy->getElementType(), 1);
3549 Function *F = CGM.getIntrinsic(Intrinsic::arm_neon_vld1, Ty);
3550 Value *Ld = Builder.CreateCall2(F, Ops[0], Align);
3551 // Combine them.
3552 SmallVector<Constant*, 2> Indices;
3553 Indices.push_back(ConstantInt::get(Int32Ty, 1-Lane));
3554 Indices.push_back(ConstantInt::get(Int32Ty, Lane));
3555 SV = llvm::ConstantVector::get(Indices);
3556 return Builder.CreateShuffleVector(Ops[1], Ld, SV, "vld1q_lane");
3557 }
3558 // fall through
3559 case NEON::BI__builtin_neon_vld1_lane_v: {
3560 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3561 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
3562 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3563 LoadInst *Ld = Builder.CreateLoad(Ops[0]);
3564 Ld->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3565 return Builder.CreateInsertElement(Ops[1], Ld, Ops[2], "vld1_lane");
3566 }
3567 case NEON::BI__builtin_neon_vld2_dup_v:
3568 case NEON::BI__builtin_neon_vld3_dup_v:
3569 case NEON::BI__builtin_neon_vld4_dup_v: {
3570 // Handle 64-bit elements as a special case; no "dup" is needed.
3571 if (VTy->getElementType()->getPrimitiveSizeInBits() == 64) {
3572 switch (BuiltinID) {
3573 case NEON::BI__builtin_neon_vld2_dup_v:
3574 Int = Intrinsic::arm_neon_vld2;
3575 break;
3576 case NEON::BI__builtin_neon_vld3_dup_v:
3577 Int = Intrinsic::arm_neon_vld3;
3578 break;
3579 case NEON::BI__builtin_neon_vld4_dup_v:
3580 Int = Intrinsic::arm_neon_vld4;
3581 break;
3582 default: llvm_unreachable("unknown vld_dup intrinsic?");
3583 }
3584 Function *F = CGM.getIntrinsic(Int, Ty);
3585 Ops[1] = Builder.CreateCall2(F, Ops[1], Align, "vld_dup");
3586 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3587 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3588 return Builder.CreateStore(Ops[1], Ops[0]);
3589 }
3590 switch (BuiltinID) {
3591 case NEON::BI__builtin_neon_vld2_dup_v:
3592 Int = Intrinsic::arm_neon_vld2lane;
3593 break;
3594 case NEON::BI__builtin_neon_vld3_dup_v:
3595 Int = Intrinsic::arm_neon_vld3lane;
3596 break;
3597 case NEON::BI__builtin_neon_vld4_dup_v:
3598 Int = Intrinsic::arm_neon_vld4lane;
3599 break;
3600 default: llvm_unreachable("unknown vld_dup intrinsic?");
3601 }
3602 Function *F = CGM.getIntrinsic(Int, Ty);
3603 llvm::StructType *STy = cast<llvm::StructType>(F->getReturnType());
3604
3605 SmallVector<Value*, 6> Args;
3606 Args.push_back(Ops[1]);
3607 Args.append(STy->getNumElements(), UndefValue::get(Ty));
3608
3609 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
3610 Args.push_back(CI);
3611 Args.push_back(Align);
3612
3613 Ops[1] = Builder.CreateCall(F, Args, "vld_dup");
3614 // Splat lane 0 to all elements in each vector of the result.
3615 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
3616 Value *Val = Builder.CreateExtractValue(Ops[1], i);
3617 Value *Elt = Builder.CreateBitCast(Val, Ty);
3618 Elt = EmitNeonSplat(Elt, CI);
3619 Elt = Builder.CreateBitCast(Elt, Val->getType());
3620 Ops[1] = Builder.CreateInsertValue(Ops[1], Elt, i);
3621 }
3622 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3623 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3624 return Builder.CreateStore(Ops[1], Ops[0]);
3625 }
3626 case NEON::BI__builtin_neon_vqrshrn_n_v:
3627 Int =
3628 usgn ? Intrinsic::arm_neon_vqrshiftnu : Intrinsic::arm_neon_vqrshiftns;
3629 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n",
3630 1, true);
3631 case NEON::BI__builtin_neon_vqrshrun_n_v:
3632 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqrshiftnsu, Ty),
3633 Ops, "vqrshrun_n", 1, true);
3634 case NEON::BI__builtin_neon_vqshrn_n_v:
3635 Int = usgn ? Intrinsic::arm_neon_vqshiftnu : Intrinsic::arm_neon_vqshiftns;
3636 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n",
3637 1, true);
3638 case NEON::BI__builtin_neon_vqshrun_n_v:
3639 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vqshiftnsu, Ty),
3640 Ops, "vqshrun_n", 1, true);
3641 case NEON::BI__builtin_neon_vrecpe_v:
3642 case NEON::BI__builtin_neon_vrecpeq_v:
3643 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrecpe, Ty),
3644 Ops, "vrecpe");
3645 case NEON::BI__builtin_neon_vrshrn_n_v:
3646 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vrshiftn, Ty),
3647 Ops, "vrshrn_n", 1, true);
3648 case NEON::BI__builtin_neon_vrsra_n_v:
3649 case NEON::BI__builtin_neon_vrsraq_n_v:
3650 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3651 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3652 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, true);
3653 Int = usgn ? Intrinsic::arm_neon_vrshiftu : Intrinsic::arm_neon_vrshifts;
3654 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Ty), Ops[1], Ops[2]);
3655 return Builder.CreateAdd(Ops[0], Ops[1], "vrsra_n");
3656 case NEON::BI__builtin_neon_vsri_n_v:
3657 case NEON::BI__builtin_neon_vsriq_n_v:
3658 rightShift = true;
3659 case NEON::BI__builtin_neon_vsli_n_v:
3660 case NEON::BI__builtin_neon_vsliq_n_v:
3661 Ops[2] = EmitNeonShiftVector(Ops[2], Ty, rightShift);
3662 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vshiftins, Ty),
3663 Ops, "vsli_n");
3664 case NEON::BI__builtin_neon_vsra_n_v:
3665 case NEON::BI__builtin_neon_vsraq_n_v:
3666 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
3667 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
3668 return Builder.CreateAdd(Ops[0], Ops[1]);
3669 case NEON::BI__builtin_neon_vst1q_lane_v:
3670 // Handle 64-bit integer elements as a special case. Use a shuffle to get
3671 // a one-element vector and avoid poor code for i64 in the backend.
3672 if (VTy->getElementType()->isIntegerTy(64)) {
3673 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3674 Value *SV = llvm::ConstantVector::get(cast<llvm::Constant>(Ops[2]));
3675 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV);
3676 Ops[2] = Align;
3677 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::arm_neon_vst1,
3678 Ops[1]->getType()), Ops);
3679 }
3680 // fall through
3681 case NEON::BI__builtin_neon_vst1_lane_v: {
3682 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
3683 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
3684 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
3685 StoreInst *St = Builder.CreateStore(Ops[1],
3686 Builder.CreateBitCast(Ops[0], Ty));
3687 St->setAlignment(cast<ConstantInt>(Align)->getZExtValue());
3688 return St;
3689 }
3690 case NEON::BI__builtin_neon_vtbl1_v:
3691 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl1),
3692 Ops, "vtbl1");
3693 case NEON::BI__builtin_neon_vtbl2_v:
3694 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl2),
3695 Ops, "vtbl2");
3696 case NEON::BI__builtin_neon_vtbl3_v:
3697 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl3),
3698 Ops, "vtbl3");
3699 case NEON::BI__builtin_neon_vtbl4_v:
3700 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbl4),
3701 Ops, "vtbl4");
3702 case NEON::BI__builtin_neon_vtbx1_v:
3703 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx1),
3704 Ops, "vtbx1");
3705 case NEON::BI__builtin_neon_vtbx2_v:
3706 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx2),
3707 Ops, "vtbx2");
3708 case NEON::BI__builtin_neon_vtbx3_v:
3709 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx3),
3710 Ops, "vtbx3");
3711 case NEON::BI__builtin_neon_vtbx4_v:
3712 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::arm_neon_vtbx4),
3713 Ops, "vtbx4");
3714 }
3715 }
3716
3717 static Value *EmitAArch64TblBuiltinExpr(CodeGenFunction &CGF, unsigned BuiltinID,
3718 const CallExpr *E,
3719 SmallVectorImpl<Value *> &Ops) {
3720 unsigned int Int = 0;
3721 const char *s = nullptr;
3722
3723 switch (BuiltinID) {
3724 default:
3725 return nullptr;
3726 case NEON::BI__builtin_neon_vtbl1_v:
3727 case NEON::BI__builtin_neon_vqtbl1_v:
3728 case NEON::BI__builtin_neon_vqtbl1q_v:
3729 case NEON::BI__builtin_neon_vtbl2_v:
3730 case NEON::BI__builtin_neon_vqtbl2_v:
3731 case NEON::BI__builtin_neon_vqtbl2q_v:
3732 case NEON::BI__builtin_neon_vtbl3_v:
3733 case NEON::BI__builtin_neon_vqtbl3_v:
3734 case NEON::BI__builtin_neon_vqtbl3q_v:
3735 case NEON::BI__builtin_neon_vtbl4_v:
3736 case NEON::BI__builtin_neon_vqtbl4_v:
3737 case NEON::BI__builtin_neon_vqtbl4q_v:
3738 break;
3739 case NEON::BI__builtin_neon_vtbx1_v:
3740 case NEON::BI__builtin_neon_vqtbx1_v:
3741 case NEON::BI__builtin_neon_vqtbx1q_v:
3742 case NEON::BI__builtin_neon_vtbx2_v:
3743 case NEON::BI__builtin_neon_vqtbx2_v:
3744 case NEON::BI__builtin_neon_vqtbx2q_v:
3745 case NEON::BI__builtin_neon_vtbx3_v:
3746 case NEON::BI__builtin_neon_vqtbx3_v:
3747 case NEON::BI__builtin_neon_vqtbx3q_v:
3748 case NEON::BI__builtin_neon_vtbx4_v:
3749 case NEON::BI__builtin_neon_vqtbx4_v:
3750 case NEON::BI__builtin_neon_vqtbx4q_v:
3751 break;
3752 }
3753
3754 assert(E->getNumArgs() >= 3);
3755
3756 // Get the last argument, which specifies the vector type.
3757 llvm::APSInt Result;
3758 const Expr *Arg = E->getArg(E->getNumArgs() - 1);
3759 if (!Arg->isIntegerConstantExpr(Result, CGF.getContext()))
3760 return nullptr;
3761
3762 // Determine the type of this overloaded NEON intrinsic.
3763 NeonTypeFlags Type(Result.getZExtValue());
3764 llvm::VectorType *VTy = GetNeonType(&CGF, Type);
3765 llvm::Type *Ty = VTy;
3766 if (!Ty)
3767 return nullptr;
3768
3769 unsigned nElts = VTy->getNumElements();
3770
3771 CodeGen::CGBuilderTy &Builder = CGF.Builder;
3772
3773 // AArch64 scalar builtins are not overloaded; they do not have an extra
3774 // argument that specifies the vector type, so we need to handle each case.
3775 SmallVector<Value *, 2> TblOps;
3776 switch (BuiltinID) {
3777 case NEON::BI__builtin_neon_vtbl1_v: {
3778 TblOps.push_back(Ops[0]);
3779 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[1], Ty,
3780 Intrinsic::aarch64_neon_tbl1, "vtbl1");
3781 }
3782 case NEON::BI__builtin_neon_vtbl2_v: {
3783 TblOps.push_back(Ops[0]);
3784 TblOps.push_back(Ops[1]);
3785 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
3786 Intrinsic::aarch64_neon_tbl1, "vtbl1");
3787 }
3788 case NEON::BI__builtin_neon_vtbl3_v: {
3789 TblOps.push_back(Ops[0]);
3790 TblOps.push_back(Ops[1]);
3791 TblOps.push_back(Ops[2]);
3792 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[3], Ty,
3793 Intrinsic::aarch64_neon_tbl2, "vtbl2");
3794 }
3795 case NEON::BI__builtin_neon_vtbl4_v: {
3796 TblOps.push_back(Ops[0]);
3797 TblOps.push_back(Ops[1]);
3798 TblOps.push_back(Ops[2]);
3799 TblOps.push_back(Ops[3]);
3800 return packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
3801 Intrinsic::aarch64_neon_tbl2, "vtbl2");
3802 }
3803 case NEON::BI__builtin_neon_vtbx1_v: {
3804 TblOps.push_back(Ops[1]);
3805 Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[2], Ty,
3806 Intrinsic::aarch64_neon_tbl1, "vtbl1");
3807
3808 llvm::Constant *Eight = ConstantInt::get(VTy->getElementType(), 8);
3809 Value* EightV = llvm::ConstantVector::getSplat(nElts, Eight);
3810 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[2], EightV);
3811 CmpRes = Builder.CreateSExt(CmpRes, Ty);
3812
3813 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
3814 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
3815 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
3816 }
3817 case NEON::BI__builtin_neon_vtbx2_v: {
3818 TblOps.push_back(Ops[1]);
3819 TblOps.push_back(Ops[2]);
3820 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[3], Ty,
3821 Intrinsic::aarch64_neon_tbx1, "vtbx1");
3822 }
3823 case NEON::BI__builtin_neon_vtbx3_v: {
3824 TblOps.push_back(Ops[1]);
3825 TblOps.push_back(Ops[2]);
3826 TblOps.push_back(Ops[3]);
3827 Value *TblRes = packTBLDVectorList(CGF, TblOps, nullptr, Ops[4], Ty,
3828 Intrinsic::aarch64_neon_tbl2, "vtbl2");
3829
3830 llvm::Constant *TwentyFour = ConstantInt::get(VTy->getElementType(), 24);
3831 Value* TwentyFourV = llvm::ConstantVector::getSplat(nElts, TwentyFour);
3832 Value *CmpRes = Builder.CreateICmp(ICmpInst::ICMP_UGE, Ops[4],
3833 TwentyFourV);
3834 CmpRes = Builder.CreateSExt(CmpRes, Ty);
3835
3836 Value *EltsFromInput = Builder.CreateAnd(CmpRes, Ops[0]);
3837 Value *EltsFromTbl = Builder.CreateAnd(Builder.CreateNot(CmpRes), TblRes);
3838 return Builder.CreateOr(EltsFromInput, EltsFromTbl, "vtbx");
3839 }
3840 case NEON::BI__builtin_neon_vtbx4_v: {
3841 TblOps.push_back(Ops[1]);
3842 TblOps.push_back(Ops[2]);
3843 TblOps.push_back(Ops[3]);
3844 TblOps.push_back(Ops[4]);
3845 return packTBLDVectorList(CGF, TblOps, Ops[0], Ops[5], Ty,
3846 Intrinsic::aarch64_neon_tbx2, "vtbx2");
3847 }
3848 case NEON::BI__builtin_neon_vqtbl1_v:
3849 case NEON::BI__builtin_neon_vqtbl1q_v:
3850 Int = Intrinsic::aarch64_neon_tbl1; s = "vtbl1"; break;
3851 case NEON::BI__builtin_neon_vqtbl2_v:
3852 case NEON::BI__builtin_neon_vqtbl2q_v:
3853 Int = Intrinsic::aarch64_neon_tbl2; s = "vtbl2"; break;
3854 case NEON::BI__builtin_neon_vqtbl3_v:
3855 case NEON::BI__builtin_neon_vqtbl3q_v:
3856 Int = Intrinsic::aarch64_neon_tbl3; s = "vtbl3"; break;
3857 case NEON::BI__builtin_neon_vqtbl4_v:
3858 case NEON::BI__builtin_neon_vqtbl4q_v:
3859 Int = Intrinsic::aarch64_neon_tbl4; s = "vtbl4"; break;
3860 case NEON::BI__builtin_neon_vqtbx1_v:
3861 case NEON::BI__builtin_neon_vqtbx1q_v:
3862 Int = Intrinsic::aarch64_neon_tbx1; s = "vtbx1"; break;
3863 case NEON::BI__builtin_neon_vqtbx2_v:
3864 case NEON::BI__builtin_neon_vqtbx2q_v:
3865 Int = Intrinsic::aarch64_neon_tbx2; s = "vtbx2"; break;
3866 case NEON::BI__builtin_neon_vqtbx3_v:
3867 case NEON::BI__builtin_neon_vqtbx3q_v:
3868 Int = Intrinsic::aarch64_neon_tbx3; s = "vtbx3"; break;
3869 case NEON::BI__builtin_neon_vqtbx4_v:
3870 case NEON::BI__builtin_neon_vqtbx4q_v:
3871 Int = Intrinsic::aarch64_neon_tbx4; s = "vtbx4"; break;
3873 }
3874
3875 if (!Int)
3876 return nullptr;
3877
3878 Function *F = CGF.CGM.getIntrinsic(Int, Ty);
3879 return CGF.EmitNeonCall(F, Ops, s);
3880 }
3881
3882 Value *CodeGenFunction::vectorWrapScalar16(Value *Op) {
3883 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
3884 Op = Builder.CreateBitCast(Op, Int16Ty);
3885 Value *V = UndefValue::get(VTy);
3886 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
3887 Op = Builder.CreateInsertElement(V, Op, CI);
3888 return Op;
3889 }
3890
3891 Value *CodeGenFunction::vectorWrapScalar8(Value *Op) {
3892 llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
3893 Op = Builder.CreateBitCast(Op, Int8Ty);
3894 Value *V = UndefValue::get(VTy);
3895 llvm::Constant *CI = ConstantInt::get(SizeTy, 0);
3896 Op = Builder.CreateInsertElement(V, Op, CI);
3897 return Op;
3898 }
3899
3900 Value *CodeGenFunction::
3901 emitVectorWrappedScalar8Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
3902 const char *Name) {
3903 // i8 is not a legal type for AArch64, so we can't just use
3904 // a normal overloaded intrinsic call for these scalar types. Instead
3905 // we'll build 64-bit vectors with lane zero being our input values and
3906 // perform the operation on that. The back end can pattern match directly
3907 // to the scalar instruction.
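// Sketch: i8 %a and i8 %b are inserted into lane 0 of two <8 x i8>
// vectors, the intrinsic is called on those, and lane 0 of the result is
// extracted as the scalar answer.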
3908 Ops[0] = vectorWrapScalar8(Ops[0]);
3909 Ops[1] = vectorWrapScalar8(Ops[1]);
3910 llvm::Type *VTy = llvm::VectorType::get(Int8Ty, 8);
3911 Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
3912 Constant *CI = ConstantInt::get(SizeTy, 0);
3913 return Builder.CreateExtractElement(V, CI, "lane0");
3914 }
3915
3916 Value *CodeGenFunction::
3917 emitVectorWrappedScalar16Intrinsic(unsigned Int, SmallVectorImpl<Value*> &Ops,
3918 const char *Name) {
3919 // i16 is not a legal type for AArch64, so we can't just use
3920 // a normal overloaded intrinsic call for these scalar types. Instead
3921 // we'll build 64-bit vectors w/ lane zero being our input values and
3922 // perform the operation on that. The back end can pattern match directly
3923 // to the scalar instruction.
3924 Ops[0] = vectorWrapScalar16(Ops[0]);
3925 Ops[1] = vectorWrapScalar16(Ops[1]);
3926 llvm::Type *VTy = llvm::VectorType::get(Int16Ty, 4);
3927 Value *V = EmitNeonCall(CGM.getIntrinsic(Int, VTy), Ops, Name);
3928 Constant *CI = ConstantInt::get(SizeTy, 0);
3929 return Builder.CreateExtractElement(V, CI, "lane0");
3930 }
3931
3932 Value *CodeGenFunction::EmitAArch64BuiltinExpr(unsigned BuiltinID,
3933 const CallExpr *E) {
3934 unsigned HintID = static_cast<unsigned>(-1);
3935 switch (BuiltinID) {
3936 default: break;
3937 case AArch64::BI__builtin_arm_nop:
3938 HintID = 0;
3939 break;
3940 case AArch64::BI__builtin_arm_yield:
3941 HintID = 1;
3942 break;
3943 case AArch64::BI__builtin_arm_wfe:
3944 HintID = 2;
3945 break;
3946 case AArch64::BI__builtin_arm_wfi:
3947 HintID = 3;
3948 break;
3949 case AArch64::BI__builtin_arm_sev:
3950 HintID = 4;
3951 break;
3952 case AArch64::BI__builtin_arm_sevl:
3953 HintID = 5;
3954 break;
3955 }
3956
3957 if (HintID != static_cast<unsigned>(-1)) {
3958 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_hint);
3959 return Builder.CreateCall(F, llvm::ConstantInt::get(Int32Ty, HintID));
3960 }
3961
3962 if (BuiltinID == AArch64::BI__builtin_arm_prefetch) {
3963 Value *Address = EmitScalarExpr(E->getArg(0));
3964 Value *RW = EmitScalarExpr(E->getArg(1));
3965 Value *CacheLevel = EmitScalarExpr(E->getArg(2));
3966 Value *RetentionPolicy = EmitScalarExpr(E->getArg(3));
3967 Value *IsData = EmitScalarExpr(E->getArg(4));
3968
3969 Value *Locality = nullptr;
3970 if (cast<llvm::ConstantInt>(RetentionPolicy)->isZero()) {
3971 // Temporal fetch: convert the cache level to a locality value.
3972 Locality = llvm::ConstantInt::get(Int32Ty,
3973 -cast<llvm::ConstantInt>(CacheLevel)->getValue() + 3);
3974 } else {
3975 // Streaming fetch.
3976 Locality = llvm::ConstantInt::get(Int32Ty, 0);
3977 }
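    // For example (assuming the ACLE numbering where cache level 0 is L1): a
    // temporal fetch of L1 maps to locality 3 ("keep in all cache levels"),
    // L2 to locality 2, and L3 to locality 1, while any streaming fetch maps
    // to locality 0 (no temporal locality).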
3978
3979 // FIXME: We need an AArch64-specific LLVM intrinsic if we want to specify
3980 // PLDL3STRM or PLDL2STRM.
3981 Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
3982 return Builder.CreateCall4(F, Address, RW, Locality, IsData);
3983 }
3984
3985 if (BuiltinID == AArch64::BI__builtin_arm_rbit) {
3986 assert((getContext().getTypeSize(E->getType()) == 32) &&
3987 "rbit of unusual size!");
3988 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
3989 return Builder.CreateCall(
3990 CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
3991 }
3992 if (BuiltinID == AArch64::BI__builtin_arm_rbit64) {
3993 assert((getContext().getTypeSize(E->getType()) == 64) &&
3994 "rbit of unusual size!");
3995 llvm::Value *Arg = EmitScalarExpr(E->getArg(0));
3996 return Builder.CreateCall(
3997 CGM.getIntrinsic(Intrinsic::aarch64_rbit, Arg->getType()), Arg, "rbit");
3998 }
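  // Both rbit builtins above share the overloaded llvm.aarch64.rbit
  // intrinsic; instantiating it at the argument's own type (i32 or i64) lets
  // the backend pick the matching 32- or 64-bit RBIT instruction.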
3999
4000 if (BuiltinID == AArch64::BI__clear_cache) {
4001 assert(E->getNumArgs() == 2 && "__clear_cache takes 2 arguments");
4002 const FunctionDecl *FD = E->getDirectCallee();
4003 SmallVector<Value*, 2> Ops;
4004 for (unsigned i = 0; i < 2; i++)
4005 Ops.push_back(EmitScalarExpr(E->getArg(i)));
4006 llvm::Type *Ty = CGM.getTypes().ConvertType(FD->getType());
4007 llvm::FunctionType *FTy = cast<llvm::FunctionType>(Ty);
4008 StringRef Name = FD->getName();
4009 return EmitNounwindRuntimeCall(CGM.CreateRuntimeFunction(FTy, Name), Ops);
4010 }
4011
4012 if ((BuiltinID == AArch64::BI__builtin_arm_ldrex ||
4013 BuiltinID == AArch64::BI__builtin_arm_ldaex) &&
4014 getContext().getTypeSize(E->getType()) == 128) {
4015 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
4016 ? Intrinsic::aarch64_ldaxp
4017 : Intrinsic::aarch64_ldxp);
4018
4019 Value *LdPtr = EmitScalarExpr(E->getArg(0));
4020 Value *Val = Builder.CreateCall(F, Builder.CreateBitCast(LdPtr, Int8PtrTy),
4021 "ldxp");
4022
4023 Value *Val0 = Builder.CreateExtractValue(Val, 1);
4024 Value *Val1 = Builder.CreateExtractValue(Val, 0);
4025 llvm::Type *Int128Ty = llvm::IntegerType::get(getLLVMContext(), 128);
4026 Val0 = Builder.CreateZExt(Val0, Int128Ty);
4027 Val1 = Builder.CreateZExt(Val1, Int128Ty);
4028
4029 Value *ShiftCst = llvm::ConstantInt::get(Int128Ty, 64);
4030 Val = Builder.CreateShl(Val0, ShiftCst, "shl", true /* nuw */);
4031 Val = Builder.CreateOr(Val, Val1);
4032 return Builder.CreateBitCast(Val, ConvertType(E->getType()));
4033 } else if (BuiltinID == AArch64::BI__builtin_arm_ldrex ||
4034 BuiltinID == AArch64::BI__builtin_arm_ldaex) {
4035 Value *LoadAddr = EmitScalarExpr(E->getArg(0));
4036
4037 QualType Ty = E->getType();
4038 llvm::Type *RealResTy = ConvertType(Ty);
4039 llvm::Type *IntResTy = llvm::IntegerType::get(getLLVMContext(),
4040 getContext().getTypeSize(Ty));
4041 LoadAddr = Builder.CreateBitCast(LoadAddr, IntResTy->getPointerTo());
4042
4043 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_ldaex
4044 ? Intrinsic::aarch64_ldaxr
4045 : Intrinsic::aarch64_ldxr,
4046 LoadAddr->getType());
4047 Value *Val = Builder.CreateCall(F, LoadAddr, "ldxr");
4048
4049 if (RealResTy->isPointerTy())
4050 return Builder.CreateIntToPtr(Val, RealResTy);
4051
4052 Val = Builder.CreateTruncOrBitCast(Val, IntResTy);
4053 return Builder.CreateBitCast(Val, RealResTy);
4054 }
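  // In the 128-bit case above, ldxp/ldaxp returns two i64 halves; the half at
  // struct index 1 is zero-extended, shifted into the top 64 bits (the zext
  // is what makes the nuw flag safe), and OR'd with the low half to rebuild
  // the i128 before the final bitcast to the user-visible type.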
4055
4056 if ((BuiltinID == AArch64::BI__builtin_arm_strex ||
4057 BuiltinID == AArch64::BI__builtin_arm_stlex) &&
4058 getContext().getTypeSize(E->getArg(0)->getType()) == 128) {
4059 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
4060 ? Intrinsic::aarch64_stlxp
4061 : Intrinsic::aarch64_stxp);
4062 llvm::Type *STy = llvm::StructType::get(Int64Ty, Int64Ty, nullptr);
4063
4064 Value *One = llvm::ConstantInt::get(Int32Ty, 1);
4065 Value *Tmp = Builder.CreateAlloca(ConvertType(E->getArg(0)->getType()),
4066 One);
4067 Value *Val = EmitScalarExpr(E->getArg(0));
4068 Builder.CreateStore(Val, Tmp);
4069
4070 Value *LdPtr = Builder.CreateBitCast(Tmp,llvm::PointerType::getUnqual(STy));
4071 Val = Builder.CreateLoad(LdPtr);
4072
4073 Value *Arg0 = Builder.CreateExtractValue(Val, 0);
4074 Value *Arg1 = Builder.CreateExtractValue(Val, 1);
4075 Value *StPtr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)),
4076 Int8PtrTy);
4077 return Builder.CreateCall3(F, Arg0, Arg1, StPtr, "stxp");
4078 } else if (BuiltinID == AArch64::BI__builtin_arm_strex ||
4079 BuiltinID == AArch64::BI__builtin_arm_stlex) {
4080 Value *StoreVal = EmitScalarExpr(E->getArg(0));
4081 Value *StoreAddr = EmitScalarExpr(E->getArg(1));
4082
4083 QualType Ty = E->getArg(0)->getType();
4084 llvm::Type *StoreTy = llvm::IntegerType::get(getLLVMContext(),
4085 getContext().getTypeSize(Ty));
4086 StoreAddr = Builder.CreateBitCast(StoreAddr, StoreTy->getPointerTo());
4087
4088 if (StoreVal->getType()->isPointerTy())
4089 StoreVal = Builder.CreatePtrToInt(StoreVal, Int64Ty);
4090 else {
4091 StoreVal = Builder.CreateBitCast(StoreVal, StoreTy);
4092 StoreVal = Builder.CreateZExtOrBitCast(StoreVal, Int64Ty);
4093 }
4094
4095 Function *F = CGM.getIntrinsic(BuiltinID == AArch64::BI__builtin_arm_stlex
4096 ? Intrinsic::aarch64_stlxr
4097 : Intrinsic::aarch64_stxr,
4098 StoreAddr->getType());
4099 return Builder.CreateCall2(F, StoreVal, StoreAddr, "stxr");
4100 }
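  // The 128-bit store case above performs the inverse split: the i128 value
  // is spilled to a temporary, reloaded as a pair of i64s through a
  // { i64, i64 } pointer, and the two halves are passed to stxp/stlxp along
  // with the target address.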
4101
4102 if (BuiltinID == AArch64::BI__builtin_arm_clrex) {
4103 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_clrex);
4104 return Builder.CreateCall(F);
4105 }
4106
4107 // CRC32
4108 Intrinsic::ID CRCIntrinsicID = Intrinsic::not_intrinsic;
4109 switch (BuiltinID) {
4110 case AArch64::BI__builtin_arm_crc32b:
4111 CRCIntrinsicID = Intrinsic::aarch64_crc32b; break;
4112 case AArch64::BI__builtin_arm_crc32cb:
4113 CRCIntrinsicID = Intrinsic::aarch64_crc32cb; break;
4114 case AArch64::BI__builtin_arm_crc32h:
4115 CRCIntrinsicID = Intrinsic::aarch64_crc32h; break;
4116 case AArch64::BI__builtin_arm_crc32ch:
4117 CRCIntrinsicID = Intrinsic::aarch64_crc32ch; break;
4118 case AArch64::BI__builtin_arm_crc32w:
4119 CRCIntrinsicID = Intrinsic::aarch64_crc32w; break;
4120 case AArch64::BI__builtin_arm_crc32cw:
4121 CRCIntrinsicID = Intrinsic::aarch64_crc32cw; break;
4122 case AArch64::BI__builtin_arm_crc32d:
4123 CRCIntrinsicID = Intrinsic::aarch64_crc32x; break;
4124 case AArch64::BI__builtin_arm_crc32cd:
4125 CRCIntrinsicID = Intrinsic::aarch64_crc32cx; break;
4126 }
4127
4128 if (CRCIntrinsicID != Intrinsic::not_intrinsic) {
4129 Value *Arg0 = EmitScalarExpr(E->getArg(0));
4130 Value *Arg1 = EmitScalarExpr(E->getArg(1));
4131 Function *F = CGM.getIntrinsic(CRCIntrinsicID);
4132
4133 llvm::Type *DataTy = F->getFunctionType()->getParamType(1);
4134 Arg1 = Builder.CreateZExtOrBitCast(Arg1, DataTy);
4135
4136 return Builder.CreateCall2(F, Arg0, Arg1);
4137 }
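  // Note that the 64-bit CRC32 builtins (crc32d/crc32cd) map to the
  // aarch64_crc32x/crc32cx intrinsics: those instructions take their data
  // operand in an X register, and the ZExtOrBitCast above widens the second
  // argument to whatever width the chosen intrinsic expects.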
4138
4139 llvm::SmallVector<Value*, 4> Ops;
4140 for (unsigned i = 0, e = E->getNumArgs() - 1; i != e; i++)
4141 Ops.push_back(EmitScalarExpr(E->getArg(i)));
4142
4143 auto SISDMap = makeArrayRef(AArch64SISDIntrinsicMap);
4144 const NeonIntrinsicInfo *Builtin = findNeonIntrinsicInMap(
4145 SISDMap, BuiltinID, AArch64SISDIntrinsicsProvenSorted);
4146
4147 if (Builtin) {
4148 Ops.push_back(EmitScalarExpr(E->getArg(E->getNumArgs() - 1)));
4149 Value *Result = EmitCommonNeonSISDBuiltinExpr(*this, *Builtin, Ops, E);
4150 assert(Result && "SISD intrinsic should have been handled");
4151 return Result;
4152 }
4153
4154 llvm::APSInt Result;
4155 const Expr *Arg = E->getArg(E->getNumArgs()-1);
4156 NeonTypeFlags Type(0);
4157 if (Arg->isIntegerConstantExpr(Result, getContext()))
4158 // Determine the type of this overloaded NEON intrinsic.
4159 Type = NeonTypeFlags(Result.getZExtValue());
4160
4161 bool usgn = Type.isUnsigned();
4162 bool quad = Type.isQuad();
4163
4164 // Handle non-overloaded intrinsics first.
4165 switch (BuiltinID) {
4166 default: break;
4167 case NEON::BI__builtin_neon_vldrq_p128: {
4168 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
4169 Value *Ptr = Builder.CreateBitCast(EmitScalarExpr(E->getArg(0)), Int128PTy);
4170 return Builder.CreateLoad(Ptr);
4171 }
4172 case NEON::BI__builtin_neon_vstrq_p128: {
4173 llvm::Type *Int128PTy = llvm::Type::getIntNPtrTy(getLLVMContext(), 128);
4174 Value *Ptr = Builder.CreateBitCast(Ops[0], Int128PTy);
4175 return Builder.CreateStore(EmitScalarExpr(E->getArg(1)), Ptr);
4176 }
4177 case NEON::BI__builtin_neon_vcvts_u32_f32:
4178 case NEON::BI__builtin_neon_vcvtd_u64_f64:
4179 usgn = true;
4180 // FALL THROUGH
4181 case NEON::BI__builtin_neon_vcvts_s32_f32:
4182 case NEON::BI__builtin_neon_vcvtd_s64_f64: {
4183 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4184 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
4185 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
4186 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
4187 Ops[0] = Builder.CreateBitCast(Ops[0], FTy);
4188 if (usgn)
4189 return Builder.CreateFPToUI(Ops[0], InTy);
4190 return Builder.CreateFPToSI(Ops[0], InTy);
4191 }
4192 case NEON::BI__builtin_neon_vcvts_f32_u32:
4193 case NEON::BI__builtin_neon_vcvtd_f64_u64:
4194 usgn = true;
4195 // FALL THROUGH
4196 case NEON::BI__builtin_neon_vcvts_f32_s32:
4197 case NEON::BI__builtin_neon_vcvtd_f64_s64: {
4198 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4199 bool Is64 = Ops[0]->getType()->getPrimitiveSizeInBits() == 64;
4200 llvm::Type *InTy = Is64 ? Int64Ty : Int32Ty;
4201 llvm::Type *FTy = Is64 ? DoubleTy : FloatTy;
4202 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
4203 if (usgn)
4204 return Builder.CreateUIToFP(Ops[0], FTy);
4205 return Builder.CreateSIToFP(Ops[0], FTy);
4206 }
4207 case NEON::BI__builtin_neon_vpaddd_s64: {
4208 llvm::Type *Ty =
4209 llvm::VectorType::get(llvm::Type::getInt64Ty(getLLVMContext()), 2);
4210 Value *Vec = EmitScalarExpr(E->getArg(0));
4211 // The vector is v2i64, so make sure it's bitcast to that.
4212 Vec = Builder.CreateBitCast(Vec, Ty, "v2i64");
4213 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
4214 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
4215 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
4216 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
4217 // Pairwise addition of a v2i64 into a scalar i64.
4218 return Builder.CreateAdd(Op0, Op1, "vpaddd");
4219 }
4220 case NEON::BI__builtin_neon_vpaddd_f64: {
4221 llvm::Type *Ty =
4222 llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2);
4223 Value *Vec = EmitScalarExpr(E->getArg(0));
4224 // The vector is v2f64, so make sure it's bitcast to that.
4225 Vec = Builder.CreateBitCast(Vec, Ty, "v2f64");
4226 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
4227 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
4228 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
4229 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
4230 // Pairwise addition of a v2f64 into a scalar f64.
4231 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
4232 }
4233 case NEON::BI__builtin_neon_vpadds_f32: {
4234 llvm::Type *Ty =
4235 llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2);
4236 Value *Vec = EmitScalarExpr(E->getArg(0));
4237 // The vector is v2f32, so make sure it's bitcast to that.
4238 Vec = Builder.CreateBitCast(Vec, Ty, "v2f32");
4239 llvm::Value *Idx0 = llvm::ConstantInt::get(SizeTy, 0);
4240 llvm::Value *Idx1 = llvm::ConstantInt::get(SizeTy, 1);
4241 Value *Op0 = Builder.CreateExtractElement(Vec, Idx0, "lane0");
4242 Value *Op1 = Builder.CreateExtractElement(Vec, Idx1, "lane1");
4243 // Pairwise addition of a v2f32 into a scalar f32.
4244 return Builder.CreateFAdd(Op0, Op1, "vpaddd");
4245 }
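  // A minimal IR sketch of the pairwise additions above, for the v2i64 case
  // (illustrative names):
  //   %lane0 = extractelement <2 x i64> %vec, i64 0
  //   %lane1 = extractelement <2 x i64> %vec, i64 1
  //   %sum   = add i64 %lane0, %lane1
  // The f64/f32 variants are identical except that they use fadd.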
4246 case NEON::BI__builtin_neon_vceqzd_s64:
4247 case NEON::BI__builtin_neon_vceqzd_f64:
4248 case NEON::BI__builtin_neon_vceqzs_f32:
4249 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4250 return EmitAArch64CompareBuiltinExpr(
4251 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OEQ,
4252 ICmpInst::ICMP_EQ, "vceqz");
4253 case NEON::BI__builtin_neon_vcgezd_s64:
4254 case NEON::BI__builtin_neon_vcgezd_f64:
4255 case NEON::BI__builtin_neon_vcgezs_f32:
4256 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4257 return EmitAArch64CompareBuiltinExpr(
4258 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGE,
4259 ICmpInst::ICMP_SGE, "vcgez");
4260 case NEON::BI__builtin_neon_vclezd_s64:
4261 case NEON::BI__builtin_neon_vclezd_f64:
4262 case NEON::BI__builtin_neon_vclezs_f32:
4263 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4264 return EmitAArch64CompareBuiltinExpr(
4265 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLE,
4266 ICmpInst::ICMP_SLE, "vclez");
4267 case NEON::BI__builtin_neon_vcgtzd_s64:
4268 case NEON::BI__builtin_neon_vcgtzd_f64:
4269 case NEON::BI__builtin_neon_vcgtzs_f32:
4270 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4271 return EmitAArch64CompareBuiltinExpr(
4272 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OGT,
4273 ICmpInst::ICMP_SGT, "vcgtz");
4274 case NEON::BI__builtin_neon_vcltzd_s64:
4275 case NEON::BI__builtin_neon_vcltzd_f64:
4276 case NEON::BI__builtin_neon_vcltzs_f32:
4277 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4278 return EmitAArch64CompareBuiltinExpr(
4279 Ops[0], ConvertType(E->getCallReturnType()), ICmpInst::FCMP_OLT,
4280 ICmpInst::ICMP_SLT, "vcltz");
4281
4282 case NEON::BI__builtin_neon_vceqzd_u64: {
4283 llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
4284 Ops.push_back(EmitScalarExpr(E->getArg(0)));
4285 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
4286 Ops[0] = Builder.CreateICmp(llvm::ICmpInst::ICMP_EQ, Ops[0],
4287 llvm::Constant::getNullValue(Ty));
4288 return Builder.CreateSExt(Ops[0], Ty, "vceqzd");
4289 }
4290 case NEON::BI__builtin_neon_vceqd_f64:
4291 case NEON::BI__builtin_neon_vcled_f64:
4292 case NEON::BI__builtin_neon_vcltd_f64:
4293 case NEON::BI__builtin_neon_vcged_f64:
4294 case NEON::BI__builtin_neon_vcgtd_f64: {
4295 llvm::CmpInst::Predicate P;
4296 switch (BuiltinID) {
4297 default: llvm_unreachable("missing builtin ID in switch!");
4298 case NEON::BI__builtin_neon_vceqd_f64: P = llvm::FCmpInst::FCMP_OEQ; break;
4299 case NEON::BI__builtin_neon_vcled_f64: P = llvm::FCmpInst::FCMP_OLE; break;
4300 case NEON::BI__builtin_neon_vcltd_f64: P = llvm::FCmpInst::FCMP_OLT; break;
4301 case NEON::BI__builtin_neon_vcged_f64: P = llvm::FCmpInst::FCMP_OGE; break;
4302 case NEON::BI__builtin_neon_vcgtd_f64: P = llvm::FCmpInst::FCMP_OGT; break;
4303 }
4304 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4305 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
4306 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
4307 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
4308 return Builder.CreateSExt(Ops[0], Int64Ty, "vcmpd");
4309 }
4310 case NEON::BI__builtin_neon_vceqs_f32:
4311 case NEON::BI__builtin_neon_vcles_f32:
4312 case NEON::BI__builtin_neon_vclts_f32:
4313 case NEON::BI__builtin_neon_vcges_f32:
4314 case NEON::BI__builtin_neon_vcgts_f32: {
4315 llvm::CmpInst::Predicate P;
4316 switch (BuiltinID) {
4317 default: llvm_unreachable("missing builtin ID in switch!");
4318 case NEON::BI__builtin_neon_vceqs_f32: P = llvm::FCmpInst::FCMP_OEQ; break;
4319 case NEON::BI__builtin_neon_vcles_f32: P = llvm::FCmpInst::FCMP_OLE; break;
4320 case NEON::BI__builtin_neon_vclts_f32: P = llvm::FCmpInst::FCMP_OLT; break;
4321 case NEON::BI__builtin_neon_vcges_f32: P = llvm::FCmpInst::FCMP_OGE; break;
4322 case NEON::BI__builtin_neon_vcgts_f32: P = llvm::FCmpInst::FCMP_OGT; break;
4323 }
4324 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4325 Ops[0] = Builder.CreateBitCast(Ops[0], FloatTy);
4326 Ops[1] = Builder.CreateBitCast(Ops[1], FloatTy);
4327 Ops[0] = Builder.CreateFCmp(P, Ops[0], Ops[1]);
4328 return Builder.CreateSExt(Ops[0], Int32Ty, "vcmpd");
4329 }
4330 case NEON::BI__builtin_neon_vceqd_s64:
4331 case NEON::BI__builtin_neon_vceqd_u64:
4332 case NEON::BI__builtin_neon_vcgtd_s64:
4333 case NEON::BI__builtin_neon_vcgtd_u64:
4334 case NEON::BI__builtin_neon_vcltd_s64:
4335 case NEON::BI__builtin_neon_vcltd_u64:
4336 case NEON::BI__builtin_neon_vcged_u64:
4337 case NEON::BI__builtin_neon_vcged_s64:
4338 case NEON::BI__builtin_neon_vcled_u64:
4339 case NEON::BI__builtin_neon_vcled_s64: {
4340 llvm::CmpInst::Predicate P;
4341 switch (BuiltinID) {
4342 default: llvm_unreachable("missing builtin ID in switch!");
4343 case NEON::BI__builtin_neon_vceqd_s64:
4344 case NEON::BI__builtin_neon_vceqd_u64:P = llvm::ICmpInst::ICMP_EQ;break;
4345 case NEON::BI__builtin_neon_vcgtd_s64:P = llvm::ICmpInst::ICMP_SGT;break;
4346 case NEON::BI__builtin_neon_vcgtd_u64:P = llvm::ICmpInst::ICMP_UGT;break;
4347 case NEON::BI__builtin_neon_vcltd_s64:P = llvm::ICmpInst::ICMP_SLT;break;
4348 case NEON::BI__builtin_neon_vcltd_u64:P = llvm::ICmpInst::ICMP_ULT;break;
4349 case NEON::BI__builtin_neon_vcged_u64:P = llvm::ICmpInst::ICMP_UGE;break;
4350 case NEON::BI__builtin_neon_vcged_s64:P = llvm::ICmpInst::ICMP_SGE;break;
4351 case NEON::BI__builtin_neon_vcled_u64:P = llvm::ICmpInst::ICMP_ULE;break;
4352 case NEON::BI__builtin_neon_vcled_s64:P = llvm::ICmpInst::ICMP_SLE;break;
4353 }
4354 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4355 Ops[0] = Builder.CreateBitCast(Ops[0], Int64Ty);
4356 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
4357 Ops[0] = Builder.CreateICmp(P, Ops[0], Ops[1]);
4358 return Builder.CreateSExt(Ops[0], Int64Ty, "vceqd");
4359 }
4360 case NEON::BI__builtin_neon_vtstd_s64:
4361 case NEON::BI__builtin_neon_vtstd_u64: {
4362 llvm::Type *Ty = llvm::Type::getInt64Ty(getLLVMContext());
4363 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4364 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
4365 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
4366 Ops[0] = Builder.CreateAnd(Ops[0], Ops[1]);
4367 Ops[0] = Builder.CreateICmp(ICmpInst::ICMP_NE, Ops[0],
4368 llvm::Constant::getNullValue(Ty));
4369 return Builder.CreateSExt(Ops[0], Ty, "vtstd");
4370 }
4371 case NEON::BI__builtin_neon_vset_lane_i8:
4372 case NEON::BI__builtin_neon_vset_lane_i16:
4373 case NEON::BI__builtin_neon_vset_lane_i32:
4374 case NEON::BI__builtin_neon_vset_lane_i64:
4375 case NEON::BI__builtin_neon_vset_lane_f32:
4376 case NEON::BI__builtin_neon_vsetq_lane_i8:
4377 case NEON::BI__builtin_neon_vsetq_lane_i16:
4378 case NEON::BI__builtin_neon_vsetq_lane_i32:
4379 case NEON::BI__builtin_neon_vsetq_lane_i64:
4380 case NEON::BI__builtin_neon_vsetq_lane_f32:
4381 Ops.push_back(EmitScalarExpr(E->getArg(2)));
4382 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
4383 case NEON::BI__builtin_neon_vset_lane_f64:
4384 // The vector type needs a cast for the v1f64 variant.
4385 Ops[1] = Builder.CreateBitCast(Ops[1],
4386 llvm::VectorType::get(DoubleTy, 1));
4387 Ops.push_back(EmitScalarExpr(E->getArg(2)));
4388 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
4389 case NEON::BI__builtin_neon_vsetq_lane_f64:
4390 // The vector type needs a cast for the v2f64 variant.
4391 Ops[1] = Builder.CreateBitCast(Ops[1],
4392 llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
4393 Ops.push_back(EmitScalarExpr(E->getArg(2)));
4394 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vset_lane");
4395
4396 case NEON::BI__builtin_neon_vget_lane_i8:
4397 case NEON::BI__builtin_neon_vdupb_lane_i8:
4398 Ops[0] = Builder.CreateBitCast(Ops[0],
4399 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8));
4400 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4401 "vget_lane");
4402 case NEON::BI__builtin_neon_vgetq_lane_i8:
4403 case NEON::BI__builtin_neon_vdupb_laneq_i8:
4404 Ops[0] = Builder.CreateBitCast(Ops[0],
4405 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16));
4406 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4407 "vgetq_lane");
4408 case NEON::BI__builtin_neon_vget_lane_i16:
4409 case NEON::BI__builtin_neon_vduph_lane_i16:
4410 Ops[0] = Builder.CreateBitCast(Ops[0],
4411 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4));
4412 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4413 "vget_lane");
4414 case NEON::BI__builtin_neon_vgetq_lane_i16:
4415 case NEON::BI__builtin_neon_vduph_laneq_i16:
4416 Ops[0] = Builder.CreateBitCast(Ops[0],
4417 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8));
4418 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4419 "vgetq_lane");
4420 case NEON::BI__builtin_neon_vget_lane_i32:
4421 case NEON::BI__builtin_neon_vdups_lane_i32:
4422 Ops[0] = Builder.CreateBitCast(
4423 Ops[0],
4424 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 2));
4425 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4426 "vget_lane");
4427 case NEON::BI__builtin_neon_vdups_lane_f32:
4428 Ops[0] = Builder.CreateBitCast(Ops[0],
4429 llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
4430 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4431 "vdups_lane");
4432 case NEON::BI__builtin_neon_vgetq_lane_i32:
4433 case NEON::BI__builtin_neon_vdups_laneq_i32:
4434 Ops[0] = Builder.CreateBitCast(Ops[0],
4435 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 32), 4));
4436 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4437 "vgetq_lane");
4438 case NEON::BI__builtin_neon_vget_lane_i64:
4439 case NEON::BI__builtin_neon_vdupd_lane_i64:
4440 Ops[0] = Builder.CreateBitCast(Ops[0],
4441 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 1));
4442 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4443 "vget_lane");
4444 case NEON::BI__builtin_neon_vdupd_lane_f64:
4445 Ops[0] = Builder.CreateBitCast(Ops[0],
4446 llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
4447 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4448 "vdupd_lane");
4449 case NEON::BI__builtin_neon_vgetq_lane_i64:
4450 case NEON::BI__builtin_neon_vdupd_laneq_i64:
4451 Ops[0] = Builder.CreateBitCast(Ops[0],
4452 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 64), 2));
4453 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4454 "vgetq_lane");
4455 case NEON::BI__builtin_neon_vget_lane_f32:
4456 Ops[0] = Builder.CreateBitCast(Ops[0],
4457 llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 2));
4458 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4459 "vget_lane");
4460 case NEON::BI__builtin_neon_vget_lane_f64:
4461 Ops[0] = Builder.CreateBitCast(Ops[0],
4462 llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 1));
4463 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4464 "vget_lane");
4465 case NEON::BI__builtin_neon_vgetq_lane_f32:
4466 case NEON::BI__builtin_neon_vdups_laneq_f32:
4467 Ops[0] = Builder.CreateBitCast(Ops[0],
4468 llvm::VectorType::get(llvm::Type::getFloatTy(getLLVMContext()), 4));
4469 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4470 "vgetq_lane");
4471 case NEON::BI__builtin_neon_vgetq_lane_f64:
4472 case NEON::BI__builtin_neon_vdupd_laneq_f64:
4473 Ops[0] = Builder.CreateBitCast(Ops[0],
4474 llvm::VectorType::get(llvm::Type::getDoubleTy(getLLVMContext()), 2));
4475 return Builder.CreateExtractElement(Ops[0], EmitScalarExpr(E->getArg(1)),
4476 "vgetq_lane");
4477 case NEON::BI__builtin_neon_vaddd_s64:
4478 case NEON::BI__builtin_neon_vaddd_u64:
4479 return Builder.CreateAdd(Ops[0], EmitScalarExpr(E->getArg(1)), "vaddd");
4480 case NEON::BI__builtin_neon_vsubd_s64:
4481 case NEON::BI__builtin_neon_vsubd_u64:
4482 return Builder.CreateSub(Ops[0], EmitScalarExpr(E->getArg(1)), "vsubd");
4483 case NEON::BI__builtin_neon_vqdmlalh_s16:
4484 case NEON::BI__builtin_neon_vqdmlslh_s16: {
4485 SmallVector<Value *, 2> ProductOps;
4486 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
4487 ProductOps.push_back(vectorWrapScalar16(EmitScalarExpr(E->getArg(2))));
4488 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
4489 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
4490 ProductOps, "vqdmlXl");
4491 Constant *CI = ConstantInt::get(SizeTy, 0);
4492 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
4493
4494 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlalh_s16
4495 ? Intrinsic::aarch64_neon_sqadd
4496 : Intrinsic::aarch64_neon_sqsub;
4497 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int32Ty), Ops, "vqdmlXl");
4498 }
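  // The saturating multiply-accumulate above has no direct scalar intrinsic,
  // so it is assembled from pieces: the i16 operands are wrapped into lane 0
  // of a v4i16, multiplied with sqdmull into a v4i32, lane 0 is extracted,
  // and the result is combined with the accumulator via scalar sqadd/sqsub.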
4499 case NEON::BI__builtin_neon_vqshlud_n_s64: {
4500 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4501 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
4502 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqshlu, Int64Ty),
4503 Ops, "vqshlu_n");
4504 }
4505 case NEON::BI__builtin_neon_vqshld_n_u64:
4506 case NEON::BI__builtin_neon_vqshld_n_s64: {
4507 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vqshld_n_u64
4508 ? Intrinsic::aarch64_neon_uqshl
4509 : Intrinsic::aarch64_neon_sqshl;
4510 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4511 Ops[1] = Builder.CreateZExt(Ops[1], Int64Ty);
4512 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vqshl_n");
4513 }
4514 case NEON::BI__builtin_neon_vrshrd_n_u64:
4515 case NEON::BI__builtin_neon_vrshrd_n_s64: {
4516 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrshrd_n_u64
4517 ? Intrinsic::aarch64_neon_urshl
4518 : Intrinsic::aarch64_neon_srshl;
4519 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4520 int SV = cast<ConstantInt>(Ops[1])->getSExtValue();
4521 Ops[1] = ConstantInt::get(Int64Ty, -SV);
4522 return EmitNeonCall(CGM.getIntrinsic(Int, Int64Ty), Ops, "vrshr_n");
4523 }
4524 case NEON::BI__builtin_neon_vrsrad_n_u64:
4525 case NEON::BI__builtin_neon_vrsrad_n_s64: {
4526 unsigned Int = BuiltinID == NEON::BI__builtin_neon_vrsrad_n_u64
4527 ? Intrinsic::aarch64_neon_urshl
4528 : Intrinsic::aarch64_neon_srshl;
4529 Ops[1] = Builder.CreateBitCast(Ops[1], Int64Ty);
4530 Ops.push_back(Builder.CreateNeg(EmitScalarExpr(E->getArg(2))));
4531 Ops[1] = Builder.CreateCall2(CGM.getIntrinsic(Int, Int64Ty), Ops[1],
4532 Builder.CreateSExt(Ops[2], Int64Ty));
4533 return Builder.CreateAdd(Ops[0], Builder.CreateBitCast(Ops[1], Int64Ty));
4534 }
4535 case NEON::BI__builtin_neon_vshld_n_s64:
4536 case NEON::BI__builtin_neon_vshld_n_u64: {
4537 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
4538 return Builder.CreateShl(
4539 Ops[0], ConstantInt::get(Int64Ty, Amt->getZExtValue()), "shld_n");
4540 }
4541 case NEON::BI__builtin_neon_vshrd_n_s64: {
4542 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
4543 return Builder.CreateAShr(
4544 Ops[0], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
4545 Amt->getZExtValue())),
4546 "shrd_n");
4547 }
4548 case NEON::BI__builtin_neon_vshrd_n_u64: {
4549 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(1)));
4550 uint64_t ShiftAmt = Amt->getZExtValue();
4551 // Right-shifting an unsigned value by its size yields 0.
4552 if (ShiftAmt == 64)
4553 return ConstantInt::get(Int64Ty, 0);
4554 return Builder.CreateLShr(Ops[0], ConstantInt::get(Int64Ty, ShiftAmt),
4555 "shrd_n");
4556 }
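  // The clamping above mirrors the AArch64 semantics for a shift by the full
  // register width: an arithmetic shift by 64 replicates the sign bit, which
  // an AShr by 63 also does, while a logical shift by 64 yields zero, which
  // is returned directly since an LShr by 64 would be undefined in LLVM IR.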
4557 case NEON::BI__builtin_neon_vsrad_n_s64: {
4558 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
4559 Ops[1] = Builder.CreateAShr(
4560 Ops[1], ConstantInt::get(Int64Ty, std::min(static_cast<uint64_t>(63),
4561 Amt->getZExtValue())),
4562 "shrd_n");
4563 return Builder.CreateAdd(Ops[0], Ops[1]);
4564 }
4565 case NEON::BI__builtin_neon_vsrad_n_u64: {
4566 llvm::ConstantInt *Amt = cast<ConstantInt>(EmitScalarExpr(E->getArg(2)));
4567 uint64_t ShiftAmt = Amt->getZExtValue();
4568 // Right-shifting an unsigned value by its size yields 0.
4569 // As Op + 0 = Op, return Ops[0] directly.
4570 if (ShiftAmt == 64)
4571 return Ops[0];
4572 Ops[1] = Builder.CreateLShr(Ops[1], ConstantInt::get(Int64Ty, ShiftAmt),
4573 "shrd_n");
4574 return Builder.CreateAdd(Ops[0], Ops[1]);
4575 }
4576 case NEON::BI__builtin_neon_vqdmlalh_lane_s16:
4577 case NEON::BI__builtin_neon_vqdmlalh_laneq_s16:
4578 case NEON::BI__builtin_neon_vqdmlslh_lane_s16:
4579 case NEON::BI__builtin_neon_vqdmlslh_laneq_s16: {
4580 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
4581 "lane");
4582 SmallVector<Value *, 2> ProductOps;
4583 ProductOps.push_back(vectorWrapScalar16(Ops[1]));
4584 ProductOps.push_back(vectorWrapScalar16(Ops[2]));
4585 llvm::Type *VTy = llvm::VectorType::get(Int32Ty, 4);
4586 Ops[1] = EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmull, VTy),
4587 ProductOps, "vqdmlXl");
4588 Constant *CI = ConstantInt::get(SizeTy, 0);
4589 Ops[1] = Builder.CreateExtractElement(Ops[1], CI, "lane0");
4590 Ops.pop_back();
4591
4592 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlalh_lane_s16 ||
4593 BuiltinID == NEON::BI__builtin_neon_vqdmlalh_laneq_s16)
4594 ? Intrinsic::aarch64_neon_sqadd
4595 : Intrinsic::aarch64_neon_sqsub;
4596 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int32Ty), Ops, "vqdmlXl");
4597 }
4598 case NEON::BI__builtin_neon_vqdmlals_s32:
4599 case NEON::BI__builtin_neon_vqdmlsls_s32: {
4600 SmallVector<Value *, 2> ProductOps;
4601 ProductOps.push_back(Ops[1]);
4602 ProductOps.push_back(EmitScalarExpr(E->getArg(2)));
4603 Ops[1] =
4604 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
4605 ProductOps, "vqdmlXl");
4606
4607 unsigned AccumInt = BuiltinID == NEON::BI__builtin_neon_vqdmlals_s32
4608 ? Intrinsic::aarch64_neon_sqadd
4609 : Intrinsic::aarch64_neon_sqsub;
4610 return EmitNeonCall(CGM.getIntrinsic(AccumInt, Int64Ty), Ops, "vqdmlXl");
4611 }
4612 case NEON::BI__builtin_neon_vqdmlals_lane_s32:
4613 case NEON::BI__builtin_neon_vqdmlals_laneq_s32:
4614 case NEON::BI__builtin_neon_vqdmlsls_lane_s32:
4615 case NEON::BI__builtin_neon_vqdmlsls_laneq_s32: {
4616 Ops[2] = Builder.CreateExtractElement(Ops[2], EmitScalarExpr(E->getArg(3)),
4617 "lane");
4618 SmallVector<Value *, 2> ProductOps;
4619 ProductOps.push_back(Ops[1]);
4620 ProductOps.push_back(Ops[2]);
4621 Ops[1] =
4622 EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_sqdmulls_scalar),
4623 ProductOps, "vqdmlXl");
4624 Ops.pop_back();
4625
4626 unsigned AccInt = (BuiltinID == NEON::BI__builtin_neon_vqdmlals_lane_s32 ||
4627 BuiltinID == NEON::BI__builtin_neon_vqdmlals_laneq_s32)
4628 ? Intrinsic::aarch64_neon_sqadd
4629 : Intrinsic::aarch64_neon_sqsub;
4630 return EmitNeonCall(CGM.getIntrinsic(AccInt, Int64Ty), Ops, "vqdmlXl");
4631 }
4632 }
4633
4634 llvm::VectorType *VTy = GetNeonType(this, Type);
4635 llvm::Type *Ty = VTy;
4636 if (!Ty)
4637 return nullptr;
4638
4639 // Not all intrinsics handled by the common case work for AArch64 yet, so only
4640 // defer to common code if it's been added to our special map.
4641 Builtin = findNeonIntrinsicInMap(AArch64SIMDIntrinsicMap, BuiltinID,
4642 AArch64SIMDIntrinsicsProvenSorted);
4643
4644 if (Builtin)
4645 return EmitCommonNeonBuiltinExpr(
4646 Builtin->BuiltinID, Builtin->LLVMIntrinsic, Builtin->AltLLVMIntrinsic,
4647 Builtin->NameHint, Builtin->TypeModifier, E, Ops, nullptr);
4648
4649 if (Value *V = EmitAArch64TblBuiltinExpr(*this, BuiltinID, E, Ops))
4650 return V;
4651
4652 unsigned Int;
4653 switch (BuiltinID) {
4654 default: return nullptr;
4655 case NEON::BI__builtin_neon_vbsl_v:
4656 case NEON::BI__builtin_neon_vbslq_v: {
4657 llvm::Type *BitTy = llvm::VectorType::getInteger(VTy);
4658 Ops[0] = Builder.CreateBitCast(Ops[0], BitTy, "vbsl");
4659 Ops[1] = Builder.CreateBitCast(Ops[1], BitTy, "vbsl");
4660 Ops[2] = Builder.CreateBitCast(Ops[2], BitTy, "vbsl");
4661
4662 Ops[1] = Builder.CreateAnd(Ops[0], Ops[1], "vbsl");
4663 Ops[2] = Builder.CreateAnd(Builder.CreateNot(Ops[0]), Ops[2], "vbsl");
4664 Ops[0] = Builder.CreateOr(Ops[1], Ops[2], "vbsl");
4665 return Builder.CreateBitCast(Ops[0], Ty);
4666 }
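  // This expands the bit-select directly rather than calling an intrinsic,
  // using the identity vbsl(mask, a, b) == (mask & a) | (~mask & b) on the
  // integer view of the vectors, which also covers the FP-typed variants.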
4667 case NEON::BI__builtin_neon_vfma_lane_v:
4668 case NEON::BI__builtin_neon_vfmaq_lane_v: { // Only used for FP types
4669 // The ARM builtins (and instructions) have the addend as the first
4670 // operand, but the 'fma' intrinsics have it last. Swap it around here.
4671 Value *Addend = Ops[0];
4672 Value *Multiplicand = Ops[1];
4673 Value *LaneSource = Ops[2];
4674 Ops[0] = Multiplicand;
4675 Ops[1] = LaneSource;
4676 Ops[2] = Addend;
4677
4678 // Now adjust things to handle the lane access.
4679 llvm::Type *SourceTy = BuiltinID == NEON::BI__builtin_neon_vfmaq_lane_v ?
4680 llvm::VectorType::get(VTy->getElementType(), VTy->getNumElements() / 2) :
4681 VTy;
4682 llvm::Constant *cst = cast<Constant>(Ops[3]);
4683 Value *SV = llvm::ConstantVector::getSplat(VTy->getNumElements(), cst);
4684 Ops[1] = Builder.CreateBitCast(Ops[1], SourceTy);
4685 Ops[1] = Builder.CreateShuffleVector(Ops[1], Ops[1], SV, "lane");
4686
4687 Ops.pop_back();
4688 Int = Intrinsic::fma;
4689 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmla");
4690 }
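  // A sketch of the resulting call for a v2f32 fmla by lane 1 (illustrative
  // names only):
  //   %lane = shufflevector <2 x float> %v, <2 x float> %v,
  //                         <2 x i32> <i32 1, i32 1>
  //   %res  = call <2 x float> @llvm.fma.v2f32(<2 x float> %mul,
  //                                            <2 x float> %lane,
  //                                            <2 x float> %addend)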
4691 case NEON::BI__builtin_neon_vfma_laneq_v: {
4692 llvm::VectorType *VTy = cast<llvm::VectorType>(Ty);
4693 // v1f64 fma should be mapped to Neon scalar f64 fma
4694 if (VTy && VTy->getElementType() == DoubleTy) {
4695 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
4696 Ops[1] = Builder.CreateBitCast(Ops[1], DoubleTy);
4697 llvm::Type *VTy = GetNeonType(this,
4698 NeonTypeFlags(NeonTypeFlags::Float64, false, true));
4699 Ops[2] = Builder.CreateBitCast(Ops[2], VTy);
4700 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
4701 Value *F = CGM.getIntrinsic(Intrinsic::fma, DoubleTy);
4702 Value *Result = Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
4703 return Builder.CreateBitCast(Result, Ty);
4704 }
4705 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
4706 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
4707 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
4708
4709 llvm::Type *STy = llvm::VectorType::get(VTy->getElementType(),
4710 VTy->getNumElements() * 2);
4711 Ops[2] = Builder.CreateBitCast(Ops[2], STy);
4712 Value* SV = llvm::ConstantVector::getSplat(VTy->getNumElements(),
4713 cast<ConstantInt>(Ops[3]));
4714 Ops[2] = Builder.CreateShuffleVector(Ops[2], Ops[2], SV, "lane");
4715
4716 return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
4717 }
4718 case NEON::BI__builtin_neon_vfmaq_laneq_v: {
4719 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
4720 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
4721 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
4722
4723 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
4724 Ops[2] = EmitNeonSplat(Ops[2], cast<ConstantInt>(Ops[3]));
4725 return Builder.CreateCall3(F, Ops[2], Ops[1], Ops[0]);
4726 }
4727 case NEON::BI__builtin_neon_vfmas_lane_f32:
4728 case NEON::BI__builtin_neon_vfmas_laneq_f32:
4729 case NEON::BI__builtin_neon_vfmad_lane_f64:
4730 case NEON::BI__builtin_neon_vfmad_laneq_f64: {
4731 Ops.push_back(EmitScalarExpr(E->getArg(3)));
4732 llvm::Type *Ty = ConvertType(E->getCallReturnType());
4733 Value *F = CGM.getIntrinsic(Intrinsic::fma, Ty);
4734 Ops[2] = Builder.CreateExtractElement(Ops[2], Ops[3], "extract");
4735 return Builder.CreateCall3(F, Ops[1], Ops[2], Ops[0]);
4736 }
4737 case NEON::BI__builtin_neon_vfms_v:
4738 case NEON::BI__builtin_neon_vfmsq_v: { // Only used for FP types
4739 // FIXME: probably remove when we no longer support aarch64_simd.h
4740 // (arm_neon.h delegates to vfma).
4741
4742 // The ARM builtins (and instructions) have the addend as the first
4743 // operand, but the 'fma' intrinsics have it last. Swap it around here.
4744 Value *Subtrahend = Ops[0];
4745 Value *Multiplicand = Ops[2];
4746 Ops[0] = Multiplicand;
4747 Ops[2] = Subtrahend;
4748 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
4749 Ops[1] = Builder.CreateFNeg(Ops[1]);
4750 Int = Intrinsic::fma;
4751 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "fmls");
4752 }
4753 case NEON::BI__builtin_neon_vmull_v:
4754 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
4755 Int = usgn ? Intrinsic::aarch64_neon_umull : Intrinsic::aarch64_neon_smull;
4756 if (Type.isPoly()) Int = Intrinsic::aarch64_neon_pmull;
4757 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmull");
4758 case NEON::BI__builtin_neon_vmax_v:
4759 case NEON::BI__builtin_neon_vmaxq_v:
4760 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
4761 Int = usgn ? Intrinsic::aarch64_neon_umax : Intrinsic::aarch64_neon_smax;
4762 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmax;
4763 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmax");
4764 case NEON::BI__builtin_neon_vmin_v:
4765 case NEON::BI__builtin_neon_vminq_v:
4766 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
4767 Int = usgn ? Intrinsic::aarch64_neon_umin : Intrinsic::aarch64_neon_smin;
4768 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmin;
4769 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmin");
4770 case NEON::BI__builtin_neon_vabd_v:
4771 case NEON::BI__builtin_neon_vabdq_v:
4772 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
4773 Int = usgn ? Intrinsic::aarch64_neon_uabd : Intrinsic::aarch64_neon_sabd;
4774 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fabd;
4775 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vabd");
4776 case NEON::BI__builtin_neon_vpadal_v:
4777 case NEON::BI__builtin_neon_vpadalq_v: {
4778 unsigned ArgElts = VTy->getNumElements();
4779 llvm::IntegerType *EltTy = cast<IntegerType>(VTy->getElementType());
4780 unsigned BitWidth = EltTy->getBitWidth();
4781 llvm::Type *ArgTy = llvm::VectorType::get(
4782 llvm::IntegerType::get(getLLVMContext(), BitWidth/2), 2*ArgElts);
4783 llvm::Type* Tys[2] = { VTy, ArgTy };
4784 Int = usgn ? Intrinsic::aarch64_neon_uaddlp : Intrinsic::aarch64_neon_saddlp;
4785 SmallVector<llvm::Value*, 1> TmpOps;
4786 TmpOps.push_back(Ops[1]);
4787 Function *F = CGM.getIntrinsic(Int, Tys);
4788 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vpadal");
4789 llvm::Value *addend = Builder.CreateBitCast(Ops[0], tmp->getType());
4790 return Builder.CreateAdd(tmp, addend);
4791 }
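  // vpadal is decomposed here rather than using a single accumulating
  // intrinsic: a pairwise widening add (uaddlp/saddlp) of the new operand is
  // followed by a plain vector add of the accumulator, e.g. for v4i16->v2i32:
  //   %widened = call <2 x i32> @llvm.aarch64.neon.saddlp.v2i32.v4i16(
  //                  <4 x i16> %b)
  //   %res     = add <2 x i32> %widened, %a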
4792 case NEON::BI__builtin_neon_vpmin_v:
4793 case NEON::BI__builtin_neon_vpminq_v:
4794 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
4795 Int = usgn ? Intrinsic::aarch64_neon_uminp : Intrinsic::aarch64_neon_sminp;
4796 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fminp;
4797 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmin");
4798 case NEON::BI__builtin_neon_vpmax_v:
4799 case NEON::BI__builtin_neon_vpmaxq_v:
4800 // FIXME: improve sharing scheme to cope with 3 alternative LLVM intrinsics.
4801 Int = usgn ? Intrinsic::aarch64_neon_umaxp : Intrinsic::aarch64_neon_smaxp;
4802 if (Ty->isFPOrFPVectorTy()) Int = Intrinsic::aarch64_neon_fmaxp;
4803 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmax");
4804 case NEON::BI__builtin_neon_vminnm_v:
4805 case NEON::BI__builtin_neon_vminnmq_v:
4806 Int = Intrinsic::aarch64_neon_fminnm;
4807 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vminnm");
4808 case NEON::BI__builtin_neon_vmaxnm_v:
4809 case NEON::BI__builtin_neon_vmaxnmq_v:
4810 Int = Intrinsic::aarch64_neon_fmaxnm;
4811 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmaxnm");
4812 case NEON::BI__builtin_neon_vrecpss_f32: {
4813 llvm::Type *f32Type = llvm::Type::getFloatTy(getLLVMContext());
4814 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4815 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f32Type),
4816 Ops, "vrecps");
4817 }
4818 case NEON::BI__builtin_neon_vrecpsd_f64: {
4819 llvm::Type *f64Type = llvm::Type::getDoubleTy(getLLVMContext());
4820 Ops.push_back(EmitScalarExpr(E->getArg(1)));
4821 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_frecps, f64Type),
4822 Ops, "vrecps");
4823 }
4824 case NEON::BI__builtin_neon_vqshrun_n_v:
4825 Int = Intrinsic::aarch64_neon_sqshrun;
4826 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrun_n");
4827 case NEON::BI__builtin_neon_vqrshrun_n_v:
4828 Int = Intrinsic::aarch64_neon_sqrshrun;
4829 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrun_n");
4830 case NEON::BI__builtin_neon_vqshrn_n_v:
4831 Int = usgn ? Intrinsic::aarch64_neon_uqshrn : Intrinsic::aarch64_neon_sqshrn;
4832 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqshrn_n");
4833 case NEON::BI__builtin_neon_vrshrn_n_v:
4834 Int = Intrinsic::aarch64_neon_rshrn;
4835 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrshrn_n");
4836 case NEON::BI__builtin_neon_vqrshrn_n_v:
4837 Int = usgn ? Intrinsic::aarch64_neon_uqrshrn : Intrinsic::aarch64_neon_sqrshrn;
4838 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vqrshrn_n");
4839 case NEON::BI__builtin_neon_vrnda_v:
4840 case NEON::BI__builtin_neon_vrndaq_v: {
4841 Int = Intrinsic::round;
4842 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrnda");
4843 }
4844 case NEON::BI__builtin_neon_vrndi_v:
4845 case NEON::BI__builtin_neon_vrndiq_v: {
4846 Int = Intrinsic::nearbyint;
4847 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndi");
4848 }
4849 case NEON::BI__builtin_neon_vrndm_v:
4850 case NEON::BI__builtin_neon_vrndmq_v: {
4851 Int = Intrinsic::floor;
4852 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndm");
4853 }
4854 case NEON::BI__builtin_neon_vrndn_v:
4855 case NEON::BI__builtin_neon_vrndnq_v: {
4856 Int = Intrinsic::aarch64_neon_frintn;
4857 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndn");
4858 }
4859 case NEON::BI__builtin_neon_vrndp_v:
4860 case NEON::BI__builtin_neon_vrndpq_v: {
4861 Int = Intrinsic::ceil;
4862 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndp");
4863 }
4864 case NEON::BI__builtin_neon_vrndx_v:
4865 case NEON::BI__builtin_neon_vrndxq_v: {
4866 Int = Intrinsic::rint;
4867 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndx");
4868 }
4869 case NEON::BI__builtin_neon_vrnd_v:
4870 case NEON::BI__builtin_neon_vrndq_v: {
4871 Int = Intrinsic::trunc;
4872 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrndz");
4873 }
4874 case NEON::BI__builtin_neon_vceqz_v:
4875 case NEON::BI__builtin_neon_vceqzq_v:
4876 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OEQ,
4877 ICmpInst::ICMP_EQ, "vceqz");
4878 case NEON::BI__builtin_neon_vcgez_v:
4879 case NEON::BI__builtin_neon_vcgezq_v:
4880 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGE,
4881 ICmpInst::ICMP_SGE, "vcgez");
4882 case NEON::BI__builtin_neon_vclez_v:
4883 case NEON::BI__builtin_neon_vclezq_v:
4884 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLE,
4885 ICmpInst::ICMP_SLE, "vclez");
4886 case NEON::BI__builtin_neon_vcgtz_v:
4887 case NEON::BI__builtin_neon_vcgtzq_v:
4888 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OGT,
4889 ICmpInst::ICMP_SGT, "vcgtz");
4890 case NEON::BI__builtin_neon_vcltz_v:
4891 case NEON::BI__builtin_neon_vcltzq_v:
4892 return EmitAArch64CompareBuiltinExpr(Ops[0], Ty, ICmpInst::FCMP_OLT,
4893 ICmpInst::ICMP_SLT, "vcltz");
4894 case NEON::BI__builtin_neon_vcvt_f64_v:
4895 case NEON::BI__builtin_neon_vcvtq_f64_v:
4896 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
4897 Ty = GetNeonType(this, NeonTypeFlags(NeonTypeFlags::Float64, false, quad));
4898 return usgn ? Builder.CreateUIToFP(Ops[0], Ty, "vcvt")
4899 : Builder.CreateSIToFP(Ops[0], Ty, "vcvt");
4900 case NEON::BI__builtin_neon_vcvt_f64_f32: {
4901 assert(Type.getEltType() == NeonTypeFlags::Float64 && quad &&
4902 "unexpected vcvt_f64_f32 builtin");
4903 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float32, false, false);
4904 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
4905
4906 return Builder.CreateFPExt(Ops[0], Ty, "vcvt");
4907 }
4908 case NEON::BI__builtin_neon_vcvt_f32_f64: {
4909 assert(Type.getEltType() == NeonTypeFlags::Float32 &&
4910 "unexpected vcvt_f32_f64 builtin");
4911 NeonTypeFlags SrcFlag = NeonTypeFlags(NeonTypeFlags::Float64, false, true);
4912 Ops[0] = Builder.CreateBitCast(Ops[0], GetNeonType(this, SrcFlag));
4913
4914 return Builder.CreateFPTrunc(Ops[0], Ty, "vcvt");
4915 }
4916 case NEON::BI__builtin_neon_vcvt_s32_v:
4917 case NEON::BI__builtin_neon_vcvt_u32_v:
4918 case NEON::BI__builtin_neon_vcvt_s64_v:
4919 case NEON::BI__builtin_neon_vcvt_u64_v:
4920 case NEON::BI__builtin_neon_vcvtq_s32_v:
4921 case NEON::BI__builtin_neon_vcvtq_u32_v:
4922 case NEON::BI__builtin_neon_vcvtq_s64_v:
4923 case NEON::BI__builtin_neon_vcvtq_u64_v: {
4924 bool Double =
4925 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
4926 llvm::Type *InTy =
4927 GetNeonType(this,
4928 NeonTypeFlags(Double ? NeonTypeFlags::Float64
4929 : NeonTypeFlags::Float32, false, quad));
4930 Ops[0] = Builder.CreateBitCast(Ops[0], InTy);
4931 if (usgn)
4932 return Builder.CreateFPToUI(Ops[0], Ty);
4933 return Builder.CreateFPToSI(Ops[0], Ty);
4934 }
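  // The rounding-mode conversions below (vcvta*, vcvtm*, vcvtn*, vcvtp*) all
  // follow one pattern: choose the signed or unsigned fcvt intrinsic, derive
  // the source FP vector type from the integer element width (f64 for 64-bit
  // lanes, f32 otherwise), and emit a call overloaded on both types.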
4935 case NEON::BI__builtin_neon_vcvta_s32_v:
4936 case NEON::BI__builtin_neon_vcvtaq_s32_v:
4937 case NEON::BI__builtin_neon_vcvta_u32_v:
4938 case NEON::BI__builtin_neon_vcvtaq_u32_v:
4939 case NEON::BI__builtin_neon_vcvta_s64_v:
4940 case NEON::BI__builtin_neon_vcvtaq_s64_v:
4941 case NEON::BI__builtin_neon_vcvta_u64_v:
4942 case NEON::BI__builtin_neon_vcvtaq_u64_v: {
4943 Int = usgn ? Intrinsic::aarch64_neon_fcvtau : Intrinsic::aarch64_neon_fcvtas;
4944 bool Double =
4945 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
4946 llvm::Type *InTy =
4947 GetNeonType(this,
4948 NeonTypeFlags(Double ? NeonTypeFlags::Float64
4949 : NeonTypeFlags::Float32, false, quad));
4950 llvm::Type *Tys[2] = { Ty, InTy };
4951 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvta");
4952 }
4953 case NEON::BI__builtin_neon_vcvtm_s32_v:
4954 case NEON::BI__builtin_neon_vcvtmq_s32_v:
4955 case NEON::BI__builtin_neon_vcvtm_u32_v:
4956 case NEON::BI__builtin_neon_vcvtmq_u32_v:
4957 case NEON::BI__builtin_neon_vcvtm_s64_v:
4958 case NEON::BI__builtin_neon_vcvtmq_s64_v:
4959 case NEON::BI__builtin_neon_vcvtm_u64_v:
4960 case NEON::BI__builtin_neon_vcvtmq_u64_v: {
4961 Int = usgn ? Intrinsic::aarch64_neon_fcvtmu : Intrinsic::aarch64_neon_fcvtms;
4962 bool Double =
4963 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
4964 llvm::Type *InTy =
4965 GetNeonType(this,
4966 NeonTypeFlags(Double ? NeonTypeFlags::Float64
4967 : NeonTypeFlags::Float32, false, quad));
4968 llvm::Type *Tys[2] = { Ty, InTy };
4969 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtm");
4970 }
4971 case NEON::BI__builtin_neon_vcvtn_s32_v:
4972 case NEON::BI__builtin_neon_vcvtnq_s32_v:
4973 case NEON::BI__builtin_neon_vcvtn_u32_v:
4974 case NEON::BI__builtin_neon_vcvtnq_u32_v:
4975 case NEON::BI__builtin_neon_vcvtn_s64_v:
4976 case NEON::BI__builtin_neon_vcvtnq_s64_v:
4977 case NEON::BI__builtin_neon_vcvtn_u64_v:
4978 case NEON::BI__builtin_neon_vcvtnq_u64_v: {
4979 Int = usgn ? Intrinsic::aarch64_neon_fcvtnu : Intrinsic::aarch64_neon_fcvtns;
4980 bool Double =
4981 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
4982 llvm::Type *InTy =
4983 GetNeonType(this,
4984 NeonTypeFlags(Double ? NeonTypeFlags::Float64
4985 : NeonTypeFlags::Float32, false, quad));
4986 llvm::Type *Tys[2] = { Ty, InTy };
4987 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtn");
4988 }
4989 case NEON::BI__builtin_neon_vcvtp_s32_v:
4990 case NEON::BI__builtin_neon_vcvtpq_s32_v:
4991 case NEON::BI__builtin_neon_vcvtp_u32_v:
4992 case NEON::BI__builtin_neon_vcvtpq_u32_v:
4993 case NEON::BI__builtin_neon_vcvtp_s64_v:
4994 case NEON::BI__builtin_neon_vcvtpq_s64_v:
4995 case NEON::BI__builtin_neon_vcvtp_u64_v:
4996 case NEON::BI__builtin_neon_vcvtpq_u64_v: {
4997 Int = usgn ? Intrinsic::aarch64_neon_fcvtpu : Intrinsic::aarch64_neon_fcvtps;
4998 bool Double =
4999 (cast<llvm::IntegerType>(VTy->getElementType())->getBitWidth() == 64);
5000 llvm::Type *InTy =
5001 GetNeonType(this,
5002 NeonTypeFlags(Double ? NeonTypeFlags::Float64
5003 : NeonTypeFlags::Float32, false, quad));
5004 llvm::Type *Tys[2] = { Ty, InTy };
5005 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vcvtp");
5006 }
5007 case NEON::BI__builtin_neon_vmulx_v:
5008 case NEON::BI__builtin_neon_vmulxq_v: {
5009 Int = Intrinsic::aarch64_neon_fmulx;
5010 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vmulx");
5011 }
5012 case NEON::BI__builtin_neon_vmul_lane_v:
5013 case NEON::BI__builtin_neon_vmul_laneq_v: {
5014 // v1f64 vmul_lane should be mapped to Neon scalar mul lane
5015 bool Quad = false;
5016 if (BuiltinID == NEON::BI__builtin_neon_vmul_laneq_v)
5017 Quad = true;
5018 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
5019 llvm::Type *VTy = GetNeonType(this,
5020 NeonTypeFlags(NeonTypeFlags::Float64, false, Quad));
5021 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
5022 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2], "extract");
5023 Value *Result = Builder.CreateFMul(Ops[0], Ops[1]);
5024 return Builder.CreateBitCast(Result, Ty);
5025 }
5026 case NEON::BI__builtin_neon_vnegd_s64:
5027 return Builder.CreateNeg(EmitScalarExpr(E->getArg(0)), "vnegd");
5028 case NEON::BI__builtin_neon_vpmaxnm_v:
5029 case NEON::BI__builtin_neon_vpmaxnmq_v: {
5030 Int = Intrinsic::aarch64_neon_fmaxnmp;
5031 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpmaxnm");
5032 }
5033 case NEON::BI__builtin_neon_vpminnm_v:
5034 case NEON::BI__builtin_neon_vpminnmq_v: {
5035 Int = Intrinsic::aarch64_neon_fminnmp;
5036 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vpminnm");
5037 }
5038 case NEON::BI__builtin_neon_vsqrt_v:
5039 case NEON::BI__builtin_neon_vsqrtq_v: {
5040 Int = Intrinsic::sqrt;
5041 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5042 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqrt");
5043 }
5044 case NEON::BI__builtin_neon_vrbit_v:
5045 case NEON::BI__builtin_neon_vrbitq_v: {
5046 Int = Intrinsic::aarch64_neon_rbit;
5047 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vrbit");
5048 }
5049 case NEON::BI__builtin_neon_vaddv_u8:
5050 // FIXME: These are handled by the AArch64 scalar code.
5051 usgn = true;
5052 // FALLTHROUGH
5053 case NEON::BI__builtin_neon_vaddv_s8: {
5054 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
5055 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5056 VTy =
5057 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5058 llvm::Type *Tys[2] = { Ty, VTy };
5059 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5060 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
5061 return Builder.CreateTrunc(Ops[0],
5062 llvm::IntegerType::get(getLLVMContext(), 8));
5063 }
5064 case NEON::BI__builtin_neon_vaddv_u16:
5065 usgn = true;
5066 // FALLTHROUGH
5067 case NEON::BI__builtin_neon_vaddv_s16: {
5068 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
5069 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5070 VTy =
5071 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5072 llvm::Type *Tys[2] = { Ty, VTy };
5073 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5074 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
5075 return Builder.CreateTrunc(Ops[0],
5076 llvm::IntegerType::get(getLLVMContext(), 16));
5077 }
5078 case NEON::BI__builtin_neon_vaddvq_u8:
5079 usgn = true;
5080 // FALLTHROUGH
5081 case NEON::BI__builtin_neon_vaddvq_s8: {
5082 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
5083 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5084 VTy =
5085 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5086 llvm::Type *Tys[2] = { Ty, VTy };
5087 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5088 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
5089 return Builder.CreateTrunc(Ops[0],
5090 llvm::IntegerType::get(getLLVMContext(), 8));
5091 }
5092 case NEON::BI__builtin_neon_vaddvq_u16:
5093 usgn = true;
5094 // FALLTHROUGH
5095 case NEON::BI__builtin_neon_vaddvq_s16: {
5096 Int = usgn ? Intrinsic::aarch64_neon_uaddv : Intrinsic::aarch64_neon_saddv;
5097 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5098 VTy =
5099 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5100 llvm::Type *Tys[2] = { Ty, VTy };
5101 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5102 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddv");
5103 return Builder.CreateTrunc(Ops[0],
5104 llvm::IntegerType::get(getLLVMContext(), 16));
5105 }
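  // The across-vector reductions here and below (vaddv, vmaxv, vminv) share
  // one shape for the i8/i16 variants: the target intrinsic produces an i32
  // regardless of the element type, so the result is truncated back to the
  // original i8/i16 width after the call.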
5106 case NEON::BI__builtin_neon_vmaxv_u8: {
5107 Int = Intrinsic::aarch64_neon_umaxv;
5108 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5109 VTy =
5110 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5111 llvm::Type *Tys[2] = { Ty, VTy };
5112 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5113 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5114 return Builder.CreateTrunc(Ops[0],
5115 llvm::IntegerType::get(getLLVMContext(), 8));
5116 }
5117 case NEON::BI__builtin_neon_vmaxv_u16: {
5118 Int = Intrinsic::aarch64_neon_umaxv;
5119 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5120 VTy =
5121 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5122 llvm::Type *Tys[2] = { Ty, VTy };
5123 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5124 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5125 return Builder.CreateTrunc(Ops[0],
5126 llvm::IntegerType::get(getLLVMContext(), 16));
5127 }
5128 case NEON::BI__builtin_neon_vmaxvq_u8: {
5129 Int = Intrinsic::aarch64_neon_umaxv;
5130 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5131 VTy =
5132 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5133 llvm::Type *Tys[2] = { Ty, VTy };
5134 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5135 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5136 return Builder.CreateTrunc(Ops[0],
5137 llvm::IntegerType::get(getLLVMContext(), 8));
5138 }
5139 case NEON::BI__builtin_neon_vmaxvq_u16: {
5140 Int = Intrinsic::aarch64_neon_umaxv;
5141 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5142 VTy =
5143 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5144 llvm::Type *Tys[2] = { Ty, VTy };
5145 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5146 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5147 return Builder.CreateTrunc(Ops[0],
5148 llvm::IntegerType::get(getLLVMContext(), 16));
5149 }
5150 case NEON::BI__builtin_neon_vmaxv_s8: {
5151 Int = Intrinsic::aarch64_neon_smaxv;
5152 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5153 VTy =
5154 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5155 llvm::Type *Tys[2] = { Ty, VTy };
5156 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5157 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5158 return Builder.CreateTrunc(Ops[0],
5159 llvm::IntegerType::get(getLLVMContext(), 8));
5160 }
5161 case NEON::BI__builtin_neon_vmaxv_s16: {
5162 Int = Intrinsic::aarch64_neon_smaxv;
5163 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5164 VTy =
5165 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5166 llvm::Type *Tys[2] = { Ty, VTy };
5167 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5168 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5169 return Builder.CreateTrunc(Ops[0],
5170 llvm::IntegerType::get(getLLVMContext(), 16));
5171 }
5172 case NEON::BI__builtin_neon_vmaxvq_s8: {
5173 Int = Intrinsic::aarch64_neon_smaxv;
5174 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5175 VTy =
5176 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5177 llvm::Type *Tys[2] = { Ty, VTy };
5178 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5179 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5180 return Builder.CreateTrunc(Ops[0],
5181 llvm::IntegerType::get(getLLVMContext(), 8));
5182 }
5183 case NEON::BI__builtin_neon_vmaxvq_s16: {
5184 Int = Intrinsic::aarch64_neon_smaxv;
5185 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5186 VTy =
5187 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5188 llvm::Type *Tys[2] = { Ty, VTy };
5189 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5190 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vmaxv");
5191 return Builder.CreateTrunc(Ops[0],
5192 llvm::IntegerType::get(getLLVMContext(), 16));
5193 }
5194 case NEON::BI__builtin_neon_vminv_u8: {
5195 Int = Intrinsic::aarch64_neon_uminv;
5196 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5197 VTy =
5198 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5199 llvm::Type *Tys[2] = { Ty, VTy };
5200 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5201 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5202 return Builder.CreateTrunc(Ops[0],
5203 llvm::IntegerType::get(getLLVMContext(), 8));
5204 }
5205 case NEON::BI__builtin_neon_vminv_u16: {
5206 Int = Intrinsic::aarch64_neon_uminv;
5207 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5208 VTy =
5209 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5210 llvm::Type *Tys[2] = { Ty, VTy };
5211 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5212 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5213 return Builder.CreateTrunc(Ops[0],
5214 llvm::IntegerType::get(getLLVMContext(), 16));
5215 }
5216 case NEON::BI__builtin_neon_vminvq_u8: {
5217 Int = Intrinsic::aarch64_neon_uminv;
5218 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5219 VTy =
5220 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5221 llvm::Type *Tys[2] = { Ty, VTy };
5222 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5223 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5224 return Builder.CreateTrunc(Ops[0],
5225 llvm::IntegerType::get(getLLVMContext(), 8));
5226 }
5227 case NEON::BI__builtin_neon_vminvq_u16: {
5228 Int = Intrinsic::aarch64_neon_uminv;
5229 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5230 VTy =
5231 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5232 llvm::Type *Tys[2] = { Ty, VTy };
5233 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5234 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5235 return Builder.CreateTrunc(Ops[0],
5236 llvm::IntegerType::get(getLLVMContext(), 16));
5237 }
5238 case NEON::BI__builtin_neon_vminv_s8: {
5239 Int = Intrinsic::aarch64_neon_sminv;
5240 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5241 VTy =
5242 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5243 llvm::Type *Tys[2] = { Ty, VTy };
5244 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5245 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5246 return Builder.CreateTrunc(Ops[0],
5247 llvm::IntegerType::get(getLLVMContext(), 8));
5248 }
5249 case NEON::BI__builtin_neon_vminv_s16: {
5250 Int = Intrinsic::aarch64_neon_sminv;
5251 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5252 VTy =
5253 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5254 llvm::Type *Tys[2] = { Ty, VTy };
5255 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5256 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5257 return Builder.CreateTrunc(Ops[0],
5258 llvm::IntegerType::get(getLLVMContext(), 16));
5259 }
5260 case NEON::BI__builtin_neon_vminvq_s8: {
5261 Int = Intrinsic::aarch64_neon_sminv;
5262 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5263 VTy =
5264 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5265 llvm::Type *Tys[2] = { Ty, VTy };
5266 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5267 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5268 return Builder.CreateTrunc(Ops[0],
5269 llvm::IntegerType::get(getLLVMContext(), 8));
5270 }
5271 case NEON::BI__builtin_neon_vminvq_s16: {
5272 Int = Intrinsic::aarch64_neon_sminv;
5273 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5274 VTy =
5275 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5276 llvm::Type *Tys[2] = { Ty, VTy };
5277 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5278 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vminv");
5279 return Builder.CreateTrunc(Ops[0],
5280 llvm::IntegerType::get(getLLVMContext(), 16));
5281 }
5282 case NEON::BI__builtin_neon_vmul_n_f64: {
5283 Ops[0] = Builder.CreateBitCast(Ops[0], DoubleTy);
5284 Value *RHS = Builder.CreateBitCast(EmitScalarExpr(E->getArg(1)), DoubleTy);
5285 return Builder.CreateFMul(Ops[0], RHS);
5286 }
5287 case NEON::BI__builtin_neon_vaddlv_u8: {
5288 Int = Intrinsic::aarch64_neon_uaddlv;
5289 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5290 VTy =
5291 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5292 llvm::Type *Tys[2] = { Ty, VTy };
5293 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5294 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5295 return Builder.CreateTrunc(Ops[0],
5296 llvm::IntegerType::get(getLLVMContext(), 16));
5297 }
5298 case NEON::BI__builtin_neon_vaddlv_u16: {
5299 Int = Intrinsic::aarch64_neon_uaddlv;
5300 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5301 VTy =
5302 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5303 llvm::Type *Tys[2] = { Ty, VTy };
5304 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5305 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5306 }
5307 case NEON::BI__builtin_neon_vaddlvq_u8: {
5308 Int = Intrinsic::aarch64_neon_uaddlv;
5309 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5310 VTy =
5311 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5312 llvm::Type *Tys[2] = { Ty, VTy };
5313 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5314 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5315 return Builder.CreateTrunc(Ops[0],
5316 llvm::IntegerType::get(getLLVMContext(), 16));
5317 }
5318 case NEON::BI__builtin_neon_vaddlvq_u16: {
5319 Int = Intrinsic::aarch64_neon_uaddlv;
5320 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5321 VTy =
5322 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5323 llvm::Type *Tys[2] = { Ty, VTy };
5324 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5325 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5326 }
5327 case NEON::BI__builtin_neon_vaddlv_s8: {
5328 Int = Intrinsic::aarch64_neon_saddlv;
5329 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5330 VTy =
5331 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 8);
5332 llvm::Type *Tys[2] = { Ty, VTy };
5333 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5334 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5335 return Builder.CreateTrunc(Ops[0],
5336 llvm::IntegerType::get(getLLVMContext(), 16));
5337 }
5338 case NEON::BI__builtin_neon_vaddlv_s16: {
5339 Int = Intrinsic::aarch64_neon_saddlv;
5340 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5341 VTy =
5342 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 4);
5343 llvm::Type *Tys[2] = { Ty, VTy };
5344 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5345 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5346 }
5347 case NEON::BI__builtin_neon_vaddlvq_s8: {
5348 Int = Intrinsic::aarch64_neon_saddlv;
5349 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5350 VTy =
5351 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 8), 16);
5352 llvm::Type *Tys[2] = { Ty, VTy };
5353 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5354 Ops[0] = EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5355 return Builder.CreateTrunc(Ops[0],
5356 llvm::IntegerType::get(getLLVMContext(), 16));
5357 }
5358 case NEON::BI__builtin_neon_vaddlvq_s16: {
5359 Int = Intrinsic::aarch64_neon_saddlv;
5360 Ty = llvm::IntegerType::get(getLLVMContext(), 32);
5361 VTy =
5362 llvm::VectorType::get(llvm::IntegerType::get(getLLVMContext(), 16), 8);
5363 llvm::Type *Tys[2] = { Ty, VTy };
5364 Ops.push_back(EmitScalarExpr(E->getArg(0)));
5365 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), Ops, "vaddlv");
5366 }
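  // The vaddlv cases above use the widening reductions uaddlv/saddlv, whose
  // result is twice the element width. A sketch for vaddlv_u8 (assuming the
  // standard intrinsic mangling):
  //   %sum = call i32 @llvm.aarch64.neon.uaddlv.i32.v8i8(<8 x i8> %a)
  //   %res = trunc i32 %sum to i16   ; vaddlv_u8 returns a 16-bit result
  // The 16-bit element forms already produce a 32-bit result, so they return
  // the call directly without a trunc.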
5367 case NEON::BI__builtin_neon_vsri_n_v:
5368 case NEON::BI__builtin_neon_vsriq_n_v: {
5369 Int = Intrinsic::aarch64_neon_vsri;
5370 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
5371 return EmitNeonCall(Intrin, Ops, "vsri_n");
5372 }
5373 case NEON::BI__builtin_neon_vsli_n_v:
5374 case NEON::BI__builtin_neon_vsliq_n_v: {
5375 Int = Intrinsic::aarch64_neon_vsli;
5376 llvm::Function *Intrin = CGM.getIntrinsic(Int, Ty);
5377 return EmitNeonCall(Intrin, Ops, "vsli_n");
5378 }
5379 case NEON::BI__builtin_neon_vsra_n_v:
5380 case NEON::BI__builtin_neon_vsraq_n_v:
5381 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5382 Ops[1] = EmitNeonRShiftImm(Ops[1], Ops[2], Ty, usgn, "vsra_n");
5383 return Builder.CreateAdd(Ops[0], Ops[1]);
5384 case NEON::BI__builtin_neon_vrsra_n_v:
5385 case NEON::BI__builtin_neon_vrsraq_n_v: {
5386 Int = usgn ? Intrinsic::aarch64_neon_urshl : Intrinsic::aarch64_neon_srshl;
5387 SmallVector<llvm::Value*,2> TmpOps;
5388 TmpOps.push_back(Ops[1]);
5389 TmpOps.push_back(Ops[2]);
5390 Function* F = CGM.getIntrinsic(Int, Ty);
5391 llvm::Value *tmp = EmitNeonCall(F, TmpOps, "vrshr_n", 1, true);
5392 Ops[0] = Builder.CreateBitCast(Ops[0], VTy);
5393 return Builder.CreateAdd(Ops[0], tmp);
5394 }
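  // There is no separate rounding-shift-right intrinsic here; vrsra_n is
  // composed from a rounding shift plus an add. The trailing (1, true)
  // arguments to EmitNeonCall splat and negate the constant shift amount so
  // the (s|u)rshl intrinsic performs a right shift, roughly:
  //   %sh  = call <n x ty> @llvm.aarch64.neon.urshl(<n x ty> %b, <n x ty> <-amt>)
  //   %res = add <n x ty> %a, %sh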
5395 // FIXME: Sharing loads & stores with 32-bit is complicated by the absence
5396 // of an Align parameter here.
5397 case NEON::BI__builtin_neon_vld1_x2_v:
5398 case NEON::BI__builtin_neon_vld1q_x2_v:
5399 case NEON::BI__builtin_neon_vld1_x3_v:
5400 case NEON::BI__builtin_neon_vld1q_x3_v:
5401 case NEON::BI__builtin_neon_vld1_x4_v:
5402 case NEON::BI__builtin_neon_vld1q_x4_v: {
5403 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5404 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5405 llvm::Type *Tys[2] = { VTy, PTy };
5406 unsigned Int;
5407 switch (BuiltinID) {
5408 case NEON::BI__builtin_neon_vld1_x2_v:
5409 case NEON::BI__builtin_neon_vld1q_x2_v:
5410 Int = Intrinsic::aarch64_neon_ld1x2;
5411 break;
5412 case NEON::BI__builtin_neon_vld1_x3_v:
5413 case NEON::BI__builtin_neon_vld1q_x3_v:
5414 Int = Intrinsic::aarch64_neon_ld1x3;
5415 break;
5416 case NEON::BI__builtin_neon_vld1_x4_v:
5417 case NEON::BI__builtin_neon_vld1q_x4_v:
5418 Int = Intrinsic::aarch64_neon_ld1x4;
5419 break;
5420 }
5421 Function *F = CGM.getIntrinsic(Int, Tys);
5422 Ops[1] = Builder.CreateCall(F, Ops[1], "vld1xN");
5423 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5424 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5425 return Builder.CreateStore(Ops[1], Ops[0]);
5426 }
5427 case NEON::BI__builtin_neon_vst1_x2_v:
5428 case NEON::BI__builtin_neon_vst1q_x2_v:
5429 case NEON::BI__builtin_neon_vst1_x3_v:
5430 case NEON::BI__builtin_neon_vst1q_x3_v:
5431 case NEON::BI__builtin_neon_vst1_x4_v:
5432 case NEON::BI__builtin_neon_vst1q_x4_v: {
5433 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy->getVectorElementType());
5434 llvm::Type *Tys[2] = { VTy, PTy };
5435 unsigned Int;
5436 switch (BuiltinID) {
5437 case NEON::BI__builtin_neon_vst1_x2_v:
5438 case NEON::BI__builtin_neon_vst1q_x2_v:
5439 Int = Intrinsic::aarch64_neon_st1x2;
5440 break;
5441 case NEON::BI__builtin_neon_vst1_x3_v:
5442 case NEON::BI__builtin_neon_vst1q_x3_v:
5443 Int = Intrinsic::aarch64_neon_st1x3;
5444 break;
5445 case NEON::BI__builtin_neon_vst1_x4_v:
5446 case NEON::BI__builtin_neon_vst1q_x4_v:
5447 Int = Intrinsic::aarch64_neon_st1x4;
5448 break;
5449 }
5450 SmallVector<Value *, 4> IntOps(Ops.begin()+1, Ops.end());
5451 IntOps.push_back(Ops[0]);
5452 return EmitNeonCall(CGM.getIntrinsic(Int, Tys), IntOps, "");
5453 }
5454 case NEON::BI__builtin_neon_vld1_v:
5455 case NEON::BI__builtin_neon_vld1q_v:
5456 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
5457 return Builder.CreateLoad(Ops[0]);
5458 case NEON::BI__builtin_neon_vst1_v:
5459 case NEON::BI__builtin_neon_vst1q_v:
5460 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(VTy));
5461 Ops[1] = Builder.CreateBitCast(Ops[1], VTy);
5462 return Builder.CreateStore(Ops[1], Ops[0]);
5463 case NEON::BI__builtin_neon_vld1_lane_v:
5464 case NEON::BI__builtin_neon_vld1q_lane_v:
5465 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5466 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5467 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5468 Ops[0] = Builder.CreateLoad(Ops[0]);
5469 return Builder.CreateInsertElement(Ops[1], Ops[0], Ops[2], "vld1_lane");
5470 case NEON::BI__builtin_neon_vld1_dup_v:
5471 case NEON::BI__builtin_neon_vld1q_dup_v: {
5472 Value *V = UndefValue::get(Ty);
5473 Ty = llvm::PointerType::getUnqual(VTy->getElementType());
5474 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5475 Ops[0] = Builder.CreateLoad(Ops[0]);
5476 llvm::Constant *CI = ConstantInt::get(Int32Ty, 0);
5477 Ops[0] = Builder.CreateInsertElement(V, Ops[0], CI);
5478 return EmitNeonSplat(Ops[0], CI);
5479 }
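  // vld1_dup is open-coded: load one element, insert it into lane 0 of an
  // undef vector, then splat lane 0 across the result via EmitNeonSplat.
  // A rough sketch for a <4 x float> dup:
  //   %v = load float* %p
  //   %t = insertelement <4 x float> undef, float %v, i32 0
  //   %r = shufflevector <4 x float> %t, <4 x float> undef, <4 x i32> zeroinitializer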
5480 case NEON::BI__builtin_neon_vst1_lane_v:
5481 case NEON::BI__builtin_neon_vst1q_lane_v:
5482 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5483 Ops[1] = Builder.CreateExtractElement(Ops[1], Ops[2]);
5484 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5485 return Builder.CreateStore(Ops[1], Builder.CreateBitCast(Ops[0], Ty));
5486 case NEON::BI__builtin_neon_vld2_v:
5487 case NEON::BI__builtin_neon_vld2q_v: {
5488 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
5489 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5490 llvm::Type *Tys[2] = { VTy, PTy };
5491 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2, Tys);
5492 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
5493 Ops[0] = Builder.CreateBitCast(Ops[0],
5494 llvm::PointerType::getUnqual(Ops[1]->getType()));
5495 return Builder.CreateStore(Ops[1], Ops[0]);
5496 }
5497 case NEON::BI__builtin_neon_vld3_v:
5498 case NEON::BI__builtin_neon_vld3q_v: {
5499 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
5500 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5501 llvm::Type *Tys[2] = { VTy, PTy };
5502 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3, Tys);
5503 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
5504 Ops[0] = Builder.CreateBitCast(Ops[0],
5505 llvm::PointerType::getUnqual(Ops[1]->getType()));
5506 return Builder.CreateStore(Ops[1], Ops[0]);
5507 }
5508 case NEON::BI__builtin_neon_vld4_v:
5509 case NEON::BI__builtin_neon_vld4q_v: {
5510 llvm::Type *PTy = llvm::PointerType::getUnqual(VTy);
5511 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5512 llvm::Type *Tys[2] = { VTy, PTy };
5513 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4, Tys);
5514 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
5515 Ops[0] = Builder.CreateBitCast(Ops[0],
5516 llvm::PointerType::getUnqual(Ops[1]->getType()));
5517 return Builder.CreateStore(Ops[1], Ops[0]);
5518 }
5519 case NEON::BI__builtin_neon_vld2_dup_v:
5520 case NEON::BI__builtin_neon_vld2q_dup_v: {
5521 llvm::Type *PTy =
5522 llvm::PointerType::getUnqual(VTy->getElementType());
5523 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5524 llvm::Type *Tys[2] = { VTy, PTy };
5525 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2r, Tys);
5526 Ops[1] = Builder.CreateCall(F, Ops[1], "vld2");
5527 Ops[0] = Builder.CreateBitCast(Ops[0],
5528 llvm::PointerType::getUnqual(Ops[1]->getType()));
5529 return Builder.CreateStore(Ops[1], Ops[0]);
5530 }
5531 case NEON::BI__builtin_neon_vld3_dup_v:
5532 case NEON::BI__builtin_neon_vld3q_dup_v: {
5533 llvm::Type *PTy =
5534 llvm::PointerType::getUnqual(VTy->getElementType());
5535 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5536 llvm::Type *Tys[2] = { VTy, PTy };
5537 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3r, Tys);
5538 Ops[1] = Builder.CreateCall(F, Ops[1], "vld3");
5539 Ops[0] = Builder.CreateBitCast(Ops[0],
5540 llvm::PointerType::getUnqual(Ops[1]->getType()));
5541 return Builder.CreateStore(Ops[1], Ops[0]);
5542 }
5543 case NEON::BI__builtin_neon_vld4_dup_v:
5544 case NEON::BI__builtin_neon_vld4q_dup_v: {
5545 llvm::Type *PTy =
5546 llvm::PointerType::getUnqual(VTy->getElementType());
5547 Ops[1] = Builder.CreateBitCast(Ops[1], PTy);
5548 llvm::Type *Tys[2] = { VTy, PTy };
5549 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4r, Tys);
5550 Ops[1] = Builder.CreateCall(F, Ops[1], "vld4");
5551 Ops[0] = Builder.CreateBitCast(Ops[0],
5552 llvm::PointerType::getUnqual(Ops[1]->getType()));
5553 return Builder.CreateStore(Ops[1], Ops[0]);
5554 }
5555 case NEON::BI__builtin_neon_vld2_lane_v:
5556 case NEON::BI__builtin_neon_vld2q_lane_v: {
5557 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
5558 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld2lane, Tys);
5559 Ops.push_back(Ops[1]);
5560 Ops.erase(Ops.begin()+1);
5561 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5562 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5563 Ops[3] = Builder.CreateZExt(Ops[3],
5564 llvm::IntegerType::get(getLLVMContext(), 64));
5565 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld2_lane");
5566 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5567 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5568 return Builder.CreateStore(Ops[1], Ops[0]);
5569 }
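  // Note the operand rotation above: the builtin's operands arrive as
  // (retslot*, ptr, vec0, vec1, lane), but the aarch64 ld2lane intrinsic
  // expects (vec0, vec1, lane, ptr). The load pointer is therefore moved to
  // the end, the call is made on Ops[1..], and the aggregate result is
  // stored back through the return slot Ops[0]. The same dance is done for
  // ld3lane and ld4lane below.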
5570 case NEON::BI__builtin_neon_vld3_lane_v:
5571 case NEON::BI__builtin_neon_vld3q_lane_v: {
5572 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
5573 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld3lane, Tys);
5574 Ops.push_back(Ops[1]);
5575 Ops.erase(Ops.begin()+1);
5576 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5577 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5578 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
5579 Ops[4] = Builder.CreateZExt(Ops[4],
5580 llvm::IntegerType::get(getLLVMContext(), 64));
5581 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld3_lane");
5582 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5583 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5584 return Builder.CreateStore(Ops[1], Ops[0]);
5585 }
5586 case NEON::BI__builtin_neon_vld4_lane_v:
5587 case NEON::BI__builtin_neon_vld4q_lane_v: {
5588 llvm::Type *Tys[2] = { VTy, Ops[1]->getType() };
5589 Function *F = CGM.getIntrinsic(Intrinsic::aarch64_neon_ld4lane, Tys);
5590 Ops.push_back(Ops[1]);
5591 Ops.erase(Ops.begin()+1);
5592 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5593 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5594 Ops[3] = Builder.CreateBitCast(Ops[3], Ty);
5595 Ops[4] = Builder.CreateBitCast(Ops[4], Ty);
5596 Ops[5] = Builder.CreateZExt(Ops[5],
5597 llvm::IntegerType::get(getLLVMContext(), 64));
5598 Ops[1] = Builder.CreateCall(F, makeArrayRef(Ops).slice(1), "vld4_lane");
5599 Ty = llvm::PointerType::getUnqual(Ops[1]->getType());
5600 Ops[0] = Builder.CreateBitCast(Ops[0], Ty);
5601 return Builder.CreateStore(Ops[1], Ops[0]);
5602 }
5603 case NEON::BI__builtin_neon_vst2_v:
5604 case NEON::BI__builtin_neon_vst2q_v: {
5605 Ops.push_back(Ops[0]);
5606 Ops.erase(Ops.begin());
5607 llvm::Type *Tys[2] = { VTy, Ops[2]->getType() };
5608 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2, Tys),
5609 Ops, "");
5610 }
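  // For the stores the rotation is simpler: the builtin's leading pointer is
  // moved to the back because the aarch64 stN intrinsics take the address
  // last, e.g. st2 is called as (vec0, vec1, ptr).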
5611 case NEON::BI__builtin_neon_vst2_lane_v:
5612 case NEON::BI__builtin_neon_vst2q_lane_v: {
5613 Ops.push_back(Ops[0]);
5614 Ops.erase(Ops.begin());
5615 Ops[2] = Builder.CreateZExt(Ops[2],
5616 llvm::IntegerType::get(getLLVMContext(), 64));
5617 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
5618 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st2lane, Tys),
5619 Ops, "");
5620 }
5621 case NEON::BI__builtin_neon_vst3_v:
5622 case NEON::BI__builtin_neon_vst3q_v: {
5623 Ops.push_back(Ops[0]);
5624 Ops.erase(Ops.begin());
5625 llvm::Type *Tys[2] = { VTy, Ops[3]->getType() };
5626 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3, Tys),
5627 Ops, "");
5628 }
5629 case NEON::BI__builtin_neon_vst3_lane_v:
5630 case NEON::BI__builtin_neon_vst3q_lane_v: {
5631 Ops.push_back(Ops[0]);
5632 Ops.erase(Ops.begin());
5633 Ops[3] = Builder.CreateZExt(Ops[3],
5634 llvm::IntegerType::get(getLLVMContext(), 64));
5635 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
5636 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st3lane, Tys),
5637 Ops, "");
5638 }
5639 case NEON::BI__builtin_neon_vst4_v:
5640 case NEON::BI__builtin_neon_vst4q_v: {
5641 Ops.push_back(Ops[0]);
5642 Ops.erase(Ops.begin());
5643 llvm::Type *Tys[2] = { VTy, Ops[4]->getType() };
5644 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4, Tys),
5645 Ops, "");
5646 }
5647 case NEON::BI__builtin_neon_vst4_lane_v:
5648 case NEON::BI__builtin_neon_vst4q_lane_v: {
5649 Ops.push_back(Ops[0]);
5650 Ops.erase(Ops.begin());
5651 Ops[4] = Builder.CreateZExt(Ops[4],
5652 llvm::IntegerType::get(getLLVMContext(), 64));
5653 llvm::Type *Tys[2] = { VTy, Ops[5]->getType() };
5654 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_st4lane, Tys),
5655 Ops, "");
5656 }
5657 case NEON::BI__builtin_neon_vtrn_v:
5658 case NEON::BI__builtin_neon_vtrnq_v: {
5659 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5660 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5661 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5662 Value *SV = nullptr;
5663
5664 for (unsigned vi = 0; vi != 2; ++vi) {
5665 SmallVector<Constant*, 16> Indices;
5666 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5667 Indices.push_back(ConstantInt::get(Int32Ty, i+vi));
5668 Indices.push_back(ConstantInt::get(Int32Ty, i+e+vi));
5669 }
5670 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
5671 SV = llvm::ConstantVector::get(Indices);
5672 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vtrn");
5673 SV = Builder.CreateStore(SV, Addr);
5674 }
5675 return SV;
5676 }
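  // vtrn is open-coded as two shufflevectors, each stored to the sret
  // pointer. A worked example for <4 x i16> (e == 4): vi == 0 uses mask
  // <0,4,2,6> and vi == 1 uses mask <1,5,3,7>, i.e. the even and odd
  // transposed halves of the two inputs.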
5677 case NEON::BI__builtin_neon_vuzp_v:
5678 case NEON::BI__builtin_neon_vuzpq_v: {
5679 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5680 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5681 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5682 Value *SV = nullptr;
5683
5684 for (unsigned vi = 0; vi != 2; ++vi) {
5685 SmallVector<Constant*, 16> Indices;
5686 for (unsigned i = 0, e = VTy->getNumElements(); i != e; ++i)
5687 Indices.push_back(ConstantInt::get(Int32Ty, 2*i+vi));
5688
5689 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
5690 SV = llvm::ConstantVector::get(Indices);
5691 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vuzp");
5692 SV = Builder.CreateStore(SV, Addr);
5693 }
5694 return SV;
5695 }
5696 case NEON::BI__builtin_neon_vzip_v:
5697 case NEON::BI__builtin_neon_vzipq_v: {
5698 Ops[0] = Builder.CreateBitCast(Ops[0], llvm::PointerType::getUnqual(Ty));
5699 Ops[1] = Builder.CreateBitCast(Ops[1], Ty);
5700 Ops[2] = Builder.CreateBitCast(Ops[2], Ty);
5701 Value *SV = nullptr;
5702
5703 for (unsigned vi = 0; vi != 2; ++vi) {
5704 SmallVector<Constant*, 16> Indices;
5705 for (unsigned i = 0, e = VTy->getNumElements(); i != e; i += 2) {
5706 Indices.push_back(ConstantInt::get(Int32Ty, (i + vi*e) >> 1));
5707 Indices.push_back(ConstantInt::get(Int32Ty, ((i + vi*e) >> 1)+e));
5708 }
5709 Value *Addr = Builder.CreateConstInBoundsGEP1_32(Ops[0], vi);
5710 SV = llvm::ConstantVector::get(Indices);
5711 SV = Builder.CreateShuffleVector(Ops[1], Ops[2], SV, "vzip");
5712 SV = Builder.CreateStore(SV, Addr);
5713 }
5714 return SV;
5715 }
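  // vuzp and vzip follow the same store-through-sret pattern with different
  // masks. For <4 x i16>: vuzp uses <0,2,4,6> then <1,3,5,7>
  // (de-interleave), and vzip uses <0,4,1,5> then <2,6,3,7> (interleave).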
5716 case NEON::BI__builtin_neon_vqtbl1q_v: {
5717 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl1, Ty),
5718 Ops, "vtbl1");
5719 }
5720 case NEON::BI__builtin_neon_vqtbl2q_v: {
5721 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl2, Ty),
5722 Ops, "vtbl2");
5723 }
5724 case NEON::BI__builtin_neon_vqtbl3q_v: {
5725 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl3, Ty),
5726 Ops, "vtbl3");
5727 }
5728 case NEON::BI__builtin_neon_vqtbl4q_v: {
5729 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbl4, Ty),
5730 Ops, "vtbl4");
5731 }
5732 case NEON::BI__builtin_neon_vqtbx1q_v: {
5733 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx1, Ty),
5734 Ops, "vtbx1");
5735 }
5736 case NEON::BI__builtin_neon_vqtbx2q_v: {
5737 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx2, Ty),
5738 Ops, "vtbx2");
5739 }
5740 case NEON::BI__builtin_neon_vqtbx3q_v: {
5741 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx3, Ty),
5742 Ops, "vtbx3");
5743 }
5744 case NEON::BI__builtin_neon_vqtbx4q_v: {
5745 return EmitNeonCall(CGM.getIntrinsic(Intrinsic::aarch64_neon_tbx4, Ty),
5746 Ops, "vtbx4");
5747 }
5748 case NEON::BI__builtin_neon_vsqadd_v:
5749 case NEON::BI__builtin_neon_vsqaddq_v: {
5750 Int = Intrinsic::aarch64_neon_usqadd;
5751 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vsqadd");
5752 }
5753 case NEON::BI__builtin_neon_vuqadd_v:
5754 case NEON::BI__builtin_neon_vuqaddq_v: {
5755 Int = Intrinsic::aarch64_neon_suqadd;
5756 return EmitNeonCall(CGM.getIntrinsic(Int, Ty), Ops, "vuqadd");
5757 }
5758 }
5759 }
5760
5761 llvm::Value *CodeGenFunction::
5762 BuildVector(ArrayRef<llvm::Value*> Ops) {
5763 assert((Ops.size() & (Ops.size() - 1)) == 0 &&
5764 "Not a power-of-two sized vector!");
5765 bool AllConstants = true;
5766 for (unsigned i = 0, e = Ops.size(); i != e && AllConstants; ++i)
5767 AllConstants &= isa<Constant>(Ops[i]);
5768
5769 // If this is a constant vector, create a ConstantVector.
5770 if (AllConstants) {
5771 SmallVector<llvm::Constant*, 16> CstOps;
5772 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
5773 CstOps.push_back(cast<Constant>(Ops[i]));
5774 return llvm::ConstantVector::get(CstOps);
5775 }
5776
5777 // Otherwise, insertelement the values to build the vector.
5778 Value *Result =
5779 llvm::UndefValue::get(llvm::VectorType::get(Ops[0]->getType(), Ops.size()));
5780
5781 for (unsigned i = 0, e = Ops.size(); i != e; ++i)
5782 Result = Builder.CreateInsertElement(Result, Ops[i], Builder.getInt32(i));
5783
5784 return Result;
5785 }
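// A usage sketch (hypothetical operands): given four ConstantInt i16 values,
// BuildVector returns a single <4 x i16> ConstantVector; if any operand is
// non-constant, it instead emits a chain of four insertelement instructions
// starting from undef.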
5786
5787 Value *CodeGenFunction::EmitX86BuiltinExpr(unsigned BuiltinID,
5788 const CallExpr *E) {
5789 SmallVector<Value*, 4> Ops;
5790
5791 // Find out if any arguments are required to be integer constant expressions.
5792 unsigned ICEArguments = 0;
5793 ASTContext::GetBuiltinTypeError Error;
5794 getContext().GetBuiltinType(BuiltinID, Error, &ICEArguments);
5795 assert(Error == ASTContext::GE_None && "Should not codegen an error");
5796
5797 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++) {
5798 // If this is a normal argument, just emit it as a scalar.
5799 if ((ICEArguments & (1 << i)) == 0) {
5800 Ops.push_back(EmitScalarExpr(E->getArg(i)));
5801 continue;
5802 }
5803
5804 // If this is required to be a constant, constant fold it so that we know
5805 // that the generated intrinsic gets a ConstantInt.
5806 llvm::APSInt Result;
5807 bool IsConst = E->getArg(i)->isIntegerConstantExpr(Result, getContext());
5808 assert(IsConst && "Constant arg isn't actually constant?"); (void)IsConst;
5809 Ops.push_back(llvm::ConstantInt::get(getLLVMContext(), Result));
5810 }
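  // ICEArguments is a bitmask with bit i set when argument i of the builtin
  // must be an integer constant expression (e.g. an imm8 selector); those
  // arguments are folded in the loop above so the intrinsic call always sees
  // a ConstantInt rather than an arbitrary scalar.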
5811
5812 switch (BuiltinID) {
5813 default: return nullptr;
5814 case X86::BI_mm_prefetch: {
5815 Value *Address = EmitScalarExpr(E->getArg(0));
5816 Value *RW = ConstantInt::get(Int32Ty, 0);
5817 Value *Locality = EmitScalarExpr(E->getArg(1));
5818 Value *Data = ConstantInt::get(Int32Ty, 1);
5819 Value *F = CGM.getIntrinsic(Intrinsic::prefetch);
5820 return Builder.CreateCall4(F, Address, RW, Locality, Data);
5821 }
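  // llvm.prefetch takes (address, rw, locality, cache type); _mm_prefetch
  // above is emitted as a read (RW == 0) of the data cache (Data == 1), with
  // the builtin's second argument passed through as the locality hint.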
5822 case X86::BI__builtin_ia32_vec_init_v8qi:
5823 case X86::BI__builtin_ia32_vec_init_v4hi:
5824 case X86::BI__builtin_ia32_vec_init_v2si:
5825 return Builder.CreateBitCast(BuildVector(Ops),
5826 llvm::Type::getX86_MMXTy(getLLVMContext()));
5827 case X86::BI__builtin_ia32_vec_ext_v2si:
5828 return Builder.CreateExtractElement(Ops[0],
5829 llvm::ConstantInt::get(Ops[1]->getType(), 0));
5830 case X86::BI__builtin_ia32_ldmxcsr: {
5831 Value *Tmp = CreateMemTemp(E->getArg(0)->getType());
5832 Builder.CreateStore(Ops[0], Tmp);
5833 return Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_ldmxcsr),
5834 Builder.CreateBitCast(Tmp, Int8PtrTy));
5835 }
5836 case X86::BI__builtin_ia32_stmxcsr: {
5837 Value *Tmp = CreateMemTemp(E->getType());
5838 Builder.CreateCall(CGM.getIntrinsic(Intrinsic::x86_sse_stmxcsr),
5839 Builder.CreateBitCast(Tmp, Int8PtrTy));
5840 return Builder.CreateLoad(Tmp, "stmxcsr");
5841 }
5842 case X86::BI__builtin_ia32_storehps:
5843 case X86::BI__builtin_ia32_storelps: {
5844 llvm::Type *PtrTy = llvm::PointerType::getUnqual(Int64Ty);
5845 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
5846
5847     // cast the value to v2i64
5848 Ops[1] = Builder.CreateBitCast(Ops[1], VecTy, "cast");
5849
5850     // extract element 0 (storelps) or 1 (storehps)
5851 unsigned Index = BuiltinID == X86::BI__builtin_ia32_storelps ? 0 : 1;
5852 llvm::Value *Idx = llvm::ConstantInt::get(SizeTy, Index);
5853 Ops[1] = Builder.CreateExtractElement(Ops[1], Idx, "extract");
5854
5855 // cast pointer to i64 & store
5856 Ops[0] = Builder.CreateBitCast(Ops[0], PtrTy);
5857 return Builder.CreateStore(Ops[1], Ops[0]);
5858 }
5859 case X86::BI__builtin_ia32_palignr: {
5860 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
5861
5862 // If palignr is shifting the pair of input vectors less than 9 bytes,
5863 // emit a shuffle instruction.
5864 if (shiftVal <= 8) {
5865 SmallVector<llvm::Constant*, 8> Indices;
5866 for (unsigned i = 0; i != 8; ++i)
5867 Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
5868
5869 Value* SV = llvm::ConstantVector::get(Indices);
5870 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
5871 }
5872
5873 // If palignr is shifting the pair of input vectors more than 8 but less
5874 // than 16 bytes, emit a logical right shift of the destination.
5875 if (shiftVal < 16) {
5876 // MMX has these as 1 x i64 vectors for some odd optimization reasons.
5877 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 1);
5878
5879 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
5880 Ops[1] = llvm::ConstantInt::get(VecTy, (shiftVal-8) * 8);
5881
5882       // emit the 64-bit logical right shift
5883 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_mmx_psrl_q);
5884 return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
5885 }
5886
5887     // If palignr is shifting the pair of vectors 16 bytes or more, emit zero.
5888 return llvm::Constant::getNullValue(ConvertType(E->getType()));
5889 }
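  // Worked example for the shuffle path above: with shiftVal == 3, the mask
  // is <3,4,5,6,7,8,9,10> over the concatenation (Ops[1], Ops[0]), i.e.
  // bytes 3..7 of the low source followed by bytes 0..2 of the high source,
  // matching palignr's "shift the byte pair right by imm" semantics.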
5890 case X86::BI__builtin_ia32_palignr128: {
5891 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
5892
5893 // If palignr is shifting the pair of input vectors less than 17 bytes,
5894 // emit a shuffle instruction.
5895 if (shiftVal <= 16) {
5896 SmallVector<llvm::Constant*, 16> Indices;
5897 for (unsigned i = 0; i != 16; ++i)
5898 Indices.push_back(llvm::ConstantInt::get(Int32Ty, shiftVal + i));
5899
5900 Value* SV = llvm::ConstantVector::get(Indices);
5901 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
5902 }
5903
5904 // If palignr is shifting the pair of input vectors more than 16 but less
5905 // than 32 bytes, emit a logical right shift of the destination.
5906 if (shiftVal < 32) {
5907 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 2);
5908
5909 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
5910 Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
5911
5912       // psrl.dq takes its shift amount in bits
5913 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_sse2_psrl_dq);
5914 return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
5915 }
5916
5917     // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
5918 return llvm::Constant::getNullValue(ConvertType(E->getType()));
5919 }
5920 case X86::BI__builtin_ia32_palignr256: {
5921 unsigned shiftVal = cast<llvm::ConstantInt>(Ops[2])->getZExtValue();
5922
5923 // If palignr is shifting the pair of input vectors less than 17 bytes,
5924 // emit a shuffle instruction.
5925 if (shiftVal <= 16) {
5926 SmallVector<llvm::Constant*, 32> Indices;
5927       // 256-bit palignr operates on each 128-bit lane independently, so the
5928       // shuffle mask is built lane by lane.
5928 for (unsigned l = 0; l != 2; ++l) {
5929 unsigned LaneStart = l * 16;
5930 unsigned LaneEnd = (l+1) * 16;
5931 for (unsigned i = 0; i != 16; ++i) {
5932 unsigned Idx = shiftVal + i + LaneStart;
5933 if (Idx >= LaneEnd) Idx += 16; // end of lane, switch operand
5934 Indices.push_back(llvm::ConstantInt::get(Int32Ty, Idx));
5935 }
5936 }
5937
5938 Value* SV = llvm::ConstantVector::get(Indices);
5939 return Builder.CreateShuffleVector(Ops[1], Ops[0], SV, "palignr");
5940 }
5941
5942 // If palignr is shifting the pair of input vectors more than 16 but less
5943 // than 32 bytes, emit a logical right shift of the destination.
5944 if (shiftVal < 32) {
5945 llvm::Type *VecTy = llvm::VectorType::get(Int64Ty, 4);
5946
5947 Ops[0] = Builder.CreateBitCast(Ops[0], VecTy, "cast");
5948 Ops[1] = llvm::ConstantInt::get(Int32Ty, (shiftVal-16) * 8);
5949
5950       // psrl.dq takes its shift amount in bits
5951 llvm::Function *F = CGM.getIntrinsic(Intrinsic::x86_avx2_psrl_dq);
5952 return Builder.CreateCall(F, makeArrayRef(Ops.data(), 2), "palignr");
5953 }
5954
5955     // If palignr is shifting the pair of vectors 32 bytes or more, emit zero.
5956 return llvm::Constant::getNullValue(ConvertType(E->getType()));
5957 }
5958 case X86::BI__builtin_ia32_movntps:
5959 case X86::BI__builtin_ia32_movntps256:
5960 case X86::BI__builtin_ia32_movntpd:
5961 case X86::BI__builtin_ia32_movntpd256:
5962 case X86::BI__builtin_ia32_movntdq:
5963 case X86::BI__builtin_ia32_movntdq256:
5964 case X86::BI__builtin_ia32_movnti:
5965 case X86::BI__builtin_ia32_movnti64: {
5966 llvm::MDNode *Node = llvm::MDNode::get(
5967 getLLVMContext(), llvm::ConstantAsMetadata::get(Builder.getInt32(1)));
5968
5969 // Convert the type of the pointer to a pointer to the stored type.
5970 Value *BC = Builder.CreateBitCast(Ops[0],
5971 llvm::PointerType::getUnqual(Ops[1]->getType()),
5972 "cast");
5973 StoreInst *SI = Builder.CreateStore(Ops[1], BC);
5974 SI->setMetadata(CGM.getModule().getMDKindID("nontemporal"), Node);
5975
5976 // If the operand is an integer, we can't assume alignment. Otherwise,
5977 // assume natural alignment.
5978 QualType ArgTy = E->getArg(1)->getType();
5979 unsigned Align;
5980 if (ArgTy->isIntegerType())
5981 Align = 1;
5982 else
5983 Align = getContext().getTypeSizeInChars(ArgTy).getQuantity();
5984 SI->setAlignment(Align);
5985 return SI;
5986 }
5987 // 3DNow!
5988 case X86::BI__builtin_ia32_pswapdsf:
5989 case X86::BI__builtin_ia32_pswapdsi: {
5990 const char *name;
5991 Intrinsic::ID ID;
5992 switch(BuiltinID) {
5993 default: llvm_unreachable("Unsupported intrinsic!");
5994 case X86::BI__builtin_ia32_pswapdsf:
5995 case X86::BI__builtin_ia32_pswapdsi:
5996 name = "pswapd";
5997 ID = Intrinsic::x86_3dnowa_pswapd;
5998 break;
5999 }
6000 llvm::Type *MMXTy = llvm::Type::getX86_MMXTy(getLLVMContext());
6001 Ops[0] = Builder.CreateBitCast(Ops[0], MMXTy, "cast");
6002 llvm::Function *F = CGM.getIntrinsic(ID);
6003 return Builder.CreateCall(F, Ops, name);
6004 }
6005 case X86::BI__builtin_ia32_rdrand16_step:
6006 case X86::BI__builtin_ia32_rdrand32_step:
6007 case X86::BI__builtin_ia32_rdrand64_step:
6008 case X86::BI__builtin_ia32_rdseed16_step:
6009 case X86::BI__builtin_ia32_rdseed32_step:
6010 case X86::BI__builtin_ia32_rdseed64_step: {
6011 Intrinsic::ID ID;
6012 switch (BuiltinID) {
6013 default: llvm_unreachable("Unsupported intrinsic!");
6014 case X86::BI__builtin_ia32_rdrand16_step:
6015 ID = Intrinsic::x86_rdrand_16;
6016 break;
6017 case X86::BI__builtin_ia32_rdrand32_step:
6018 ID = Intrinsic::x86_rdrand_32;
6019 break;
6020 case X86::BI__builtin_ia32_rdrand64_step:
6021 ID = Intrinsic::x86_rdrand_64;
6022 break;
6023 case X86::BI__builtin_ia32_rdseed16_step:
6024 ID = Intrinsic::x86_rdseed_16;
6025 break;
6026 case X86::BI__builtin_ia32_rdseed32_step:
6027 ID = Intrinsic::x86_rdseed_32;
6028 break;
6029 case X86::BI__builtin_ia32_rdseed64_step:
6030 ID = Intrinsic::x86_rdseed_64;
6031 break;
6032 }
6033
6034 Value *Call = Builder.CreateCall(CGM.getIntrinsic(ID));
6035 Builder.CreateStore(Builder.CreateExtractValue(Call, 0), Ops[0]);
6036 return Builder.CreateExtractValue(Call, 1);
6037 }
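  // The rdrand/rdseed intrinsics return an {iN, i32} pair: the random value,
  // which is stored through the builtin's pointer argument, and the carry
  // flag (success indicator), which becomes the builtin's return value.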
6038 // AVX2 broadcast
6039 case X86::BI__builtin_ia32_vbroadcastsi256: {
6040 Value *VecTmp = CreateMemTemp(E->getArg(0)->getType());
6041 Builder.CreateStore(Ops[0], VecTmp);
6042 Value *F = CGM.getIntrinsic(Intrinsic::x86_avx2_vbroadcasti128);
6043 return Builder.CreateCall(F, Builder.CreateBitCast(VecTmp, Int8PtrTy));
6044 }
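  // vbroadcasti128 only has a memory form, so the 128-bit operand is spilled
  // to a stack temporary and the intrinsic is called on that address.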
6045   // SSE comparison intrinsics
6046 case X86::BI__builtin_ia32_cmpeqps:
6047 case X86::BI__builtin_ia32_cmpltps:
6048 case X86::BI__builtin_ia32_cmpleps:
6049 case X86::BI__builtin_ia32_cmpunordps:
6050 case X86::BI__builtin_ia32_cmpneqps:
6051 case X86::BI__builtin_ia32_cmpnltps:
6052 case X86::BI__builtin_ia32_cmpnleps:
6053 case X86::BI__builtin_ia32_cmpordps:
6054 case X86::BI__builtin_ia32_cmpeqss:
6055 case X86::BI__builtin_ia32_cmpltss:
6056 case X86::BI__builtin_ia32_cmpless:
6057 case X86::BI__builtin_ia32_cmpunordss:
6058 case X86::BI__builtin_ia32_cmpneqss:
6059 case X86::BI__builtin_ia32_cmpnltss:
6060 case X86::BI__builtin_ia32_cmpnless:
6061 case X86::BI__builtin_ia32_cmpordss:
6062 case X86::BI__builtin_ia32_cmpeqpd:
6063 case X86::BI__builtin_ia32_cmpltpd:
6064 case X86::BI__builtin_ia32_cmplepd:
6065 case X86::BI__builtin_ia32_cmpunordpd:
6066 case X86::BI__builtin_ia32_cmpneqpd:
6067 case X86::BI__builtin_ia32_cmpnltpd:
6068 case X86::BI__builtin_ia32_cmpnlepd:
6069 case X86::BI__builtin_ia32_cmpordpd:
6070 case X86::BI__builtin_ia32_cmpeqsd:
6071 case X86::BI__builtin_ia32_cmpltsd:
6072 case X86::BI__builtin_ia32_cmplesd:
6073 case X86::BI__builtin_ia32_cmpunordsd:
6074 case X86::BI__builtin_ia32_cmpneqsd:
6075 case X86::BI__builtin_ia32_cmpnltsd:
6076 case X86::BI__builtin_ia32_cmpnlesd:
6077 case X86::BI__builtin_ia32_cmpordsd:
6078     // These exist so that a builtin that takes an immediate can be bounds
6079     // checked by clang to avoid passing bad immediates to the backend. Since
6080     // AVX has a larger immediate range than SSE, we would need separate
6081     // builtins to do the different bounds checking. Rather than create a
6082     // clang-specific SSE-only builtin, this implements eight separate
6083     // builtins to match the gcc implementation.
6084
6085 // Choose the immediate.
6086 unsigned Imm;
6087 switch (BuiltinID) {
6088 default: llvm_unreachable("Unsupported intrinsic!");
6089 case X86::BI__builtin_ia32_cmpeqps:
6090 case X86::BI__builtin_ia32_cmpeqss:
6091 case X86::BI__builtin_ia32_cmpeqpd:
6092 case X86::BI__builtin_ia32_cmpeqsd:
6093 Imm = 0;
6094 break;
6095 case X86::BI__builtin_ia32_cmpltps:
6096 case X86::BI__builtin_ia32_cmpltss:
6097 case X86::BI__builtin_ia32_cmpltpd:
6098 case X86::BI__builtin_ia32_cmpltsd:
6099 Imm = 1;
6100 break;
6101 case X86::BI__builtin_ia32_cmpleps:
6102 case X86::BI__builtin_ia32_cmpless:
6103 case X86::BI__builtin_ia32_cmplepd:
6104 case X86::BI__builtin_ia32_cmplesd:
6105 Imm = 2;
6106 break;
6107 case X86::BI__builtin_ia32_cmpunordps:
6108 case X86::BI__builtin_ia32_cmpunordss:
6109 case X86::BI__builtin_ia32_cmpunordpd:
6110 case X86::BI__builtin_ia32_cmpunordsd:
6111 Imm = 3;
6112 break;
6113 case X86::BI__builtin_ia32_cmpneqps:
6114 case X86::BI__builtin_ia32_cmpneqss:
6115 case X86::BI__builtin_ia32_cmpneqpd:
6116 case X86::BI__builtin_ia32_cmpneqsd:
6117 Imm = 4;
6118 break;
6119 case X86::BI__builtin_ia32_cmpnltps:
6120 case X86::BI__builtin_ia32_cmpnltss:
6121 case X86::BI__builtin_ia32_cmpnltpd:
6122 case X86::BI__builtin_ia32_cmpnltsd:
6123 Imm = 5;
6124 break;
6125 case X86::BI__builtin_ia32_cmpnleps:
6126 case X86::BI__builtin_ia32_cmpnless:
6127 case X86::BI__builtin_ia32_cmpnlepd:
6128 case X86::BI__builtin_ia32_cmpnlesd:
6129 Imm = 6;
6130 break;
6131 case X86::BI__builtin_ia32_cmpordps:
6132 case X86::BI__builtin_ia32_cmpordss:
6133 case X86::BI__builtin_ia32_cmpordpd:
6134 case X86::BI__builtin_ia32_cmpordsd:
6135 Imm = 7;
6136 break;
6137 }
6138
6139 // Choose the intrinsic ID.
6140 const char *name;
6141 Intrinsic::ID ID;
6142 switch (BuiltinID) {
6143 default: llvm_unreachable("Unsupported intrinsic!");
6144 case X86::BI__builtin_ia32_cmpeqps:
6145 case X86::BI__builtin_ia32_cmpltps:
6146 case X86::BI__builtin_ia32_cmpleps:
6147 case X86::BI__builtin_ia32_cmpunordps:
6148 case X86::BI__builtin_ia32_cmpneqps:
6149 case X86::BI__builtin_ia32_cmpnltps:
6150 case X86::BI__builtin_ia32_cmpnleps:
6151 case X86::BI__builtin_ia32_cmpordps:
6152 name = "cmpps";
6153 ID = Intrinsic::x86_sse_cmp_ps;
6154 break;
6155 case X86::BI__builtin_ia32_cmpeqss:
6156 case X86::BI__builtin_ia32_cmpltss:
6157 case X86::BI__builtin_ia32_cmpless:
6158 case X86::BI__builtin_ia32_cmpunordss:
6159 case X86::BI__builtin_ia32_cmpneqss:
6160 case X86::BI__builtin_ia32_cmpnltss:
6161 case X86::BI__builtin_ia32_cmpnless:
6162 case X86::BI__builtin_ia32_cmpordss:
6163 name = "cmpss";
6164 ID = Intrinsic::x86_sse_cmp_ss;
6165 break;
6166 case X86::BI__builtin_ia32_cmpeqpd:
6167 case X86::BI__builtin_ia32_cmpltpd:
6168 case X86::BI__builtin_ia32_cmplepd:
6169 case X86::BI__builtin_ia32_cmpunordpd:
6170 case X86::BI__builtin_ia32_cmpneqpd:
6171 case X86::BI__builtin_ia32_cmpnltpd:
6172 case X86::BI__builtin_ia32_cmpnlepd:
6173 case X86::BI__builtin_ia32_cmpordpd:
6174 name = "cmppd";
6175 ID = Intrinsic::x86_sse2_cmp_pd;
6176 break;
6177 case X86::BI__builtin_ia32_cmpeqsd:
6178 case X86::BI__builtin_ia32_cmpltsd:
6179 case X86::BI__builtin_ia32_cmplesd:
6180 case X86::BI__builtin_ia32_cmpunordsd:
6181 case X86::BI__builtin_ia32_cmpneqsd:
6182 case X86::BI__builtin_ia32_cmpnltsd:
6183 case X86::BI__builtin_ia32_cmpnlesd:
6184 case X86::BI__builtin_ia32_cmpordsd:
6185 name = "cmpsd";
6186 ID = Intrinsic::x86_sse2_cmp_sd;
6187 break;
6188 }
6189
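    // For example, __builtin_ia32_cmpltps ends up roughly as:
    //   call <4 x float> @llvm.x86.sse.cmp.ps(<4 x float> %a, <4 x float> %b, i8 1)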
6190 Ops.push_back(llvm::ConstantInt::get(Int8Ty, Imm));
6191 llvm::Function *F = CGM.getIntrinsic(ID);
6192 return Builder.CreateCall(F, Ops, name);
6193 }
6194 }
6195
6196
6197 Value *CodeGenFunction::EmitPPCBuiltinExpr(unsigned BuiltinID,
6198 const CallExpr *E) {
6199 SmallVector<Value*, 4> Ops;
6200
6201 for (unsigned i = 0, e = E->getNumArgs(); i != e; i++)
6202 Ops.push_back(EmitScalarExpr(E->getArg(i)));
6203
6204 Intrinsic::ID ID = Intrinsic::not_intrinsic;
6205
6206 switch (BuiltinID) {
6207 default: return nullptr;
6208
6209 // vec_ld, vec_lvsl, vec_lvsr
6210 case PPC::BI__builtin_altivec_lvx:
6211 case PPC::BI__builtin_altivec_lvxl:
6212 case PPC::BI__builtin_altivec_lvebx:
6213 case PPC::BI__builtin_altivec_lvehx:
6214 case PPC::BI__builtin_altivec_lvewx:
6215 case PPC::BI__builtin_altivec_lvsl:
6216 case PPC::BI__builtin_altivec_lvsr:
6217 case PPC::BI__builtin_vsx_lxvd2x:
6218 case PPC::BI__builtin_vsx_lxvw4x:
6219 {
6220 Ops[1] = Builder.CreateBitCast(Ops[1], Int8PtrTy);
6221
6222 Ops[0] = Builder.CreateGEP(Ops[1], Ops[0]);
6223 Ops.pop_back();
6224
6225 switch (BuiltinID) {
6226 default: llvm_unreachable("Unsupported ld/lvsl/lvsr intrinsic!");
6227 case PPC::BI__builtin_altivec_lvx:
6228 ID = Intrinsic::ppc_altivec_lvx;
6229 break;
6230 case PPC::BI__builtin_altivec_lvxl:
6231 ID = Intrinsic::ppc_altivec_lvxl;
6232 break;
6233 case PPC::BI__builtin_altivec_lvebx:
6234 ID = Intrinsic::ppc_altivec_lvebx;
6235 break;
6236 case PPC::BI__builtin_altivec_lvehx:
6237 ID = Intrinsic::ppc_altivec_lvehx;
6238 break;
6239 case PPC::BI__builtin_altivec_lvewx:
6240 ID = Intrinsic::ppc_altivec_lvewx;
6241 break;
6242 case PPC::BI__builtin_altivec_lvsl:
6243 ID = Intrinsic::ppc_altivec_lvsl;
6244 break;
6245 case PPC::BI__builtin_altivec_lvsr:
6246 ID = Intrinsic::ppc_altivec_lvsr;
6247 break;
6248 case PPC::BI__builtin_vsx_lxvd2x:
6249 ID = Intrinsic::ppc_vsx_lxvd2x;
6250 break;
6251 case PPC::BI__builtin_vsx_lxvw4x:
6252 ID = Intrinsic::ppc_vsx_lxvw4x;
6253 break;
6254 }
6255 llvm::Function *F = CGM.getIntrinsic(ID);
6256 return Builder.CreateCall(F, Ops, "");
6257 }
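  // The AltiVec/VSX loads take (offset, base) in the builtin but a single
  // effective address in the intrinsic, so the address is formed up front as
  // an i8* GEP. Roughly, for lvx:
  //   %ea = getelementptr i8* %base, %offset
  //   %v  = call <4 x i32> @llvm.ppc.altivec.lvx(i8* %ea)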
6258
6259 // vec_st
6260 case PPC::BI__builtin_altivec_stvx:
6261 case PPC::BI__builtin_altivec_stvxl:
6262 case PPC::BI__builtin_altivec_stvebx:
6263 case PPC::BI__builtin_altivec_stvehx:
6264 case PPC::BI__builtin_altivec_stvewx:
6265 case PPC::BI__builtin_vsx_stxvd2x:
6266 case PPC::BI__builtin_vsx_stxvw4x:
6267 {
6268 Ops[2] = Builder.CreateBitCast(Ops[2], Int8PtrTy);
6269 Ops[1] = Builder.CreateGEP(Ops[2], Ops[1]);
6270 Ops.pop_back();
6271
6272 switch (BuiltinID) {
6273 default: llvm_unreachable("Unsupported st intrinsic!");
6274 case PPC::BI__builtin_altivec_stvx:
6275 ID = Intrinsic::ppc_altivec_stvx;
6276 break;
6277 case PPC::BI__builtin_altivec_stvxl:
6278 ID = Intrinsic::ppc_altivec_stvxl;
6279 break;
6280 case PPC::BI__builtin_altivec_stvebx:
6281 ID = Intrinsic::ppc_altivec_stvebx;
6282 break;
6283 case PPC::BI__builtin_altivec_stvehx:
6284 ID = Intrinsic::ppc_altivec_stvehx;
6285 break;
6286 case PPC::BI__builtin_altivec_stvewx:
6287 ID = Intrinsic::ppc_altivec_stvewx;
6288 break;
6289 case PPC::BI__builtin_vsx_stxvd2x:
6290 ID = Intrinsic::ppc_vsx_stxvd2x;
6291 break;
6292 case PPC::BI__builtin_vsx_stxvw4x:
6293 ID = Intrinsic::ppc_vsx_stxvw4x;
6294 break;
6295 }
6296 llvm::Function *F = CGM.getIntrinsic(ID);
6297 return Builder.CreateCall(F, Ops, "");
6298 }
6299 }
6300 }
6301
6302 // Emit an intrinsic that has 1 float or double operand.
6303 static Value *emitUnaryFPBuiltin(CodeGenFunction &CGF,
6304 const CallExpr *E,
6305 unsigned IntrinsicID) {
6306 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
6307
6308 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
6309 return CGF.Builder.CreateCall(F, Src0);
6310 }
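// A usage sketch: emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp) on a
// float argument emits a call to @llvm.AMDGPU.rcp.f32, since the intrinsic
// is retrieved with an overload on the source type.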
6311
6312 // Emit an intrinsic that has 3 float or double operands.
6313 static Value *emitTernaryFPBuiltin(CodeGenFunction &CGF,
6314 const CallExpr *E,
6315 unsigned IntrinsicID) {
6316 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
6317 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
6318 llvm::Value *Src2 = CGF.EmitScalarExpr(E->getArg(2));
6319
6320 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
6321 return CGF.Builder.CreateCall3(F, Src0, Src1, Src2);
6322 }
6323
6324 // Emit an intrinsic that has 1 float or double operand, and 1 integer.
6325 static Value *emitFPIntBuiltin(CodeGenFunction &CGF,
6326 const CallExpr *E,
6327 unsigned IntrinsicID) {
6328 llvm::Value *Src0 = CGF.EmitScalarExpr(E->getArg(0));
6329 llvm::Value *Src1 = CGF.EmitScalarExpr(E->getArg(1));
6330
6331 Value *F = CGF.CGM.getIntrinsic(IntrinsicID, Src0->getType());
6332 return CGF.Builder.CreateCall2(F, Src0, Src1);
6333 }
6334
6335 Value *CodeGenFunction::EmitR600BuiltinExpr(unsigned BuiltinID,
6336 const CallExpr *E) {
6337 switch (BuiltinID) {
6338 case R600::BI__builtin_amdgpu_div_scale:
6339 case R600::BI__builtin_amdgpu_div_scalef: {
6340     // Translate from the intrinsic's struct return to the builtin's out
6341 // argument.
6342
6343 std::pair<llvm::Value *, unsigned> FlagOutPtr
6344 = EmitPointerWithAlignment(E->getArg(3));
6345
6346 llvm::Value *X = EmitScalarExpr(E->getArg(0));
6347 llvm::Value *Y = EmitScalarExpr(E->getArg(1));
6348 llvm::Value *Z = EmitScalarExpr(E->getArg(2));
6349
6350 llvm::Value *Callee = CGM.getIntrinsic(Intrinsic::AMDGPU_div_scale,
6351 X->getType());
6352
6353 llvm::Value *Tmp = Builder.CreateCall3(Callee, X, Y, Z);
6354
6355 llvm::Value *Result = Builder.CreateExtractValue(Tmp, 0);
6356 llvm::Value *Flag = Builder.CreateExtractValue(Tmp, 1);
6357
6358 llvm::Type *RealFlagType
6359 = FlagOutPtr.first->getType()->getPointerElementType();
6360
6361 llvm::Value *FlagExt = Builder.CreateZExt(Flag, RealFlagType);
6362 llvm::StoreInst *FlagStore = Builder.CreateStore(FlagExt, FlagOutPtr.first);
6363 FlagStore->setAlignment(FlagOutPtr.second);
6364 return Result;
6365 }
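  // A sketch of the result shape (double variant): the intrinsic returns
  // {double, i1}; element 0 becomes the builtin's return value, and element
  // 1 is zero-extended and stored through the out-pointer using the
  // alignment recorded by EmitPointerWithAlignment.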
6366 case R600::BI__builtin_amdgpu_div_fmas:
6367 case R600::BI__builtin_amdgpu_div_fmasf: {
6368 llvm::Value *Src0 = EmitScalarExpr(E->getArg(0));
6369 llvm::Value *Src1 = EmitScalarExpr(E->getArg(1));
6370 llvm::Value *Src2 = EmitScalarExpr(E->getArg(2));
6371 llvm::Value *Src3 = EmitScalarExpr(E->getArg(3));
6372
6373 llvm::Value *F = CGM.getIntrinsic(Intrinsic::AMDGPU_div_fmas,
6374 Src0->getType());
6375 llvm::Value *Src3ToBool = Builder.CreateIsNotNull(Src3);
6376 return Builder.CreateCall4(F, Src0, Src1, Src2, Src3ToBool);
6377 }
6378 case R600::BI__builtin_amdgpu_div_fixup:
6379 case R600::BI__builtin_amdgpu_div_fixupf:
6380 return emitTernaryFPBuiltin(*this, E, Intrinsic::AMDGPU_div_fixup);
6381 case R600::BI__builtin_amdgpu_trig_preop:
6382 case R600::BI__builtin_amdgpu_trig_preopf:
6383 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_trig_preop);
6384 case R600::BI__builtin_amdgpu_rcp:
6385 case R600::BI__builtin_amdgpu_rcpf:
6386 return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rcp);
6387 case R600::BI__builtin_amdgpu_rsq:
6388 case R600::BI__builtin_amdgpu_rsqf:
6389 return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq);
6390 case R600::BI__builtin_amdgpu_rsq_clamped:
6391 case R600::BI__builtin_amdgpu_rsq_clampedf:
6392 return emitUnaryFPBuiltin(*this, E, Intrinsic::AMDGPU_rsq_clamped);
6393 case R600::BI__builtin_amdgpu_ldexp:
6394 case R600::BI__builtin_amdgpu_ldexpf:
6395 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_ldexp);
6396 case R600::BI__builtin_amdgpu_class:
6397 case R600::BI__builtin_amdgpu_classf:
6398 return emitFPIntBuiltin(*this, E, Intrinsic::AMDGPU_class);
6399 default:
6400 return nullptr;
6401 }
6402 }
6403