//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGCXXABI.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

static unsigned ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_X86_64Win64: return llvm::CallingConv::X86_64_Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method
/// qualification.
/// FIXME: address space qualification?
static CanQualType GetThisType(ASTContext &Context, const CXXRecordDecl *RD) {
  QualType RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
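  // For example (illustrative, not from the original source): a K&R-style
  // declaration 'int f();' called as 'f(1, 2)' is lowered as a variadic call
  // with zero required arguments, which is what RequiredArgs(0) below
  // expresses.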
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), RequiredArgs(0));
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  RequiredArgs required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    prefix.push_back(FTP->getParamType(i));
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();
  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
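  // For example (illustrative, not from the original source):
  // 'void f() __attribute__((stdcall));' yields CC_X86StdCall below, while
  // '__attribute__((ms_abi))' is a no-op (CC_C) when already targeting
  // Windows.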
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<PnaclCallAttr>())
    return CC_PnaclCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_X86_64Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null value of RD means we don't have any meaningful "this" argument
/// type, so fall back to a generic pointer type.)
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  if (RD)
    argTypes.push_back(GetThisType(Context, RD));
  else
    argTypes.push_back(Context.VoidPtrTy);

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQual<FunctionProtoType> prototype = GetFormalType(MD);

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr());
  }

  return arrangeFreeFunctionType(prototype);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(const CXXMethodDecl *MD,
                                            StructorType Type) {

  SmallVector<CanQualType, 16> argTypes;
  argTypes.push_back(GetThisType(Context, MD->getParent()));

  GlobalDecl GD;
  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    GD = GlobalDecl(CD, toCXXCtorType(Type));
  } else {
    auto *DD = dyn_cast<CXXDestructorDecl>(MD);
    GD = GlobalDecl(DD, toCXXDtorType(Type));
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  for (unsigned i = 0, e = FTP->getNumParams(); i != e; ++i)
    argTypes.push_back(FTP->getParamType(i));

  TheCXXABI.buildStructorSignature(MD, Type, argTypes);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTypes.size()) : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
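  // Illustrative note (not from the original source): HasThisReturn models
  // ABIs where a constructor or destructor returns 'this' (e.g. the ARM and
  // Microsoft C++ ABIs), while hasMostDerivedReturn covers Microsoft-ABI
  // destructor variants that return a pointer to the most-derived object as
  // a void*.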
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 required);
}

/// Arrange a call to a C++ constructor, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FPT, 1 + ExtraArgs);
  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (isa<FunctionNoProtoType>(FTy)) {
    CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>();
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), RequiredArgs::All);
  }

  assert(isa<FunctionProtoType>(FTy));
  return arrangeFreeFunctionType(FTy.getAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
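  // Illustrative note (not from the original source): the receiver and the
  // selector become the first two parameters, so a method such as
  // '- (int)foo:(int)x' is arranged roughly as 'int(id self, SEL _cmd, int x)'.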
  // FIXME: Kill copy?
  for (const auto *I : MD->params()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
    return arrangeCXXStructorDeclaration(CD, getFromCtorType(GD.getCtorType()));

  if (const CXXDestructorDecl *DD = dyn_cast<CXXDestructorDecl>(FD))
    return arrangeCXXStructorDeclaration(DD, getFromDtorType(GD.getDtorType()));

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeMSMemberPointerThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only virtual memptrs have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = { GetThisType(Context, MD->getParent()) };
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), RequiredArgs(1));
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs(proto->getNumParams() + numExtraRequiredArgs);
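    // For example (illustrative, not from the original source): for a call to
    // 'int printf(const char *, ...)' with no extra prefix arguments, this
    // yields RequiredArgs(1) -- only the format string is required.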

  // If we don't have a prototype at all, but we're supposed to
  // explicitly use the variadic convention for unprototyped calls,
  // treat all of the arguments as required but preserve the nominal
  // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function call is essentially a free-function call with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(QualType resultType,
                                      const CallArgList &args,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

/// Arrange a call to a C++ method, passing the given arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *FPT,
                                   RequiredArgs required) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  FunctionType::ExtInfo info = FPT->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(FPT->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeFreeFunctionDeclaration(
    QualType resultType, const FunctionArgList &args,
    const FunctionType::ExtInfo &info, bool isVariadic) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (auto Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg->getType()));

  RequiredArgs required =
      (isVariadic ? RequiredArgs(args.size()) : RequiredArgs::All);
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, info, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), RequiredArgs::All);
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                                      RequiredArgs required) {
  assert(std::all_of(argTypes.begin(), argTypes.end(),
                     std::mem_fun_ref(&CanQualType::isCanonicalAsParam)));

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, required,
                          resultType, argTypes);
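  // Illustrative note (not from the original source): CGFunctionInfo objects
  // are uniqued in a FoldingSet keyed on the profile computed above, so two
  // arrangements with identical signatures share one CGFunctionInfo.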

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  getABIInfo().computeInfo(*FI);

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  void *buffer = operator new(sizeof(CGFunctionInfo) +
                              sizeof(ArgInfo) * (argTypes.size() + 1));
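  // Illustrative note (not from the original source): one extra ArgInfo slot
  // is allocated because slot 0 of the trailing buffer holds the return type;
  // argument i lives in slot i + 1 (see the loop below).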
  CGFunctionInfo *FI = new(buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->NumArgs = argTypes.size();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

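// Illustrative note (not from the original source): under this scheme a type
// such as 'struct S { int a; float b[2]; };' expands to the leaf sequence
// {int, float, float}, and '_Complex double' expands to {double, double}.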
static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return llvm::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        // Skip zero length bitfields.
        if (FD->isBitField() && FD->getBitWidthValue(Context) == 0)
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return llvm::make_unique<RecordExpansion>(std::move(Bases),
                                              std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return llvm::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return llvm::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

void CodeGenFunction::ExpandTypeFromArgs(
    QualType Ty, LValue LV, SmallVectorImpl<llvm::Argument *>::iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(LV.getAddress(), 0, i);
      LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
      ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = LV.getAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForField(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Value *RealAddr = Builder.CreateStructGEP(LV.getAddress(), 0, "real");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(RealAddr, CExp->EltTy));
    llvm::Value *ImagAddr = Builder.CreateStructGEP(LV.getAddress(), 1, "imag");
    EmitStoreThroughLValue(RValue::get(*AI++),
                           MakeAddrLValue(ImagAddr, CExp->EltTy));
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    EmitStoreThroughLValue(RValue::get(*AI++), LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, RValue RV, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    llvm::Value *Addr = RV.getAggregateAddr();
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      llvm::Value *EltAddr = Builder.CreateConstGEP2_32(Addr, 0, i);
      RValue EltRV =
          convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation());
      ExpandTypeToArgs(CAExp->EltTy, EltRV, IRFuncTy, IRCallArgs, IRCallArgPos);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    llvm::Value *This = RV.getAggregateAddr();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      llvm::Value *Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      RValue BaseRV = RValue::getAggregate(Base);

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      RValue FldRV = EmitRValueForField(LV, FD, SourceLocation());
      ExpandTypeToArgs(FD->getType(), FldRV, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = RV.getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer from which we
/// are accessing some number of bytes, try to GEP into the struct to get at
/// its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
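/// Illustrative example (not from the original source): with
/// %struct.B = type { i64 } and %struct.A = type { %struct.B, i32 }, an
/// 8-byte coerced access of an A* dives through B down to the inner i64,
/// because each first element's store size is at least the requested size.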
static llvm::Value *
EnterStructPointerForCoercedAccess(llvm::Value *SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateConstGEP2_32(SrcPtr, 0, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where
/// both are either integers or pointers. This does a truncation of the value
/// if it is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
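/// Illustrative example (not from the original source): coercing an i64 value
/// 0xAABBCCDD11223344 to i32 yields 0xAABBCCDD on a big-endian target (the
/// value is shifted right before truncation) and 0x11223344 on a
/// little-endian target (plain truncation).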
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(llvm::Value *SrcPtr,
                                      llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy =
      cast<llvm::PointerType>(SrcPtr->getType())->getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(SrcPtr);

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    SrcPtr = EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);
    SrcTy = cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(SrcPtr);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (SrcSize >= DstSize) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(SrcPtr, llvm::PointerType::getUnqual(Ty));
    llvm::LoadInst *Load = CGF.Builder.CreateLoad(Casted);
    // FIXME: Use better alignment / avoid requiring aligned load.
    Load->setAlignment(1);
    return Load;
  }

  // Otherwise do coercion through memory. This is stupid, but
  // simple.
  llvm::Value *Tmp = CGF.CreateTempAlloca(Ty);
  llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
  llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
  llvm::Value *SrcCasted = CGF.Builder.CreateBitCast(SrcPtr, I8PtrTy);
  // FIXME: Use better alignment.
  CGF.Builder.CreateMemCpy(Casted, SrcCasted,
                           llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize),
                           1, false);
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
static void BuildAggStore(CodeGenFunction &CGF, llvm::Value *Val,
                          llvm::Value *DestPtr, bool DestIsVolatile,
                          bool LowAlignment) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy =
          dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      llvm::Value *EltPtr = CGF.Builder.CreateConstGEP2_32(DestPtr, 0, i);
      llvm::Value *Elt = CGF.Builder.CreateExtractValue(Val, i);
      llvm::StoreInst *SI = CGF.Builder.CreateStore(Elt, EltPtr,
                                                    DestIsVolatile);
      if (LowAlignment)
        SI->setAlignment(1);
    }
  } else {
    llvm::StoreInst *SI = CGF.Builder.CreateStore(Val, DestPtr, DestIsVolatile);
    if (LowAlignment)
      SI->setAlignment(1);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               llvm::Value *DstPtr,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy =
      cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    DstPtr = EnterStructPointerForCoercedAccess(DstPtr, DstSTy, SrcSize, CGF);
    DstTy = cast<llvm::PointerType>(DstPtr->getType())->getElementType();
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, DstPtr, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    llvm::Value *Casted =
        CGF.Builder.CreateBitCast(DstPtr, llvm::PointerType::getUnqual(SrcTy));
    // FIXME: Use better alignment / avoid requiring aligned store.
    BuildAggStore(CGF, Src, Casted, DstIsVolatile, true);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    llvm::Value *Tmp = CGF.CreateTempAlloca(SrcTy);
    CGF.Builder.CreateStore(Src, Tmp);
    llvm::Type *I8PtrTy = CGF.Builder.getInt8PtrTy();
    llvm::Value *Casted = CGF.Builder.CreateBitCast(Tmp, I8PtrTy);
    llvm::Value *DstCasted = CGF.Builder.CreateBitCast(DstPtr, I8PtrTy);
    // FIXME: Use better alignment.
    CGF.Builder.CreateMemCpy(DstCasted, Casted,
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize),
                             1, false);
  }
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of IR arguments it expands to.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }
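  // Illustrative note (not from the original source): some ABIs (e.g. the
  // Microsoft C++ ABI for instance methods) pass 'this' first and the sret
  // pointer second, so in that case the sret slot is IR argument 1 rather
  // than 0.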

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::Expand: {
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  return FI.getReturnInfo().isIndirect();
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void, they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect: {
    assert(!retAI.getIndirectAlign() && "Align unused on indirect return.");
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is addr space #0.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo();
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
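      // Illustrative example (not from the original source): an argument
      // coerced to { i32, i32 } that can be flattened becomes two i32 IR
      // parameters rather than a single first-class aggregate parameter.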
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  const CGFunctionInfo *Info;
  if (isa<CXXDestructorDecl>(MD))
    Info =
        &arrangeCXXStructorDeclaration(MD, getFromDtorType(GD.getDtorType()));
  else
    Info = &arrangeCXXMethodDeclaration(MD);
  return GetFunctionType(*Info);
}
1361
ConstructAttributeList(const CGFunctionInfo & FI,const Decl * TargetDecl,AttributeListType & PAL,unsigned & CallingConv,bool AttrOnCallSite)1362 void CodeGenModule::ConstructAttributeList(const CGFunctionInfo &FI,
1363 const Decl *TargetDecl,
1364 AttributeListType &PAL,
1365 unsigned &CallingConv,
1366 bool AttrOnCallSite) {
1367 llvm::AttrBuilder FuncAttrs;
1368 llvm::AttrBuilder RetAttrs;
1369 bool HasOptnone = false;
1370
1371 CallingConv = FI.getEffectiveCallingConvention();
1372
1373 if (FI.isNoReturn())
1374 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1375
1376 // FIXME: handle sseregparm someday...
1377 if (TargetDecl) {
1378 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
1379 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
1380 if (TargetDecl->hasAttr<NoThrowAttr>())
1381 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1382 if (TargetDecl->hasAttr<NoReturnAttr>())
1383 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1384 if (TargetDecl->hasAttr<NoDuplicateAttr>())
1385 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
1386
1387 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
1388 const FunctionProtoType *FPT = Fn->getType()->getAs<FunctionProtoType>();
1389 if (FPT && FPT->isNothrow(getContext()))
1390 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1391 // Don't use [[noreturn]] or _Noreturn for a call to a virtual function.
1392 // These attributes are not inherited by overriders.
1393 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
1394 if (Fn->isNoReturn() && !(AttrOnCallSite && MD && MD->isVirtual()))
1395 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1396 }
1397
1398 // 'const' and 'pure' attribute functions are also nounwind.
1399 if (TargetDecl->hasAttr<ConstAttr>()) {
1400 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
1401 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1402 } else if (TargetDecl->hasAttr<PureAttr>()) {
1403 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
1404 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1405 }
1406 if (TargetDecl->hasAttr<MallocAttr>())
1407 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
1408 if (TargetDecl->hasAttr<ReturnsNonNullAttr>())
1409 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1410
1411 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
1412 }
1413
1414 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1415 if (!HasOptnone) {
1416 if (CodeGenOpts.OptimizeSize)
1417 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1418 if (CodeGenOpts.OptimizeSize == 2)
1419 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1420 }
1421
1422 if (CodeGenOpts.DisableRedZone)
1423 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1424 if (CodeGenOpts.NoImplicitFloat)
1425 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1426 if (CodeGenOpts.EnableSegmentedStacks &&
1427 !(TargetDecl && TargetDecl->hasAttr<NoSplitStackAttr>()))
1428 FuncAttrs.addAttribute("split-stack");
1429
1430 if (AttrOnCallSite) {
1431 // Attributes that should go on the call site only.
1432 if (!CodeGenOpts.SimplifyLibCalls)
1433 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1434 } else {
1435 // Attributes that should go on the function, but not the call site.
1436 if (!CodeGenOpts.DisableFPElim) {
1437 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1438 } else if (CodeGenOpts.OmitLeafFramePointer) {
1439 FuncAttrs.addAttribute("no-frame-pointer-elim", "false");
1440 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1441 } else {
1442 FuncAttrs.addAttribute("no-frame-pointer-elim", "true");
1443 FuncAttrs.addAttribute("no-frame-pointer-elim-non-leaf");
1444 }
1445
1446 FuncAttrs.addAttribute("less-precise-fpmad",
1447 llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));
1448 FuncAttrs.addAttribute("no-infs-fp-math",
1449 llvm::toStringRef(CodeGenOpts.NoInfsFPMath));
1450 FuncAttrs.addAttribute("no-nans-fp-math",
1451 llvm::toStringRef(CodeGenOpts.NoNaNsFPMath));
1452 FuncAttrs.addAttribute("unsafe-fp-math",
1453 llvm::toStringRef(CodeGenOpts.UnsafeFPMath));
1454 FuncAttrs.addAttribute("use-soft-float",
1455 llvm::toStringRef(CodeGenOpts.SoftFloat));
1456 FuncAttrs.addAttribute("stack-protector-buffer-size",
1457 llvm::utostr(CodeGenOpts.SSPBufferSize));
1458
1459 if (!CodeGenOpts.StackRealignment)
1460 FuncAttrs.addAttribute("no-realign-stack");
1461 }
1462
1463 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
1464
1465 QualType RetTy = FI.getReturnType();
1466 const ABIArgInfo &RetAI = FI.getReturnInfo();
1467 switch (RetAI.getKind()) {
1468 case ABIArgInfo::Extend:
1469 if (RetTy->hasSignedIntegerRepresentation())
1470 RetAttrs.addAttribute(llvm::Attribute::SExt);
1471 else if (RetTy->hasUnsignedIntegerRepresentation())
1472 RetAttrs.addAttribute(llvm::Attribute::ZExt);
1473 // FALL THROUGH
1474 case ABIArgInfo::Direct:
1475 if (RetAI.getInReg())
1476 RetAttrs.addAttribute(llvm::Attribute::InReg);
1477 break;
1478 case ABIArgInfo::Ignore:
1479 break;
1480
1481 case ABIArgInfo::InAlloca:
1482 case ABIArgInfo::Indirect: {
1483 // inalloca and sret disable readnone and readonly
1484 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1485 .removeAttribute(llvm::Attribute::ReadNone);
1486 break;
1487 }
1488
1489 case ABIArgInfo::Expand:
1490 llvm_unreachable("Invalid ABI kind for return argument");
1491 }
1492
1493 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
1494 QualType PTy = RefTy->getPointeeType();
1495 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1496 RetAttrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1497 .getQuantity());
1498 else if (getContext().getTargetAddressSpace(PTy) == 0)
1499 RetAttrs.addAttribute(llvm::Attribute::NonNull);
1500 }
1501
1502 // Attach return attributes.
1503 if (RetAttrs.hasAttributes()) {
1504 PAL.push_back(llvm::AttributeSet::get(
1505 getLLVMContext(), llvm::AttributeSet::ReturnIndex, RetAttrs));
1506 }
1507
1508 // Attach attributes to sret.
1509 if (IRFunctionArgs.hasSRetArg()) {
1510 llvm::AttrBuilder SRETAttrs;
1511 SRETAttrs.addAttribute(llvm::Attribute::StructRet);
1512 if (RetAI.getInReg())
1513 SRETAttrs.addAttribute(llvm::Attribute::InReg);
1514 PAL.push_back(llvm::AttributeSet::get(
1515 getLLVMContext(), IRFunctionArgs.getSRetArgNo() + 1, SRETAttrs));
1516 }
1517
1518 // Attach attributes to inalloca argument.
1519 if (IRFunctionArgs.hasInallocaArg()) {
1520 llvm::AttrBuilder Attrs;
1521 Attrs.addAttribute(llvm::Attribute::InAlloca);
1522 PAL.push_back(llvm::AttributeSet::get(
1523 getLLVMContext(), IRFunctionArgs.getInallocaArgNo() + 1, Attrs));
1524 }
1525
1526 unsigned ArgNo = 0;
1527 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
1528 E = FI.arg_end();
1529 I != E; ++I, ++ArgNo) {
1530 QualType ParamType = I->type;
1531 const ABIArgInfo &AI = I->info;
1532 llvm::AttrBuilder Attrs;
1533
1534 // Add attribute for padding argument, if necessary.
1535 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
1536 if (AI.getPaddingInReg())
1537 PAL.push_back(llvm::AttributeSet::get(
1538 getLLVMContext(), IRFunctionArgs.getPaddingArgNo(ArgNo) + 1,
1539 llvm::Attribute::InReg));
1540 }
1541
1542 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
1543 // have the corresponding parameter variable. It doesn't make
1544 // sense to do it here because we don't have the parameter variables yet.
1545 switch (AI.getKind()) {
1546 case ABIArgInfo::Extend:
1547 if (ParamType->isSignedIntegerOrEnumerationType())
1548 Attrs.addAttribute(llvm::Attribute::SExt);
1549 else if (ParamType->isUnsignedIntegerOrEnumerationType())
1550 Attrs.addAttribute(llvm::Attribute::ZExt);
1551 // FALL THROUGH
1552 case ABIArgInfo::Direct:
1553 if (ArgNo == 0 && FI.isChainCall())
1554 Attrs.addAttribute(llvm::Attribute::Nest);
1555 else if (AI.getInReg())
1556 Attrs.addAttribute(llvm::Attribute::InReg);
1557 break;
1558
1559 case ABIArgInfo::Indirect:
1560 if (AI.getInReg())
1561 Attrs.addAttribute(llvm::Attribute::InReg);
1562
1563 if (AI.getIndirectByVal())
1564 Attrs.addAttribute(llvm::Attribute::ByVal);
1565
1566 Attrs.addAlignmentAttr(AI.getIndirectAlign());
1567
1568 // byval disables readnone and readonly.
1569 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1570 .removeAttribute(llvm::Attribute::ReadNone);
1571 break;
1572
1573 case ABIArgInfo::Ignore:
1574 case ABIArgInfo::Expand:
1575 continue;
1576
1577 case ABIArgInfo::InAlloca:
1578 // inalloca disables readnone and readonly.
1579 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
1580 .removeAttribute(llvm::Attribute::ReadNone);
1581 continue;
1582 }
1583
1584 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
1585 QualType PTy = RefTy->getPointeeType();
1586 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
1587 Attrs.addDereferenceableAttr(getContext().getTypeSizeInChars(PTy)
1588 .getQuantity());
1589 else if (getContext().getTargetAddressSpace(PTy) == 0)
1590 Attrs.addAttribute(llvm::Attribute::NonNull);
1591 }
1592
1593 if (Attrs.hasAttributes()) {
1594 unsigned FirstIRArg, NumIRArgs;
1595 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1596 for (unsigned i = 0; i < NumIRArgs; i++)
1597 PAL.push_back(llvm::AttributeSet::get(getLLVMContext(),
1598 FirstIRArg + i + 1, Attrs));
1599 }
1600 }
1601 assert(ArgNo == FI.arg_size());
1602
1603 if (FuncAttrs.hasAttributes())
1604 PAL.push_back(llvm::
1605 AttributeSet::get(getLLVMContext(),
1606 llvm::AttributeSet::FunctionIndex,
1607 FuncAttrs));
1608 }
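// Rough net effect, shown with a hypothetical declaration (a sketch only;
// the string attributes vary with -O level and codegen options):
//
//   __attribute__((const)) int sq(int x);
//
// produces IR along the lines of
//
//   declare i32 @sq(i32) #0
//   attributes #0 = { nounwind readnone "no-frame-pointer-elim"="true" ... }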
1609
1610 /// An argument came in as a promoted argument; demote it back to its
1611 /// declared type.
1612 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
1613 const VarDecl *var,
1614 llvm::Value *value) {
1615 llvm::Type *varType = CGF.ConvertType(var->getType());
1616
1617 // This can happen with promotions that actually don't change the
1618 // underlying type, like the enum promotions.
1619 if (value->getType() == varType) return value;
1620
1621 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
1622 && "unexpected promotion type");
1623
1624 if (isa<llvm::IntegerType>(varType))
1625 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
1626
1627 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
1628 }
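// Example of the promotion being undone (an illustrative sketch): for the
// K&R definition
//
//   void f(c) char c; { ... }
//
// callers apply the default argument promotion and pass an int, so the
// prolog receives an i32 and emits
//
//   %arg.unpromote = trunc i32 %c to i8
//
// to recover the declared 'char'.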
1629
1630 /// Returns the attribute (either parameter attribute, or function
1631 /// attribute), which declares argument ArgNo to be non-null.
1632 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
1633 QualType ArgType, unsigned ArgNo) {
1634 // FIXME: __attribute__((nonnull)) can also be applied to:
1635 // - references to pointers, where the pointee is known to be
1636 // nonnull (apparently a Clang extension)
1637 // - transparent unions containing pointers
1638 // In the former case, LLVM IR cannot represent the constraint. In
1639 // the latter case, we have no guarantee that the transparent union
1640 // is in fact passed as a pointer.
1641 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
1642 return nullptr;
1643 // First, check attribute on parameter itself.
1644 if (PVD) {
1645 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
1646 return ParmNNAttr;
1647 }
1648 // Check function attributes.
1649 if (!FD)
1650 return nullptr;
1651 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
1652 if (NNAttr->isNonNull(ArgNo))
1653 return NNAttr;
1654 }
1655 return nullptr;
1656 }
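// The two spellings this helper understands (illustrative declarations):
//
//   void g(int *p __attribute__((nonnull)));            // on the parameter
//   void h(int *p, int *q) __attribute__((nonnull(2))); // 1-based index on
//                                                       // the function
//
// For 'q' in the second form, the loop above finds the attribute via
// NonNullAttr::isNonNull(ArgNo).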
1657
1658 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
1659 llvm::Function *Fn,
1660 const FunctionArgList &Args) {
1661 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
1662 // Naked functions don't have prologues.
1663 return;
1664
1665 // If this is an implicit-return-zero function, go ahead and
1666 // initialize the return value. TODO: it might be nice to have
1667 // a more general mechanism for this that didn't require synthesized
1668 // return statements.
1669 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
1670 if (FD->hasImplicitReturnZero()) {
1671 QualType RetTy = FD->getReturnType().getUnqualifiedType();
1672 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
1673 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
1674 Builder.CreateStore(Zero, ReturnValue);
1675 }
1676 }
1677
1678 // FIXME: We no longer need the types from FunctionArgList; lift up and
1679 // simplify.
1680
1681 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
1682 // Flattened function arguments.
1683 SmallVector<llvm::Argument *, 16> FnArgs;
1684 FnArgs.reserve(IRFunctionArgs.totalIRArgs());
1685 for (auto &Arg : Fn->args()) {
1686 FnArgs.push_back(&Arg);
1687 }
1688 assert(FnArgs.size() == IRFunctionArgs.totalIRArgs());
1689
1690 // If we're using inalloca, all the memory arguments are GEPs off of the last
1691 // parameter, which is a pointer to the complete memory area.
1692 llvm::Value *ArgStruct = nullptr;
1693 if (IRFunctionArgs.hasInallocaArg()) {
1694 ArgStruct = FnArgs[IRFunctionArgs.getInallocaArgNo()];
1695 assert(ArgStruct->getType() == FI.getArgStruct()->getPointerTo());
1696 }
1697
1698 // Name the struct return parameter.
1699 if (IRFunctionArgs.hasSRetArg()) {
1700 auto AI = FnArgs[IRFunctionArgs.getSRetArgNo()];
1701 AI->setName("agg.result");
1702 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(), AI->getArgNo() + 1,
1703 llvm::Attribute::NoAlias));
1704 }
1705
1706 // Track if we received the parameter as a pointer (indirect, byval, or
1707 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy it
1708 // into a local alloca for us.
1709 enum ValOrPointer { HaveValue = 0, HavePointer = 1 };
1710 typedef llvm::PointerIntPair<llvm::Value *, 1> ValueAndIsPtr;
1711 SmallVector<ValueAndIsPtr, 16> ArgVals;
1712 ArgVals.reserve(Args.size());
1713
1714 // Create a pointer value for every parameter declaration. This usually
1715 // entails copying one or more LLVM IR arguments into an alloca. Don't push
1716 // any cleanups or do anything that might unwind. We do that separately, so
1717 // we can push the cleanups in the correct order for the ABI.
1718 assert(FI.arg_size() == Args.size() &&
1719 "Mismatch between function signature & arguments.");
1720 unsigned ArgNo = 0;
1721 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
1722 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1723 i != e; ++i, ++info_it, ++ArgNo) {
1724 const VarDecl *Arg = *i;
1725 QualType Ty = info_it->type;
1726 const ABIArgInfo &ArgI = info_it->info;
1727
1728 bool isPromoted =
1729 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
1730
1731 unsigned FirstIRArg, NumIRArgs;
1732 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1733
1734 switch (ArgI.getKind()) {
1735 case ABIArgInfo::InAlloca: {
1736 assert(NumIRArgs == 0);
1737 llvm::Value *V = Builder.CreateStructGEP(
1738 ArgStruct, ArgI.getInAllocaFieldIndex(), Arg->getName());
1739 ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1740 break;
1741 }
1742
1743 case ABIArgInfo::Indirect: {
1744 assert(NumIRArgs == 1);
1745 llvm::Value *V = FnArgs[FirstIRArg];
1746
1747 if (!hasScalarEvaluationKind(Ty)) {
1748 // Aggregates and complex variables are accessed by reference. All we
1749 // need to do is realign the value, if requested.
1750 if (ArgI.getIndirectRealign()) {
1751 llvm::Value *AlignedTemp = CreateMemTemp(Ty, "coerce");
1752
1753 // Copy from the incoming argument pointer to the temporary with the
1754 // appropriate alignment.
1755 //
1756 // FIXME: We should have a common utility for generating an aggregate
1757 // copy.
1758 llvm::Type *I8PtrTy = Builder.getInt8PtrTy();
1759 CharUnits Size = getContext().getTypeSizeInChars(Ty);
1760 llvm::Value *Dst = Builder.CreateBitCast(AlignedTemp, I8PtrTy);
1761 llvm::Value *Src = Builder.CreateBitCast(V, I8PtrTy);
1762 Builder.CreateMemCpy(Dst,
1763 Src,
1764 llvm::ConstantInt::get(IntPtrTy,
1765 Size.getQuantity()),
1766 ArgI.getIndirectAlign(),
1767 false);
1768 V = AlignedTemp;
1769 }
1770 ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1771 } else {
1772 // Load scalar value from indirect argument.
1773 CharUnits Alignment = getContext().getTypeAlignInChars(Ty);
1774 V = EmitLoadOfScalar(V, false, Alignment.getQuantity(), Ty,
1775 Arg->getLocStart());
1776
1777 if (isPromoted)
1778 V = emitArgumentDemotion(*this, Arg, V);
1779 ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1780 }
1781 break;
1782 }
1783
1784 case ABIArgInfo::Extend:
1785 case ABIArgInfo::Direct: {
1786
1787 // If we have the trivial case, handle it with no muss and fuss.
1788 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
1789 ArgI.getCoerceToType() == ConvertType(Ty) &&
1790 ArgI.getDirectOffset() == 0) {
1791 assert(NumIRArgs == 1);
1792 auto AI = FnArgs[FirstIRArg];
1793 llvm::Value *V = AI;
1794
1795 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
1796 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
1797 PVD->getFunctionScopeIndex()))
1798 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1799 AI->getArgNo() + 1,
1800 llvm::Attribute::NonNull));
1801
1802 QualType OTy = PVD->getOriginalType();
1803 if (const auto *ArrTy =
1804 getContext().getAsConstantArrayType(OTy)) {
1805 // A C99 array parameter declaration with the static keyword also
1806 // indicates dereferenceability, and if the size is constant we can
1807 // use the dereferenceable attribute (which requires the size in
1808 // bytes).
1809 if (ArrTy->getSizeModifier() == ArrayType::Static) {
1810 QualType ETy = ArrTy->getElementType();
1811 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
1812 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
1813 ArrSize) {
1814 llvm::AttrBuilder Attrs;
1815 Attrs.addDereferenceableAttr(
1816 getContext().getTypeSizeInChars(ETy).getQuantity()*ArrSize);
1817 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1818 AI->getArgNo() + 1, Attrs));
1819 } else if (getContext().getTargetAddressSpace(ETy) == 0) {
1820 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1821 AI->getArgNo() + 1,
1822 llvm::Attribute::NonNull));
1823 }
1824 }
1825 } else if (const auto *ArrTy =
1826 getContext().getAsVariableArrayType(OTy)) {
1827 // For C99 VLAs with the static keyword, we don't know the size so
1828 // we can't use the dereferenceable attribute, but in addrspace(0)
1829 // we know that it must be nonnull.
1830 if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
1831 !getContext().getTargetAddressSpace(ArrTy->getElementType()))
1832 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1833 AI->getArgNo() + 1,
1834 llvm::Attribute::NonNull));
1835 }
1836
1837 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
1838 if (!AVAttr)
1839 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
1840 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
1841 if (AVAttr) {
1842 llvm::Value *AlignmentValue =
1843 EmitScalarExpr(AVAttr->getAlignment());
1844 llvm::ConstantInt *AlignmentCI =
1845 cast<llvm::ConstantInt>(AlignmentValue);
1846 unsigned Alignment =
1847 std::min((unsigned) AlignmentCI->getZExtValue(),
1848 +llvm::Value::MaximumAlignment);
1849
1850 llvm::AttrBuilder Attrs;
1851 Attrs.addAlignmentAttr(Alignment);
1852 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1853 AI->getArgNo() + 1, Attrs));
1854 }
1855 }
1856
1857 if (Arg->getType().isRestrictQualified())
1858 AI->addAttr(llvm::AttributeSet::get(getLLVMContext(),
1859 AI->getArgNo() + 1,
1860 llvm::Attribute::NoAlias));
1861
1862 // Ensure the argument is the correct type.
1863 if (V->getType() != ArgI.getCoerceToType())
1864 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
1865
1866 if (isPromoted)
1867 V = emitArgumentDemotion(*this, Arg, V);
1868
1869 if (const CXXMethodDecl *MD =
1870 dyn_cast_or_null<CXXMethodDecl>(CurCodeDecl)) {
1871 if (MD->isVirtual() && Arg == CXXABIThisDecl)
1872 V = CGM.getCXXABI().
1873 adjustThisParameterInVirtualFunctionPrologue(*this, CurGD, V);
1874 }
1875
1876 // Because of merging of function types from multiple decls it is
1877 // possible for the type of an argument to not match the corresponding
1878 // type in the function type. Since we are codegening the callee
1879 // in here, add a cast to the argument type.
1880 llvm::Type *LTy = ConvertType(Arg->getType());
1881 if (V->getType() != LTy)
1882 V = Builder.CreateBitCast(V, LTy);
1883
1884 ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1885 break;
1886 }
1887
1888 llvm::AllocaInst *Alloca = CreateMemTemp(Ty, Arg->getName());
1889
1890 // The alignment we need to use is the max of the requested alignment for
1891 // the argument and the alignment required by our access code below.
1892 unsigned AlignmentToUse =
1893 CGM.getDataLayout().getABITypeAlignment(ArgI.getCoerceToType());
1894 AlignmentToUse = std::max(AlignmentToUse,
1895 (unsigned)getContext().getDeclAlign(Arg).getQuantity());
1896
1897 Alloca->setAlignment(AlignmentToUse);
1898 llvm::Value *V = Alloca;
1899 llvm::Value *Ptr = V; // Pointer to store into.
1900
1901 // If the value is offset in memory, apply the offset now.
1902 if (unsigned Offs = ArgI.getDirectOffset()) {
1903 Ptr = Builder.CreateBitCast(Ptr, Builder.getInt8PtrTy());
1904 Ptr = Builder.CreateConstGEP1_32(Ptr, Offs);
1905 Ptr = Builder.CreateBitCast(Ptr,
1906 llvm::PointerType::getUnqual(ArgI.getCoerceToType()));
1907 }
1908
1909 // Fast-isel and the optimizer generally like scalar values better than
1910 // FCAs, so we flatten them if this is safe to do for this argument.
1911 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
1912 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
1913 STy->getNumElements() > 1) {
1914 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
1915 llvm::Type *DstTy =
1916 cast<llvm::PointerType>(Ptr->getType())->getElementType();
1917 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
1918
1919 if (SrcSize <= DstSize) {
1920 Ptr = Builder.CreateBitCast(Ptr, llvm::PointerType::getUnqual(STy));
1921
1922 assert(STy->getNumElements() == NumIRArgs);
1923 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1924 auto AI = FnArgs[FirstIRArg + i];
1925 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1926 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(Ptr, 0, i);
1927 Builder.CreateStore(AI, EltPtr);
1928 }
1929 } else {
1930 llvm::AllocaInst *TempAlloca =
1931 CreateTempAlloca(ArgI.getCoerceToType(), "coerce");
1932 TempAlloca->setAlignment(AlignmentToUse);
1933 llvm::Value *TempV = TempAlloca;
1934
1935 assert(STy->getNumElements() == NumIRArgs);
1936 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1937 auto AI = FnArgs[FirstIRArg + i];
1938 AI->setName(Arg->getName() + ".coerce" + Twine(i));
1939 llvm::Value *EltPtr = Builder.CreateConstGEP2_32(TempV, 0, i);
1940 Builder.CreateStore(AI, EltPtr);
1941 }
1942
1943 Builder.CreateMemCpy(Ptr, TempV, DstSize, AlignmentToUse);
1944 }
1945 } else {
1946 // Simple case, just do a coerced store of the argument into the alloca.
1947 assert(NumIRArgs == 1);
1948 auto AI = FnArgs[FirstIRArg];
1949 AI->setName(Arg->getName() + ".coerce");
1950 CreateCoercedStore(AI, Ptr, /*DestIsVolatile=*/false, *this);
1951 }
1952
1953
1954 // Match to what EmitParmDecl is expecting for this type.
1955 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
1956 V = EmitLoadOfScalar(V, false, AlignmentToUse, Ty, Arg->getLocStart());
1957 if (isPromoted)
1958 V = emitArgumentDemotion(*this, Arg, V);
1959 ArgVals.push_back(ValueAndIsPtr(V, HaveValue));
1960 } else {
1961 ArgVals.push_back(ValueAndIsPtr(V, HavePointer));
1962 }
1963 break;
1964 }
1965
1966 case ABIArgInfo::Expand: {
1967 // If this structure was expanded into multiple arguments then
1968 // we need to create a temporary and reconstruct it from the
1969 // arguments.
1970 llvm::AllocaInst *Alloca = CreateMemTemp(Ty);
1971 CharUnits Align = getContext().getDeclAlign(Arg);
1972 Alloca->setAlignment(Align.getQuantity());
1973 LValue LV = MakeAddrLValue(Alloca, Ty, Align);
1974 ArgVals.push_back(ValueAndIsPtr(Alloca, HavePointer));
1975
1976 auto FnArgIter = FnArgs.begin() + FirstIRArg;
1977 ExpandTypeFromArgs(Ty, LV, FnArgIter);
1978 assert(FnArgIter == FnArgs.begin() + FirstIRArg + NumIRArgs);
1979 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
1980 auto AI = FnArgs[FirstIRArg + i];
1981 AI->setName(Arg->getName() + "." + Twine(i));
1982 }
1983 break;
1984 }
1985
1986 case ABIArgInfo::Ignore:
1987 assert(NumIRArgs == 0);
1988 // Initialize the local variable appropriately.
1989 if (!hasScalarEvaluationKind(Ty)) {
1990 ArgVals.push_back(ValueAndIsPtr(CreateMemTemp(Ty), HavePointer));
1991 } else {
1992 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
1993 ArgVals.push_back(ValueAndIsPtr(U, HaveValue));
1994 }
1995 break;
1996 }
1997 }
1998
1999 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2000 for (int I = Args.size() - 1; I >= 0; --I)
2001 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
2002 I + 1);
2003 } else {
2004 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2005 EmitParmDecl(*Args[I], ArgVals[I].getPointer(), ArgVals[I].getInt(),
2006 I + 1);
2007 }
2008 }
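// Prolog sketch for a flattened Direct argument (illustrative only, using
// the .coerce naming above and LLVM-3.x textual IR):
//
//   %s = alloca %struct.S, align 8
//   %0 = bitcast %struct.S* %s to { i64, double }*
//   %1 = getelementptr { i64, double }* %0, i32 0, i32 0
//   store i64 %s.coerce0, i64* %1
//   %2 = getelementptr { i64, double }* %0, i32 0, i32 1
//   store double %s.coerce1, double* %2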
2009
2010 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2011 while (insn->use_empty()) {
2012 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2013 if (!bitcast) return;
2014
2015 // This is "safe" because we would have used a ConstantExpr otherwise.
2016 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2017 bitcast->eraseFromParent();
2018 }
2019 }
2020
2021 /// Try to emit a fused autorelease of a return result.
2022 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2023 llvm::Value *result) {
2024 // The insertion point must immediately follow the result of the cast.
2025 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2026 if (BB->empty()) return nullptr;
2027 if (&BB->back() != result) return nullptr;
2028
2029 llvm::Type *resultType = result->getType();
2030
2031 // result is in a BasicBlock and is therefore an Instruction.
2032 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2033
2034 SmallVector<llvm::Instruction*,4> insnsToKill;
2035
2036 // Look for:
2037 // %generator = bitcast %type1* %generator2 to %type2*
2038 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2039 // We would have emitted this as a constant if the operand weren't
2040 // an Instruction.
2041 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2042
2043 // Require the generator to be immediately followed by the cast.
2044 if (generator->getNextNode() != bitcast)
2045 return nullptr;
2046
2047 insnsToKill.push_back(bitcast);
2048 }
2049
2050 // Look for:
2051 // %generator = call i8* @objc_retain(i8* %originalResult)
2052 // or
2053 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2054 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2055 if (!call) return nullptr;
2056
2057 bool doRetainAutorelease;
2058
2059 if (call->getCalledValue() == CGF.CGM.getARCEntrypoints().objc_retain) {
2060 doRetainAutorelease = true;
2061 } else if (call->getCalledValue() == CGF.CGM.getARCEntrypoints()
2062 .objc_retainAutoreleasedReturnValue) {
2063 doRetainAutorelease = false;
2064
2065 // If we emitted an assembly marker for this call (and the
2066 // ARCEntrypoints field should have been set if so), go looking
2067 // for that call. If we can't find it, we can't do this
2068 // optimization. But it should always be the immediately previous
2069 // instruction, unless we needed bitcasts around the call.
2070 if (CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker) {
2071 llvm::Instruction *prev = call->getPrevNode();
2072 assert(prev);
2073 if (isa<llvm::BitCastInst>(prev)) {
2074 prev = prev->getPrevNode();
2075 assert(prev);
2076 }
2077 assert(isa<llvm::CallInst>(prev));
2078 assert(cast<llvm::CallInst>(prev)->getCalledValue() ==
2079 CGF.CGM.getARCEntrypoints().retainAutoreleasedReturnValueMarker);
2080 insnsToKill.push_back(prev);
2081 }
2082 } else {
2083 return nullptr;
2084 }
2085
2086 result = call->getArgOperand(0);
2087 insnsToKill.push_back(call);
2088
2089 // Keep killing bitcasts, for sanity. Note that we no longer care
2090 // about precise ordering as long as there's exactly one use.
2091 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2092 if (!bitcast->hasOneUse()) break;
2093 insnsToKill.push_back(bitcast);
2094 result = bitcast->getOperand(0);
2095 }
2096
2097 // Delete all the unnecessary instructions, from latest to earliest.
2098 for (SmallVectorImpl<llvm::Instruction*>::iterator
2099 i = insnsToKill.begin(), e = insnsToKill.end(); i != e; ++i)
2100 (*i)->eraseFromParent();
2101
2102 // Do the fused retain/autorelease if we were asked to.
2103 if (doRetainAutorelease)
2104 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2105
2106 // Cast back to the result type.
2107 return CGF.Builder.CreateBitCast(result, resultType);
2108 }
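// Net rewrite this performs when it succeeds (a sketch): the separate
// retain emitted for the return,
//
//   %1 = call i8* @objc_retain(i8* %0)
//   ; ...autorelease of the result would follow...
//
// collapses into the fused entry point
//
//   %1 = call i8* @objc_retainAutoreleaseReturnValue(i8* %0)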
2109
2110 /// If this is a +1 of the value of an immutable 'self', remove it.
2111 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2112 llvm::Value *result) {
2113 // This is only applicable to a method with an immutable 'self'.
2114 const ObjCMethodDecl *method =
2115 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2116 if (!method) return nullptr;
2117 const VarDecl *self = method->getSelfDecl();
2118 if (!self->getType().isConstQualified()) return nullptr;
2119
2120 // Look for a retain call.
2121 llvm::CallInst *retainCall =
2122 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2123 if (!retainCall ||
2124 retainCall->getCalledValue() != CGF.CGM.getARCEntrypoints().objc_retain)
2125 return nullptr;
2126
2127 // Look for an ordinary load of 'self'.
2128 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2129 llvm::LoadInst *load =
2130 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2131 if (!load || load->isAtomic() || load->isVolatile() ||
2132 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self))
2133 return nullptr;
2134
2135 // Okay! Burn it all down. This relies for correctness on the
2136 // assumption that the retain is emitted as part of the return and
2137 // that thereafter everything is used "linearly".
2138 llvm::Type *resultType = result->getType();
2139 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2140 assert(retainCall->use_empty());
2141 retainCall->eraseFromParent();
2142 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2143
2144 return CGF.Builder.CreateBitCast(load, resultType);
2145 }
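// Pattern removed (a sketch), inside a method whose 'self' is const:
//
//   %self = load i8** %self.addr
//   %0 = call i8* @objc_retain(i8* %self)
//   ret i8* %0          ; would otherwise be autoreleased on return
//
// The retain is deleted and the loaded 'self' is returned directly, so the
// caller in emitAutoreleaseOfResult skips the autorelease as well.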
2146
2147 /// Emit an ARC autorelease of the result of a function.
2148 ///
2149 /// \return the value to actually return from the function
2150 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2151 llvm::Value *result) {
2152 // If we're returning 'self', kill the initial retain. This is a
2153 // heuristic attempt to "encourage correctness" in the really unfortunate
2154 // case where we have a return of self during a dealloc and we desperately
2155 // need to avoid the possible autorelease.
2156 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2157 return self;
2158
2159 // At -O0, try to emit a fused retain/autorelease.
2160 if (CGF.shouldUseFusedARCCalls())
2161 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2162 return fused;
2163
2164 return CGF.EmitARCAutoreleaseReturnValue(result);
2165 }
2166
2167 /// Heuristically search for a dominating store to the return-value slot.
2168 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2169 // If there are multiple uses of the return-value slot, just check
2170 // for something immediately preceding the IP. Sometimes this can
2171 // happen with how we generate implicit-returns; it can also happen
2172 // with noreturn cleanups.
2173 if (!CGF.ReturnValue->hasOneUse()) {
2174 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2175 if (IP->empty()) return nullptr;
2176 llvm::StoreInst *store = dyn_cast<llvm::StoreInst>(&IP->back());
2177 if (!store) return nullptr;
2178 if (store->getPointerOperand() != CGF.ReturnValue) return nullptr;
2179 assert(!store->isAtomic() && !store->isVolatile()); // see below
2180 return store;
2181 }
2182
2183 llvm::StoreInst *store =
2184 dyn_cast<llvm::StoreInst>(CGF.ReturnValue->user_back());
2185 if (!store) return nullptr;
2186
2187 // These aren't actually possible for non-coerced returns, and we
2188 // only care about non-coerced returns on this code path.
2189 assert(!store->isAtomic() && !store->isVolatile());
2190
2191 // Now do a quick-and-dirty dominance check: just walk up the
2192 // single-predecessors chain from the current insertion point.
2193 llvm::BasicBlock *StoreBB = store->getParent();
2194 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2195 while (IP != StoreBB) {
2196 if (!(IP = IP->getSinglePredecessor()))
2197 return nullptr;
2198 }
2199
2200 // Okay, the store's basic block dominates the insertion point; we
2201 // can do our thing.
2202 return store;
2203 }
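// Shape this typically matches (illustrative):
//
//   store i32 %call, i32* %retval
//   br label %return
// return:           ; reached only through a single-predecessor chain
//   %0 = load i32* %retval
//   ret i32 %0
//
// allowing the epilog to return %call directly and drop the load, the
// store, and often the alloca itself.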
2204
2205 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
2206 bool EmitRetDbgLoc,
2207 SourceLocation EndLoc) {
2208 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
2209 // Naked functions don't have epilogues.
2210 Builder.CreateUnreachable();
2211 return;
2212 }
2213
2214 // Functions with no result always return void.
2215 if (!ReturnValue) {
2216 Builder.CreateRetVoid();
2217 return;
2218 }
2219
2220 llvm::DebugLoc RetDbgLoc;
2221 llvm::Value *RV = nullptr;
2222 QualType RetTy = FI.getReturnType();
2223 const ABIArgInfo &RetAI = FI.getReturnInfo();
2224
2225 switch (RetAI.getKind()) {
2226 case ABIArgInfo::InAlloca:
2227 // Aggregates get evaluated directly into the destination. Sometimes we
2228 // need to return the sret value in a register, though.
2229 assert(hasAggregateEvaluationKind(RetTy));
2230 if (RetAI.getInAllocaSRet()) {
2231 llvm::Function::arg_iterator EI = CurFn->arg_end();
2232 --EI;
2233 llvm::Value *ArgStruct = EI;
2234 llvm::Value *SRet =
2235 Builder.CreateStructGEP(ArgStruct, RetAI.getInAllocaFieldIndex());
2236 RV = Builder.CreateLoad(SRet, "sret");
2237 }
2238 break;
2239
2240 case ABIArgInfo::Indirect: {
2241 auto AI = CurFn->arg_begin();
2242 if (RetAI.isSRetAfterThis())
2243 ++AI;
2244 switch (getEvaluationKind(RetTy)) {
2245 case TEK_Complex: {
2246 ComplexPairTy RT =
2247 EmitLoadOfComplex(MakeNaturalAlignAddrLValue(ReturnValue, RetTy),
2248 EndLoc);
2249 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(AI, RetTy),
2250 /*isInit*/ true);
2251 break;
2252 }
2253 case TEK_Aggregate:
2254 // Do nothing; aggregates get evaluated directly into the destination.
2255 break;
2256 case TEK_Scalar:
2257 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
2258 MakeNaturalAlignAddrLValue(AI, RetTy),
2259 /*isInit*/ true);
2260 break;
2261 }
2262 break;
2263 }
2264
2265 case ABIArgInfo::Extend:
2266 case ABIArgInfo::Direct:
2267 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
2268 RetAI.getDirectOffset() == 0) {
2269 // The internal return value temp will always have pointer-to-return-type
2270 // type; just do a load.
2271
2272 // If there is a dominating store to ReturnValue, we can elide
2273 // the load, zap the store, and usually zap the alloca.
2274 if (llvm::StoreInst *SI = findDominatingStoreToReturnValue(*this)) {
2275 // Reuse the debug location from the store unless there is
2276 // cleanup code to be emitted between the store and return
2277 // instruction.
2278 if (EmitRetDbgLoc && !AutoreleaseResult)
2279 RetDbgLoc = SI->getDebugLoc();
2280 // Get the stored value and nuke the now-dead store.
2281 RV = SI->getValueOperand();
2282 SI->eraseFromParent();
2283
2284 // If that was the only use of the return value, nuke it as well now.
2285 if (ReturnValue->use_empty() && isa<llvm::AllocaInst>(ReturnValue)) {
2286 cast<llvm::AllocaInst>(ReturnValue)->eraseFromParent();
2287 ReturnValue = nullptr;
2288 }
2289
2290 // Otherwise, we have to do a simple load.
2291 } else {
2292 RV = Builder.CreateLoad(ReturnValue);
2293 }
2294 } else {
2295 llvm::Value *V = ReturnValue;
2296 // If the value is offset in memory, apply the offset now.
2297 if (unsigned Offs = RetAI.getDirectOffset()) {
2298 V = Builder.CreateBitCast(V, Builder.getInt8PtrTy());
2299 V = Builder.CreateConstGEP1_32(V, Offs);
2300 V = Builder.CreateBitCast(V,
2301 llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
2302 }
2303
2304 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
2305 }
2306
2307 // In ARC, end functions that return a retainable type with a call
2308 // to objc_autoreleaseReturnValue.
2309 if (AutoreleaseResult) {
2310 assert(getLangOpts().ObjCAutoRefCount &&
2311 !FI.isReturnsRetained() &&
2312 RetTy->isObjCRetainableType());
2313 RV = emitAutoreleaseOfResult(*this, RV);
2314 }
2315
2316 break;
2317
2318 case ABIArgInfo::Ignore:
2319 break;
2320
2321 case ABIArgInfo::Expand:
2322 llvm_unreachable("Invalid ABI kind for return argument");
2323 }
2324
2325 llvm::Instruction *Ret;
2326 if (RV) {
2327 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute)) {
2328 if (auto RetNNAttr = CurGD.getDecl()->getAttr<ReturnsNonNullAttr>()) {
2329 SanitizerScope SanScope(this);
2330 llvm::Value *Cond = Builder.CreateICmpNE(
2331 RV, llvm::Constant::getNullValue(RV->getType()));
2332 llvm::Constant *StaticData[] = {
2333 EmitCheckSourceLocation(EndLoc),
2334 EmitCheckSourceLocation(RetNNAttr->getLocation()),
2335 };
2336 EmitCheck(std::make_pair(Cond, SanitizerKind::ReturnsNonnullAttribute),
2337 "nonnull_return", StaticData, None);
2338 }
2339 }
2340 Ret = Builder.CreateRet(RV);
2341 } else {
2342 Ret = Builder.CreateRetVoid();
2343 }
2344
2345 if (!RetDbgLoc.isUnknown())
2346 Ret->setDebugLoc(RetDbgLoc);
2347 }
2348
2349 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
2350 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2351 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
2352 }
2353
2354 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF, QualType Ty) {
2355 // FIXME: Generate IR in one pass, rather than going back and fixing up these
2356 // placeholders.
2357 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
2358 llvm::Value *Placeholder =
2359 llvm::UndefValue::get(IRTy->getPointerTo()->getPointerTo());
2360 Placeholder = CGF.Builder.CreateLoad(Placeholder);
2361 return AggValueSlot::forAddr(Placeholder, CharUnits::Zero(),
2362 Ty.getQualifiers(),
2363 AggValueSlot::IsNotDestructed,
2364 AggValueSlot::DoesNotNeedGCBarriers,
2365 AggValueSlot::IsNotAliased);
2366 }
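// The "placeholder" is just a load through an undef pointer-to-pointer
// (sketch):
//
//   %tmp = load %struct.A** undef
//
// It exists only as an Instruction that later inalloca argument setup can
// RAUW with the address of the real argument slot.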
2367
2368 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
2369 const VarDecl *param,
2370 SourceLocation loc) {
2371 // StartFunction converted the ABI-lowered parameter(s) into a
2372 // local alloca. We need to turn that into an r-value suitable
2373 // for EmitCall.
2374 llvm::Value *local = GetAddrOfLocalVar(param);
2375
2376 QualType type = param->getType();
2377
2378 // For the most part, we just need to load the alloca, except:
2379 // 1) aggregate r-values are actually pointers to temporaries, and
2380 // 2) references to non-scalars are pointers directly to the aggregate.
2381 // I don't know why references to scalars are different here.
2382 if (const ReferenceType *ref = type->getAs<ReferenceType>()) {
2383 if (!hasScalarEvaluationKind(ref->getPointeeType()))
2384 return args.add(RValue::getAggregate(local), type);
2385
2386 // Locals which are references to scalars are represented
2387 // with allocas holding the pointer.
2388 return args.add(RValue::get(Builder.CreateLoad(local)), type);
2389 }
2390
2391 assert(!isInAllocaArgument(CGM.getCXXABI(), type) &&
2392 "cannot emit delegate call arguments for inalloca arguments!");
2393
2394 args.add(convertTempToRValue(local, type, loc), type);
2395 }
2396
2397 static bool isProvablyNull(llvm::Value *addr) {
2398 return isa<llvm::ConstantPointerNull>(addr);
2399 }
2400
2401 static bool isProvablyNonNull(llvm::Value *addr) {
2402 return isa<llvm::AllocaInst>(addr);
2403 }
2404
2405 /// Emit the actual writing-back of a writeback.
2406 static void emitWriteback(CodeGenFunction &CGF,
2407 const CallArgList::Writeback &writeback) {
2408 const LValue &srcLV = writeback.Source;
2409 llvm::Value *srcAddr = srcLV.getAddress();
2410 assert(!isProvablyNull(srcAddr) &&
2411 "shouldn't have writeback for provably null argument");
2412
2413 llvm::BasicBlock *contBB = nullptr;
2414
2415 // If the argument wasn't provably non-null, we need to null check
2416 // before doing the store.
2417 bool provablyNonNull = isProvablyNonNull(srcAddr);
2418 if (!provablyNonNull) {
2419 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
2420 contBB = CGF.createBasicBlock("icr.done");
2421
2422 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
2423 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
2424 CGF.EmitBlock(writebackBB);
2425 }
2426
2427 // Load the value to writeback.
2428 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
2429
2430 // Cast it back, in case we're writing an id to a Foo* or something.
2431 value = CGF.Builder.CreateBitCast(value,
2432 cast<llvm::PointerType>(srcAddr->getType())->getElementType(),
2433 "icr.writeback-cast");
2434
2435 // Perform the writeback.
2436
2437 // If we have a "to use" value, it's something we need to emit a use
2438 // of. This has to be carefully threaded in: if it's done after the
2439 // release it's potentially undefined behavior (and the optimizer
2440 // will ignore it), and if it happens before the retain then the
2441 // optimizer could move the release there.
2442 if (writeback.ToUse) {
2443 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
2444
2445 // Retain the new value. No need to block-copy here: the block's
2446 // being passed up the stack.
2447 value = CGF.EmitARCRetainNonBlock(value);
2448
2449 // Emit the intrinsic use here.
2450 CGF.EmitARCIntrinsicUse(writeback.ToUse);
2451
2452 // Load the old value (primitively).
2453 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
2454
2455 // Put the new value in place (primitively).
2456 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
2457
2458 // Release the old value.
2459 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
2460
2461 // Otherwise, we can just do a normal lvalue store.
2462 } else {
2463 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
2464 }
2465
2466 // Jump to the continuation block.
2467 if (!provablyNonNull)
2468 CGF.EmitBlock(contBB);
2469 }
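// Source-level situation this implements (an illustrative example, under
// ARC):
//
//   void fail(NSError **err);   // parameter is implicitly __autoreleasing
//   __strong NSError *e = nil;
//   fail(&e);                   // compiler passes &tmp, then stores tmp
//                               // back into 'e' after the call
//
// The store back into 'e' is the writeback emitted above.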
2470
2471 static void emitWritebacks(CodeGenFunction &CGF,
2472 const CallArgList &args) {
2473 for (const auto &I : args.writebacks())
2474 emitWriteback(CGF, I);
2475 }
2476
2477 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
2478 const CallArgList &CallArgs) {
2479 assert(CGF.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee());
2480 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
2481 CallArgs.getCleanupsToDeactivate();
2482 // Iterate in reverse to increase the likelihood of popping the cleanup.
2483 for (ArrayRef<CallArgList::CallArgCleanup>::reverse_iterator
2484 I = Cleanups.rbegin(), E = Cleanups.rend(); I != E; ++I) {
2485 CGF.DeactivateCleanupBlock(I->Cleanup, I->IsActiveIP);
2486 I->IsActiveIP->eraseFromParent();
2487 }
2488 }
2489
2490 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
2491 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
2492 if (uop->getOpcode() == UO_AddrOf)
2493 return uop->getSubExpr();
2494 return nullptr;
2495 }
2496
2497 /// Emit an argument that's being passed call-by-writeback. That is,
2498 /// we are passing the address of a temporary that is written back to the original l-value after the call.
2499 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
2500 const ObjCIndirectCopyRestoreExpr *CRE) {
2501 LValue srcLV;
2502
2503 // Make an optimistic effort to emit the address as an l-value.
2504 // This can fail if the argument expression is more complicated.
2505 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
2506 srcLV = CGF.EmitLValue(lvExpr);
2507
2508 // Otherwise, just emit it as a scalar.
2509 } else {
2510 llvm::Value *srcAddr = CGF.EmitScalarExpr(CRE->getSubExpr());
2511
2512 QualType srcAddrType =
2513 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
2514 srcLV = CGF.MakeNaturalAlignAddrLValue(srcAddr, srcAddrType);
2515 }
2516 llvm::Value *srcAddr = srcLV.getAddress();
2517
2518 // The dest and src types don't necessarily match in LLVM terms
2519 // because of the crazy ObjC compatibility rules.
2520
2521 llvm::PointerType *destType =
2522 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
2523
2524 // If the address is a constant null, just pass the appropriate null.
2525 if (isProvablyNull(srcAddr)) {
2526 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
2527 CRE->getType());
2528 return;
2529 }
2530
2531 // Create the temporary.
2532 llvm::Value *temp = CGF.CreateTempAlloca(destType->getElementType(),
2533 "icr.temp");
2534 // Loading an l-value can introduce a cleanup if the l-value is __weak,
2535 // and that cleanup will be conditional if we can't prove that the l-value
2536 // isn't null, so we need to register a dominating point so that the cleanups
2537 // system will make valid IR.
2538 CodeGenFunction::ConditionalEvaluation condEval(CGF);
2539
2540 // Zero-initialize it if we're not doing a copy-initialization.
2541 bool shouldCopy = CRE->shouldCopy();
2542 if (!shouldCopy) {
2543 llvm::Value *null =
2544 llvm::ConstantPointerNull::get(
2545 cast<llvm::PointerType>(destType->getElementType()));
2546 CGF.Builder.CreateStore(null, temp);
2547 }
2548
2549 llvm::BasicBlock *contBB = nullptr;
2550 llvm::BasicBlock *originBB = nullptr;
2551
2552 // If the address is *not* known to be non-null, we need to switch.
2553 llvm::Value *finalArgument;
2554
2555 bool provablyNonNull = isProvablyNonNull(srcAddr);
2556 if (provablyNonNull) {
2557 finalArgument = temp;
2558 } else {
2559 llvm::Value *isNull = CGF.Builder.CreateIsNull(srcAddr, "icr.isnull");
2560
2561 finalArgument = CGF.Builder.CreateSelect(isNull,
2562 llvm::ConstantPointerNull::get(destType),
2563 temp, "icr.argument");
2564
2565 // If we need to copy, then the load has to be conditional, which
2566 // means we need control flow.
2567 if (shouldCopy) {
2568 originBB = CGF.Builder.GetInsertBlock();
2569 contBB = CGF.createBasicBlock("icr.cont");
2570 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
2571 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
2572 CGF.EmitBlock(copyBB);
2573 condEval.begin(CGF);
2574 }
2575 }
2576
2577 llvm::Value *valueToUse = nullptr;
2578
2579 // Perform a copy if necessary.
2580 if (shouldCopy) {
2581 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
2582 assert(srcRV.isScalar());
2583
2584 llvm::Value *src = srcRV.getScalarVal();
2585 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
2586 "icr.cast");
2587
2588 // Use an ordinary store, not a store-to-lvalue.
2589 CGF.Builder.CreateStore(src, temp);
2590
2591 // If optimization is enabled, and the value was held in a
2592 // __strong variable, we need to tell the optimizer that this
2593 // value has to stay alive until we're doing the store back.
2594 // This is because the temporary is effectively unretained,
2595 // and so otherwise we can violate the high-level semantics.
2596 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
2597 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
2598 valueToUse = src;
2599 }
2600 }
2601
2602 // Finish the control flow if we needed it.
2603 if (shouldCopy && !provablyNonNull) {
2604 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
2605 CGF.EmitBlock(contBB);
2606
2607 // Make a phi for the value to intrinsically use.
2608 if (valueToUse) {
2609 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
2610 "icr.to-use");
2611 phiToUse->addIncoming(valueToUse, copyBB);
2612 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
2613 originBB);
2614 valueToUse = phiToUse;
2615 }
2616
2617 condEval.end(CGF);
2618 }
2619
2620 args.addWriteback(srcLV, temp, valueToUse);
2621 args.add(RValue::get(finalArgument), CRE->getType());
2622 }
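// Control-flow sketch for the conditional-copy path (src not provably
// non-null and shouldCopy; types elided, block names as above):
//
//   %icr.isnull = icmp eq %src, null
//   %icr.argument = select i1 %icr.isnull, null, %icr.temp
//   br i1 %icr.isnull, label %icr.cont, label %icr.copy
// icr.copy:
//   ; load *%src and store it into %icr.temp
//   br label %icr.cont
// icr.cont:
//   ; the call receives %icr.argument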
2623
2624 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
2625 assert(!StackBase && !StackCleanup.isValid());
2626
2627 // Save the stack.
2628 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
2629 StackBase = CGF.Builder.CreateCall(F, "inalloca.save");
2630
2631 // Control gets really tied up in landing pads, so we have to spill the
2632 // stacksave to an alloca to avoid violating SSA form.
2633 // TODO: This is dead if we never emit the cleanup. We should create the
2634 // alloca and store lazily on the first cleanup emission.
2635 StackBaseMem = CGF.CreateTempAlloca(CGF.Int8PtrTy, "inalloca.spmem");
2636 CGF.Builder.CreateStore(StackBase, StackBaseMem);
2637 CGF.pushStackRestore(EHCleanup, StackBaseMem);
2638 StackCleanup = CGF.EHStack.getInnermostEHScope();
2639 assert(StackCleanup.isValid());
2640 }
2641
2642 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
2643 if (StackBase) {
2644 CGF.DeactivateCleanupBlock(StackCleanup, StackBase);
2645 llvm::Value *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
2646 // We could load StackBase from StackBaseMem, but in the non-exceptional
2647 // case we can skip it.
2648 CGF.Builder.CreateCall(F, StackBase);
2649 }
2650 }
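// Resulting IR bracket around an inalloca call (a sketch):
//
//   %inalloca.save = call i8* @llvm.stacksave()
//   %argmem = alloca inalloca <{ ... }>
//   call void @f(<{ ... }>* inalloca %argmem)
//   call void @llvm.stackrestore(i8* %inalloca.save)
//
// with the restore also reachable from EH cleanups via %inalloca.spmem.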
2651
2652 static void emitNonNullArgCheck(CodeGenFunction &CGF, RValue RV,
2653 QualType ArgType, SourceLocation ArgLoc,
2654 const FunctionDecl *FD, unsigned ParmNum) {
2655 if (!CGF.SanOpts.has(SanitizerKind::NonnullAttribute) || !FD)
2656 return;
2657 auto PVD = ParmNum < FD->getNumParams() ? FD->getParamDecl(ParmNum) : nullptr;
2658 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
2659 auto NNAttr = getNonNullAttr(FD, PVD, ArgType, ArgNo);
2660 if (!NNAttr)
2661 return;
2662 CodeGenFunction::SanitizerScope SanScope(&CGF);
2663 assert(RV.isScalar());
2664 llvm::Value *V = RV.getScalarVal();
2665 llvm::Value *Cond =
2666 CGF.Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
2667 llvm::Constant *StaticData[] = {
2668 CGF.EmitCheckSourceLocation(ArgLoc),
2669 CGF.EmitCheckSourceLocation(NNAttr->getLocation()),
2670 llvm::ConstantInt::get(CGF.Int32Ty, ArgNo + 1),
2671 };
2672 CGF.EmitCheck(std::make_pair(Cond, SanitizerKind::NonnullAttribute),
2673 "nonnull_arg", StaticData, None);
2674 }
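// Conceptually, with -fsanitize=nonnull-attribute this inserts before the
// call (a sketch; the diagnostic handler is emitted via EmitCheck):
//
//   if (arg == NULL)
//     __ubsan_handle_nonnull_arg(&static_data);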
2675
2676 void CodeGenFunction::EmitCallArgs(CallArgList &Args,
2677 ArrayRef<QualType> ArgTypes,
2678 CallExpr::const_arg_iterator ArgBeg,
2679 CallExpr::const_arg_iterator ArgEnd,
2680 const FunctionDecl *CalleeDecl,
2681 unsigned ParamsToSkip,
2682 bool ForceColumnInfo) {
2683 CGDebugInfo *DI = getDebugInfo();
2684 SourceLocation CallLoc;
2685 if (DI) CallLoc = DI->getLocation();
2686
2687 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
2688 // because arguments are destroyed left to right in the callee.
2689 if (CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2690 // Insert a stack save if we're going to need any inalloca args.
2691 bool HasInAllocaArgs = false;
2692 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
2693 I != E && !HasInAllocaArgs; ++I)
2694 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
2695 if (HasInAllocaArgs) {
2696 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
2697 Args.allocateArgumentMemory(*this);
2698 }
2699
2700 // Evaluate each argument.
2701 size_t CallArgsStart = Args.size();
2702 for (int I = ArgTypes.size() - 1; I >= 0; --I) {
2703 CallExpr::const_arg_iterator Arg = ArgBeg + I;
2704 EmitCallArg(Args, *Arg, ArgTypes[I]);
2705 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
2706 CalleeDecl, ParamsToSkip + I);
2707 // Restore the debug location.
2708 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
2709 }
2710
2711 // Un-reverse the arguments we just evaluated so they match up with the LLVM
2712 // IR function.
2713 std::reverse(Args.begin() + CallArgsStart, Args.end());
2714 return;
2715 }
2716
2717 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
2718 CallExpr::const_arg_iterator Arg = ArgBeg + I;
2719 assert(Arg != ArgEnd);
2720 EmitCallArg(Args, *Arg, ArgTypes[I]);
2721 emitNonNullArgCheck(*this, Args.back().RV, ArgTypes[I], Arg->getExprLoc(),
2722 CalleeDecl, ParamsToSkip + I);
2723 // Restore the debug location.
2724 if (DI) DI->EmitLocation(Builder, CallLoc, ForceColumnInfo);
2725 }
2726 }
2727
2728 namespace {
2729
2730 struct DestroyUnpassedArg : EHScopeStack::Cleanup {
2731 DestroyUnpassedArg(llvm::Value *Addr, QualType Ty)
2732 : Addr(Addr), Ty(Ty) {}
2733
2734 llvm::Value *Addr;
2735 QualType Ty;
2736
2737 void Emit(CodeGenFunction &CGF, Flags flags) override {
2738 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
2739 assert(!Dtor->isTrivial());
2740 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
2741 /*Delegating=*/false, Addr);
2742 }
2743 };
2744
2745 }
2746
2747 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
2748 QualType type) {
2749 if (const ObjCIndirectCopyRestoreExpr *CRE
2750 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
2751 assert(getLangOpts().ObjCAutoRefCount);
2752 assert(getContext().hasSameType(E->getType(), type));
2753 return emitWritebackArg(*this, args, CRE);
2754 }
2755
2756 assert(type->isReferenceType() == E->isGLValue() &&
2757 "reference binding to unmaterialized r-value!");
2758
2759 if (E->isGLValue()) {
2760 assert(E->getObjectKind() == OK_Ordinary);
2761 return args.add(EmitReferenceBindingToExpr(E), type);
2762 }
2763
2764 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
2765
2766 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
2767 // However, we still have to push an EH-only cleanup in case we unwind before
2768 // we make it to the call.
2769 if (HasAggregateEvalKind &&
2770 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2771 // If we're using inalloca, use the argument memory. Otherwise, use a
2772 // temporary.
2773 AggValueSlot Slot;
2774 if (args.isUsingInAlloca())
2775 Slot = createPlaceholderSlot(*this, type);
2776 else
2777 Slot = CreateAggTemp(type, "agg.tmp");
2778
2779 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
2780 bool DestroyedInCallee =
2781 RD && RD->hasNonTrivialDestructor() &&
2782 CGM.getCXXABI().getRecordArgABI(RD) != CGCXXABI::RAA_Default;
2783 if (DestroyedInCallee)
2784 Slot.setExternallyDestructed();
2785
2786 EmitAggExpr(E, Slot);
2787 RValue RV = Slot.asRValue();
2788 args.add(RV, type);
2789
2790 if (DestroyedInCallee) {
2791 // Push an EH-only cleanup to destroy the unpassed argument if we unwind.
2792 // The unreachable created below is a temporary marker for the first
2793 // instruction where the cleanup is active; it is erased on deactivation.
2794 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddr(), type);
2795 // This unreachable is a temporary marker which will be removed later.
2796 llvm::Instruction *IsActive = Builder.CreateUnreachable();
2797 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
2798 }
2799 return;
2800 }
2801
2802 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
2803 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
2804 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
2805 assert(L.isSimple());
2806 if (L.getAlignment() >= getContext().getTypeAlignInChars(type)) {
2807 args.add(L.asAggregateRValue(), type, /*NeedsCopy*/true);
2808 } else {
2809 // We can't represent a misaligned lvalue in the CallArgList, so copy
2810 // to an aligned temporary now.
2811 llvm::Value *tmp = CreateMemTemp(type);
2812 EmitAggregateCopy(tmp, L.getAddress(), type, L.isVolatile(),
2813 L.getAlignment());
2814 args.add(RValue::getAggregate(tmp), type);
2815 }
2816 return;
2817 }
2818
2819 args.add(EmitAnyExprToTemp(E), type);
2820 }
2821
2822 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
2823 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
2824 // implicitly widens null pointer constants that are arguments to varargs
2825 // functions to pointer-sized ints.
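  // For example (hypothetical caller, using the system headers' NULL):
  //   void log(const char *fmt, ...);
  //   log("%p", NULL);   // NULL expands to plain 0 on Win64; widen the
  //                      // argument to intptr_t so it fills a full slot.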
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}
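// The resulting IR looks roughly like (sketch):
//   %0 = call i8* @objc_retain(i8* %x), !clang.arc.no_objc_arc_exceptions !7
// The metadata licenses the ARC optimizer to pair retains and releases
// across unwind edges it would otherwise have to treat as barriers.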

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

/// Emits a simple call (never an invoke) to the given runtime
/// function.
llvm::CallInst *
CodeGenFunction::EmitRuntimeCall(llvm::Value *callee,
                                 ArrayRef<llvm::Value*> args,
                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}
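// Typical use of the helpers above (sketch; the runtime entry point name is
// illustrative, not a real Clang runtime symbol):
//   llvm::Constant *Fn =
//       CGM.CreateRuntimeFunction(FTy, "__example_runtime_helper");
//   EmitNounwindRuntimeCall(Fn, Args);
// which yields a plain call with the runtime calling convention and
// 'nounwind' set on the call instruction.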

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(llvm::Value *callee,
                                               ArrayRef<llvm::Value*> args) {
  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
      Builder.CreateInvoke(callee,
                           getUnreachableBlock(),
                           getInvokeDest(),
                           args);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
  PGO.setCurrentRegionUnreachable();
}
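// Noreturn runtime entry points (e.g. @__cxa_throw) flow through here: with
// an active EH scope we emit an invoke whose normal destination is the
// function's unreachable block; otherwise a call followed by 'unreachable'.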

/// Emits a call or invoke instruction to the given nullary runtime
/// function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallSite
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::Value *callee,
                                         ArrayRef<llvm::Value*> args,
                                         const Twine &name) {
  llvm::CallSite callSite = EmitCallOrInvoke(callee, args, name);
  callSite.setCallingConv(getRuntimeCC());
  return callSite;
}

llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  const Twine &Name) {
  return EmitCallOrInvoke(Callee, None, Name);
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallSite
CodeGenFunction::EmitCallOrInvoke(llvm::Value *Callee,
                                  ArrayRef<llvm::Value *> Args,
                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();

  llvm::Instruction *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}
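// The two shapes this produces (sketch, typed-pointer era IR):
//   call:    %r = call i32 @f(i32 %a)
//   invoke:  %r = invoke i32 @f(i32 %a)
//              to label %invoke.cont unwind label %lpad
// with emission continuing in %invoke.cont in the invoke case.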

/// \brief Store a non-aggregate value to an address to initialize it. For
/// initialization, a non-atomic store will be used.
static void EmitInitStoreOfNonAggregate(CodeGenFunction &CGF, RValue Src,
                                        LValue Dst) {
  if (Src.isScalar())
    CGF.EmitStoreOfScalar(Src.getScalarVal(), Dst, /*init=*/true);
  else
    CGF.EmitStoreOfComplex(Src.getComplexVal(), Dst, /*init=*/true);
}

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(std::make_pair(Old, New));
}

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 llvm::Value *Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 const Decl *TargetDecl,
                                 llvm::Instruction **callOrInvoke) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy =
    cast<llvm::FunctionType>(
        cast<llvm::PointerType>(Callee->getType())->getElementType());

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  llvm::Value *ArgMemory = nullptr;
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = AI;
  }
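  // The prologue of an inalloca call sequence then looks roughly like:
  //   %inalloca.save = call i8* @llvm.stacksave()
  //   %argmem = alloca inalloca <{ ... }>   ; placed right after the save
  // and is torn down again by freeArgumentMemory() near the end of EmitCall.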

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  llvm::Value *SRetPtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca()) {
    SRetPtr = ReturnValue.getValue();
    if (!SRetPtr)
      SRetPtr = CreateMemTemp(RetTy);
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr;
    } else {
      llvm::Value *Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr, Addr);
    }
  }
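  // For an indirect return, the result slot rides as a leading IR argument
  // (sketch):
  //   declare void @f(%struct.S* sret, ...)
  // whereas an inalloca return instead stores the slot pointer into the
  // argument memory struct built above.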

  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;
    RValue RV = I->RV;

    CharUnits TypeAlign = getContext().getTypeAlignInChars(I->Ty);

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (RV.isAggregate()) {
        // Replace the placeholder with the appropriate argument slot GEP.
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(RV.getAggregateAddr());
        CGBuilderTy::InsertPoint IP = Builder.saveIP();
        Builder.SetInsertPoint(Placeholder);
        llvm::Value *Addr = Builder.CreateStructGEP(
            ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.restoreIP(IP);
        deferPlaceholderReplacement(Placeholder, Addr);
      } else {
        // Store the RValue into the argument struct.
        llvm::Value *Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr->getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr->getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        LValue argLV = MakeAddrLValue(Addr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (RV.isScalar() || RV.isComplex()) {
        // Make a temporary alloca to pass the argument.
        llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
        if (ArgInfo.getIndirectAlign() > AI->getAlignment())
          AI->setAlignment(ArgInfo.getIndirectAlign());
        IRCallArgs[FirstIRArg] = AI;

        LValue argLV = MakeAddrLValue(AI, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, argLV);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is located in an address space
        //    different than that of the argument (0).
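        // Case 2 example (sketch): passing an aggregate only known to be
        // 4-byte aligned to a byval parameter whose ABI slot requires
        // 16-byte alignment forces a copy into a fresh, suitably aligned
        // temporary; cases 1 and 3 are detected by the same check below.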
        llvm::Value *Addr = RV.getAggregateAddr();
        unsigned Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();
        const unsigned RVAddrSpace = Addr->getType()->getPointerAddressSpace();
        const unsigned ArgAddrSpace =
            (FirstIRArg < IRFuncTy->getNumParams()
                 ? IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace()
                 : 0);
        if ((!ArgInfo.getIndirectByVal() && I->NeedsCopy) ||
            (ArgInfo.getIndirectByVal() && TypeAlign.getQuantity() < Align &&
             llvm::getOrEnforceKnownAlignment(Addr, Align, TD) < Align) ||
            (ArgInfo.getIndirectByVal() && (RVAddrSpace != ArgAddrSpace))) {
          // Create an aligned temporary, and copy to it.
          llvm::AllocaInst *AI = CreateMemTemp(I->Ty);
          if (Align > AI->getAlignment())
            AI->setAlignment(Align);
          IRCallArgs[FirstIRArg] = AI;
          EmitAggregateCopy(AI, Addr, I->Ty, RV.isVolatileQualified());
        } else {
          // Skip the extra memcpy call.
          IRCallArgs[FirstIRArg] = Addr;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (RV.isScalar())
          V = RV.getScalarVal();
        else
          V = Builder.CreateLoad(RV.getAggregateAddr());

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));
        IRCallArgs[FirstIRArg] = V;
        break;
      }
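      // The widening above yields, e.g. for an i1 value whose ABI coercion
      // type is a 32-bit slot (sketch):
      //   %arg.ext = zext i1 %arg to i32
      // rather than any form of truncation.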

      // FIXME: Avoid the conversion through memory if possible.
      llvm::Value *SrcPtr;
      if (RV.isScalar() || RV.isComplex()) {
        SrcPtr = CreateMemTemp(I->Ty, "coerce");
        LValue SrcLV = MakeAddrLValue(SrcPtr, I->Ty, TypeAlign);
        EmitInitStoreOfNonAggregate(*this, RV, SrcLV);
      } else
        SrcPtr = RV.getAggregateAddr();

      // If the value is offset in memory, apply the offset now.
      if (unsigned Offs = ArgInfo.getDirectOffset()) {
        SrcPtr = Builder.CreateBitCast(SrcPtr, Builder.getInt8PtrTy());
        SrcPtr = Builder.CreateConstGEP1_32(SrcPtr, Offs);
        SrcPtr = Builder.CreateBitCast(SrcPtr,
            llvm::PointerType::getUnqual(ArgInfo.getCoerceToType()));
      }

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy =
            cast<llvm::PointerType>(SrcPtr->getType())->getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          llvm::AllocaInst *TempAlloca
            = CreateTempAlloca(STy, SrcPtr->getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, SrcPtr, SrcSize, 0);
          SrcPtr = TempAlloca;
        } else {
          SrcPtr = Builder.CreateBitCast(SrcPtr,
                                         llvm::PointerType::getUnqual(STy));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          llvm::Value *EltPtr = Builder.CreateConstGEP2_32(SrcPtr, 0, i);
          llvm::LoadInst *LI = Builder.CreateLoad(EltPtr);
          // We don't know what we're loading from.
          LI->setAlignment(1);
          IRCallArgs[FirstIRArg + i] = LI;
        }
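        // Flattening a coercion type of { i64, i64 } thus becomes two scalar
        // loads feeding two distinct IR arguments (sketch):
        //   %e0 = load i64* %coerce.elt0, align 1
        //   %e1 = load i64* %coerce.elt1, align 1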
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        IRCallArgs[FirstIRArg] =
            CreateCoercedLoad(SrcPtr, ArgInfo.getCoerceToType(), *this);
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, RV, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  if (ArgMemory) {
    llvm::Value *Arg = ArgMemory;
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS =
          cast<llvm::PointerType>(Callee->getType())->getAddressSpace();
      Callee = Builder.CreateBitCast(
          Callee, getTypes().GetFunctionType(CallInfo)->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // If the callee is a bitcast of a function to a varargs pointer to function
  // type, check to see if we can remove the bitcast. This handles some cases
  // with unprototyped functions.
  if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Callee))
    if (llvm::Function *CalleeF = dyn_cast<llvm::Function>(CE->getOperand(0))) {
      llvm::PointerType *CurPT = cast<llvm::PointerType>(Callee->getType());
      llvm::FunctionType *CurFT =
          cast<llvm::FunctionType>(CurPT->getElementType());
      llvm::FunctionType *ActualFT = CalleeF->getFunctionType();

      if (CE->getOpcode() == llvm::Instruction::BitCast &&
          ActualFT->getReturnType() == CurFT->getReturnType() &&
          ActualFT->getNumParams() == CurFT->getNumParams() &&
          ActualFT->getNumParams() == IRCallArgs.size() &&
          (CurFT->isVarArg() || !ActualFT->isVarArg())) {
        bool ArgsMatch = true;
        for (unsigned i = 0, e = ActualFT->getNumParams(); i != e; ++i)
          if (ActualFT->getParamType(i) != CurFT->getParamType(i)) {
            ArgsMatch = false;
            break;
          }

        // Strip the cast if we can get away with it. This is a nice cleanup,
        // but also allows us to inline the function at -O0 if it is marked
        // always_inline.
        if (ArgsMatch)
          Callee = CalleeF;
      }
    }
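  // For instance, calling through an unprototyped 'void f();' declaration
  // when @f is actually defined as 'void f(int)' produces roughly:
  //   call void bitcast (void (i32)* @f to void (i32, ...)*)(i32 1)
  // which the check above rewrites to a direct 'call void @f(i32 1)'.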

  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument is allowed to have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }

  unsigned CallingConv;
  CodeGen::AttributeListType AttributeList;
  CGM.ConstructAttributeList(CallInfo, TargetDecl, AttributeList,
                             CallingConv, true);
  llvm::AttributeSet Attrs = llvm::AttributeSet::get(getLLVMContext(),
                                                     AttributeList);

  llvm::BasicBlock *InvokeDest = nullptr;
  if (!Attrs.hasAttribute(llvm::AttributeSet::FunctionIndex,
                          llvm::Attribute::NoUnwind))
    InvokeDest = getInvokeDest();

  llvm::CallSite CS;
  if (!InvokeDest) {
    CS = Builder.CreateCall(Callee, IRCallArgs);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CS = Builder.CreateInvoke(Callee, Cont, InvokeDest, IRCallArgs);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CS.getInstruction();

  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !CS.hasFnAttr(llvm::Attribute::NoInline))
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeSet::FunctionIndex,
                           llvm::Attribute::AlwaysInline);

  CS.setAttributes(Attrs);
  CS.setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CS.getInstruction());

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRgen to discard
  // unreachable code.
  if (CS.doesNotReturn()) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  llvm::Instruction *CI = CS.getInstruction();
  if (Builder.isNamePreserving() && !CI->getType()->isVoidTy())
    CI->setName("call");

  // Emit any writebacks immediately. Arguably this should happen
  // after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);
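  // This emits the matching teardown for the earlier stack save (sketch):
  //   call void @llvm.stackrestore(i8* %inalloca.save)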

  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect:
      return convertTempToRValue(SRetPtr, RetTy, SourceLocation());

    case ABIArgInfo::Ignore:
      // Even if we are ignoring the result, make sure to construct an
      // appropriate return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          llvm::Value *DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          BuildAggStore(*this, CI, DestPtr, DestIsVolatile, false);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the types don't match, perform a bitcast to coerce the value.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      llvm::Value *DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      llvm::Value *StorePtr = DestPtr;
      if (unsigned Offs = RetAI.getDirectOffset()) {
        StorePtr = Builder.CreateBitCast(StorePtr, Builder.getInt8PtrTy());
        StorePtr = Builder.CreateConstGEP1_32(StorePtr, Offs);
        StorePtr = Builder.CreateBitCast(StorePtr,
            llvm::PointerType::getUnqual(RetAI.getCoerceToType()));
      }
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  if (Ret.isScalar() && TargetDecl) {
    if (const auto *AA = TargetDecl->getAttr<AssumeAlignedAttr>()) {
      llvm::Value *OffsetValue = nullptr;
      if (const auto *Offset = AA->getOffset())
        OffsetValue = EmitScalarExpr(Offset);

      llvm::Value *Alignment = EmitScalarExpr(AA->getAlignment());
      llvm::ConstantInt *AlignmentCI = cast<llvm::ConstantInt>(Alignment);
      EmitAlignmentAssumption(Ret.getScalarVal(), AlignmentCI->getZExtValue(),
                              OffsetValue);
    }
  }
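  // e.g. for a callee declared (hypothetically) as
  //   void *aligned_alloc64(size_t n) __attribute__((assume_aligned(64)));
  // this emits an alignment assumption on the returned pointer so later
  // optimizations may rely on the 64-byte alignment.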

  return Ret;
}

/* VarArg handling */

llvm::Value *CodeGenFunction::EmitVAArg(llvm::Value *VAListAddr, QualType Ty) {
  return CGM.getTypes().getABIInfo().EmitVAArg(VAListAddr, Ty, *this);
}