//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliancy.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel: return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
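/// For example, for 'void S::f() const', the 'this' type derived here is
/// 'S *' rather than 'const S *', since the CVR qualifiers have no ABI
/// significance. Address space qualifiers, by contrast, are preserved.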
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
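/// For example, a function declared as 'const int f()' is arranged with
/// return type 'int'; the top-level const is irrelevant to the ABI.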
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
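  // For example, the K&R-style type 'int ()' is lowered to the LLVM type
  // 'i32 (...)' with zero required arguments.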
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
         llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                        const FunctionProtoType *proto,
                                        unsigned prefixArgs,
                                        unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
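/// For example, 'void f(void *p __attribute__((pass_object_size(0))))' is
/// arranged with two parameters: the pointer itself, followed by an implicit
/// size_t argument carrying the object size.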
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const Decl *D, bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
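/// For illustration: in the Itanium C++ ABI, the VTT pointer is a prefix
/// argument passed right after 'this', while the Microsoft ABI passes its
/// 'is most derived' flag as a suffix argument.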
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
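/// (The implicit argument is the block literal pointer, which is passed
/// first; hence the single extra required argument below.)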
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                              ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way a QualType passed as ABIArgInfo::Expand is expanded.
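// For example, 'struct S { int a; _Complex float b; };' expands to three
// scalar IR values: (i32, float, float).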
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign) {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align);
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
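/// For example, when accessing 8 bytes out of '{ { i64 }, i8 }', this dives
/// through the nested struct down to the inner i64, whose store size still
/// covers the access.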
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
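/// For example, coercing an i64 to an i32 keeps the high 32 bits (via a
/// logical shift right) on big-endian targets but the low 32 bits on
/// little-endian targets, matching what a store followed by a narrower load
/// would produce.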
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}


/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
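/// For example, coercing a '{ i32, i32 }' (size 8) to an i64 just bitcasts
/// the source pointer and loads an i64; coercing it to a larger type instead
/// round-trips the bytes through a temporary alloca and a memcpy.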
CreateCoercedLoad(Address Src,llvm::Type * Ty,CodeGenFunction & CGF)1225 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1226 CodeGenFunction &CGF) {
1227 llvm::Type *SrcTy = Src.getElementType();
1228
1229 // If SrcTy and Ty are the same, just do a load.
1230 if (SrcTy == Ty)
1231 return CGF.Builder.CreateLoad(Src);
1232
1233 uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1234
1235 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1236 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy, DstSize, CGF);
1237 SrcTy = Src.getElementType();
1238 }
1239
1240 uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1241
1242 // If the source and destination are integer or pointer types, just do an
1243 // extension or truncation to the desired type.
1244 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1245 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1246 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1247 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1248 }
1249
1250 // If load is legal, just bitcast the src pointer.
1251 if (SrcSize >= DstSize) {
1252 // Generally SrcSize is never greater than DstSize, since this means we are
1253 // losing bits. However, this can happen in cases where the structure has
1254 // additional padding, for example due to a user specified alignment.
1255 //
1256 // FIXME: Assert that we aren't truncating non-padding bits when have access
1257 // to that information.
1258 Src = CGF.Builder.CreateBitCast(Src,
1259 Ty->getPointerTo(Src.getAddressSpace()));
1260 return CGF.Builder.CreateLoad(Src);
1261 }
1262
1263 // Otherwise do coercion through memory. This is stupid, but simple.
1264 Address Tmp = CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment());
1265 CGF.Builder.CreateMemCpy(Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
1266 Src.getPointer(), Src.getAlignment().getAsAlign(),
1267 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize));
1268 return CGF.Builder.CreateLoad(Tmp);
1269 }
1270
1271 // Function to store a first-class aggregate into memory. We prefer to
1272 // store the elements rather than the aggregate to be more friendly to
1273 // fast-isel.
1274 // FIXME: Do we need to recurse here?
EmitAggregateStore(llvm::Value * Val,Address Dest,bool DestIsVolatile)1275 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1276 bool DestIsVolatile) {
1277 // Prefer scalar stores to first-class aggregate stores.
1278 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1279 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1280 Address EltPtr = Builder.CreateStructGEP(Dest, i);
1281 llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1282 Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1283 }
1284 } else {
1285 Builder.CreateStore(Val, Dest, DestIsVolatile);
1286 }
1287 }
1288
1289 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1290 /// where the source and destination may have different types. The
1291 /// destination is known to be aligned to \arg DstAlign bytes.
1292 ///
1293 /// This safely handles the case when the src type is larger than the
1294 /// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
                               Address Dst,
                               bool DstIsVolatile,
                               CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src->getType();
  llvm::Type *DstTy = Dst.getElementType();
  if (SrcTy == DstTy) {
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
    Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy, SrcSize, CGF);
    DstTy = Dst.getElementType();
  }

  llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
  llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
  if (SrcPtrTy && DstPtrTy &&
      SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
    Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
      (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
    Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
    CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
    return;
  }

  uint64_t DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);

  // If store is legal, just bitcast the src pointer.
  if (SrcSize <= DstSize) {
    Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
    CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
  } else {
    // Otherwise do coercion through memory. This is stupid, but
    // simple.

    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
    CGF.Builder.CreateStore(Src, Tmp);
    CGF.Builder.CreateMemCpy(Dst.getPointer(), Dst.getAlignment().getAsAlign(),
                             Tmp.getPointer(), Tmp.getAlignment().getAsAlign(),
                             llvm::ConstantInt::get(CGF.IntPtrTy, DstSize));
  }
}

static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
                                   const ABIArgInfo &info) {
  if (unsigned offset = info.getDirectOffset()) {
    addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
    addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
                                             CharUnits::fromQuantity(offset));
    addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
  }
  return addr;
}

namespace {

/// Encapsulates information about the way function arguments from
/// CGFunctionInfo should be passed to the actual LLVM IR function.
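///
/// As an illustrative (hypothetical) example, a Clang signature such as
///   struct Big f(int a, struct Big b);
/// lowered with an indirect (sret) return and a byval argument maps to IR
///   void @f(%struct.Big* sret, i32, %struct.Big* byval)
/// where the sret slot is IR argument 0, Clang argument 0 maps to IR
/// argument 1, and Clang argument 1 maps to IR argument 2.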
class ClangToLLVMArgMapping {
  static const unsigned InvalidIndex = ~0U;
  unsigned InallocaArgNo;
  unsigned SRetArgNo;
  unsigned TotalIRArgs;

  /// Arguments of the LLVM IR function corresponding to a single Clang
  /// argument.
  struct IRArgs {
    unsigned PaddingArgIndex;
    // Argument is expanded to IR arguments at positions
    // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
    unsigned FirstArgIndex;
    unsigned NumberOfArgs;

    IRArgs()
        : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
          NumberOfArgs(0) {}
  };

  SmallVector<IRArgs, 8> ArgInfo;

public:
  ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
                        bool OnlyRequiredArgs = false)
      : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
        ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
    construct(Context, FI, OnlyRequiredArgs);
  }

  bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
  unsigned getInallocaArgNo() const {
    assert(hasInallocaArg());
    return InallocaArgNo;
  }

  bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
  unsigned getSRetArgNo() const {
    assert(hasSRetArg());
    return SRetArgNo;
  }

  unsigned totalIRArgs() const { return TotalIRArgs; }

  bool hasPaddingArg(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
  }
  unsigned getPaddingArgNo(unsigned ArgNo) const {
    assert(hasPaddingArg(ArgNo));
    return ArgInfo[ArgNo].PaddingArgIndex;
  }

  /// Returns the index of the first IR argument corresponding to ArgNo, and
  /// the number of IR arguments it expands to.
  std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
    assert(ArgNo < ArgInfo.size());
    return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
                          ArgInfo[ArgNo].NumberOfArgs);
  }

private:
  void construct(const ASTContext &Context, const CGFunctionInfo &FI,
                 bool OnlyRequiredArgs);
};

void ClangToLLVMArgMapping::construct(const ASTContext &Context,
                                      const CGFunctionInfo &FI,
                                      bool OnlyRequiredArgs) {
  unsigned IRArgNo = 0;
  bool SwapThisWithSRet = false;
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  if (RetAI.getKind() == ABIArgInfo::Indirect) {
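    // In some ABIs (e.g. the Microsoft C++ ABI) the sret pointer for an
    // instance method is passed *after* 'this'; in that case the sret IR
    // argument number is 1 rather than 0.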
    SwapThisWithSRet = RetAI.isSRetAfterThis();
    SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
  }

  unsigned ArgNo = 0;
  unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
       ++I, ++ArgNo) {
    assert(I != FI.arg_end());
    QualType ArgType = I->type;
    const ABIArgInfo &AI = I->info;
    // Collect data about IR arguments corresponding to Clang argument ArgNo.
    auto &IRArgs = ArgInfo[ArgNo];

    if (AI.getPaddingType())
      IRArgs.PaddingArgIndex = IRArgNo++;

    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // FIXME: handle sseregparm someday...
      llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
      if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
        IRArgs.NumberOfArgs = STy->getNumElements();
      } else {
        IRArgs.NumberOfArgs = 1;
      }
      break;
    }
    case ABIArgInfo::Indirect:
      IRArgs.NumberOfArgs = 1;
      break;
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      // ignore and inalloca don't have matching LLVM parameters.
      IRArgs.NumberOfArgs = 0;
      break;
    case ABIArgInfo::CoerceAndExpand:
      IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
      break;
    case ABIArgInfo::Expand:
      IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
      break;
    }

    if (IRArgs.NumberOfArgs > 0) {
      IRArgs.FirstArgIndex = IRArgNo;
      IRArgNo += IRArgs.NumberOfArgs;
    }

    // Skip over the sret parameter when it comes second. We already handled it
    // above.
    if (IRArgNo == 1 && SwapThisWithSRet)
      IRArgNo++;
  }
  assert(ArgNo == ArgInfo.size());

  if (FI.usesInAlloca())
    InallocaArgNo = IRArgNo++;

  TotalIRArgs = IRArgNo;
}
} // namespace

/***/

bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
  const auto &RI = FI.getReturnInfo();
  return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
}

bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
  return ReturnTypeUsesSRet(FI) &&
         getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
}

bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
  if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
    switch (BT->getKind()) {
    default:
      return false;
    case BuiltinType::Float:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
    case BuiltinType::Double:
      return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
    case BuiltinType::LongDouble:
      return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
    }
  }

  return false;
}

bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
  if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
    if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
      if (BT->getKind() == BuiltinType::LongDouble)
        return getTarget().useObjCFP2RetForComplexLongDouble();
    }
  }

  return false;
}

llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
  const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
  return GetFunctionType(FI);
}

llvm::FunctionType *
CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {

  bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
  (void)Inserted;
  assert(Inserted && "Recursively being processed?");

  llvm::Type *resultType = nullptr;
  const ABIArgInfo &retAI = FI.getReturnInfo();
  switch (retAI.getKind()) {
  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    resultType = retAI.getCoerceToType();
    break;

  case ABIArgInfo::InAlloca:
    if (retAI.getInAllocaSRet()) {
      // sret things on win32 aren't void; they return the sret pointer.
      QualType ret = FI.getReturnType();
      llvm::Type *ty = ConvertType(ret);
      unsigned addressSpace = Context.getTargetAddressSpace(ret);
      resultType = llvm::PointerType::get(ty, addressSpace);
    } else {
      resultType = llvm::Type::getVoidTy(getLLVMContext());
    }
    break;

  case ABIArgInfo::Indirect:
  case ABIArgInfo::Ignore:
    resultType = llvm::Type::getVoidTy(getLLVMContext());
    break;

  case ABIArgInfo::CoerceAndExpand:
    resultType = retAI.getUnpaddedCoerceAndExpandType();
    break;
  }

  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
  SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());

  // Add type for sret argument.
  if (IRFunctionArgs.hasSRetArg()) {
    QualType Ret = FI.getReturnType();
    llvm::Type *Ty = ConvertType(Ret);
    unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
    ArgTypes[IRFunctionArgs.getSRetArgNo()] =
        llvm::PointerType::get(Ty, AddressSpace);
  }

  // Add type for inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    auto ArgStruct = FI.getArgStruct();
    assert(ArgStruct);
    ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
  }

  // Add in all of the required arguments.
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
                                     ie = it + FI.getNumRequiredArgs();
  for (; it != ie; ++it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = it->info;

    // Insert a padding type to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          ArgInfo.getPaddingType();

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::Ignore:
    case ABIArgInfo::InAlloca:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      // indirect arguments are always on the stack, which is alloca addr space.
      llvm::Type *LTy = ConvertTypeForMem(it->type);
      ArgTypes[FirstIRArg] = LTy->getPointerTo(
          CGM.getDataLayout().getAllocaAddrSpace());
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
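      // For instance, an argument coerced to { i64, i64 } is emitted here as
      // two i64 parameters rather than one first-class aggregate parameter.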
      llvm::Type *argType = ArgInfo.getCoerceToType();
      llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
      if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        assert(NumIRArgs == st->getNumElements());
        for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
          ArgTypes[FirstIRArg + i] = st->getElementType(i);
      } else {
        assert(NumIRArgs == 1);
        ArgTypes[FirstIRArg] = argType;
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
        *ArgTypesIter++ = EltTy;
      }
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand:
      auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
      getExpandedTypes(it->type, ArgTypesIter);
      assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
      break;
    }
  }

  bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
  assert(Erased && "Not in set?");

  return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
}

llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
  const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
  const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();

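  // If the prototype presumably mentions a type that can't be converted yet
  // (e.g. an incomplete return or parameter type), hand back an empty struct
  // type as a placeholder for the vtable slot.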
  if (!isFuncTypeConvertible(FPT))
    return llvm::StructType::get(getLLVMContext());

  return GetFunctionType(GD);
}

static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
                                               llvm::AttrBuilder &FuncAttrs,
                                               const FunctionProtoType *FPT) {
  if (!FPT)
    return;

  if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
      FPT->isNothrow())
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
}

void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
                                                 bool HasOptnone,
                                                 bool AttrOnCallSite,
                                                 llvm::AttrBuilder &FuncAttrs) {
  // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
  if (!HasOptnone) {
    if (CodeGenOpts.OptimizeSize)
      FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
    if (CodeGenOpts.OptimizeSize == 2)
      FuncAttrs.addAttribute(llvm::Attribute::MinSize);
  }

  if (CodeGenOpts.DisableRedZone)
    FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
  if (CodeGenOpts.IndirectTlsSegRefs)
    FuncAttrs.addAttribute("indirect-tls-seg-refs");
  if (CodeGenOpts.NoImplicitFloat)
    FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);

  if (AttrOnCallSite) {
    // Attributes that should go on the call site only.
    if (!CodeGenOpts.SimplifyLibCalls ||
        CodeGenOpts.isNoBuiltinFunc(Name.data()))
      FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
    if (!CodeGenOpts.TrapFuncName.empty())
      FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
  } else {
    StringRef FpKind;
    switch (CodeGenOpts.getFramePointer()) {
    case CodeGenOptions::FramePointerKind::None:
      FpKind = "none";
      break;
    case CodeGenOptions::FramePointerKind::NonLeaf:
      FpKind = "non-leaf";
      break;
    case CodeGenOptions::FramePointerKind::All:
      FpKind = "all";
      break;
    }
    FuncAttrs.addAttribute("frame-pointer", FpKind);

    FuncAttrs.addAttribute("less-precise-fpmad",
                           llvm::toStringRef(CodeGenOpts.LessPreciseFPMAD));

    if (CodeGenOpts.NullPointerIsValid)
      FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);

    if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
      FuncAttrs.addAttribute("denormal-fp-math",
                             CodeGenOpts.FPDenormalMode.str());
    if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
      FuncAttrs.addAttribute(
          "denormal-fp-math-f32",
          CodeGenOpts.FP32DenormalMode.str());
    }

    FuncAttrs.addAttribute("no-trapping-math",
                           llvm::toStringRef(LangOpts.getFPExceptionMode() ==
                                             LangOptions::FPE_Ignore));

    // Strict (compliant) code is the default, so only add this attribute to
    // indicate that we are trying to work around a problem case.
    if (!CodeGenOpts.StrictFloatCastOverflow)
      FuncAttrs.addAttribute("strict-float-cast-overflow", "false");

    // TODO: Are these all needed?
    // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
    FuncAttrs.addAttribute("no-infs-fp-math",
                           llvm::toStringRef(LangOpts.NoHonorInfs));
    FuncAttrs.addAttribute("no-nans-fp-math",
                           llvm::toStringRef(LangOpts.NoHonorNaNs));
    FuncAttrs.addAttribute("unsafe-fp-math",
                           llvm::toStringRef(LangOpts.UnsafeFPMath));
    FuncAttrs.addAttribute("use-soft-float",
                           llvm::toStringRef(CodeGenOpts.SoftFloat));
    FuncAttrs.addAttribute("stack-protector-buffer-size",
                           llvm::utostr(CodeGenOpts.SSPBufferSize));
    FuncAttrs.addAttribute("no-signed-zeros-fp-math",
                           llvm::toStringRef(LangOpts.NoSignedZero));
    FuncAttrs.addAttribute(
        "correctly-rounded-divide-sqrt-fp-math",
        llvm::toStringRef(CodeGenOpts.CorrectlyRoundedDivSqrt));

    // TODO: Reciprocal estimate codegen options should apply to instructions?
    const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
    if (!Recips.empty())
      FuncAttrs.addAttribute("reciprocal-estimates",
                             llvm::join(Recips, ","));

    if (!CodeGenOpts.PreferVectorWidth.empty() &&
        CodeGenOpts.PreferVectorWidth != "none")
      FuncAttrs.addAttribute("prefer-vector-width",
                             CodeGenOpts.PreferVectorWidth);

    if (CodeGenOpts.StackRealignment)
      FuncAttrs.addAttribute("stackrealign");
    if (CodeGenOpts.Backchain)
      FuncAttrs.addAttribute("backchain");
    if (CodeGenOpts.EnableSegmentedStacks)
      FuncAttrs.addAttribute("split-stack");

    if (CodeGenOpts.SpeculativeLoadHardening)
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
  }

  if (getLangOpts().assumeFunctionsAreConvergent()) {
    // Conservatively, mark all functions and calls in CUDA and OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as __syncthreads() / barrier(), and so can't have certain optimizations
    // applied around them). LLVM will remove this attribute where it safely
    // can.
    FuncAttrs.addAttribute(llvm::Attribute::Convergent);
  }

  if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
    // Exceptions aren't supported in CUDA device code.
    FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
  }

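  // Each entry below arrives as a single "name=value" string; splitting on
  // '=' leaves the value empty for bare attribute names.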
  for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
    StringRef Var, Value;
    std::tie(Var, Value) = Attr.split('=');
    FuncAttrs.addAttribute(Var, Value);
  }
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
  llvm::AttrBuilder FuncAttrs;
  getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
                               /* AttrOnCallSite = */ false, FuncAttrs);
  // TODO: call GetCPUAndFeaturesAttributes?
  F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
}

void CodeGenModule::addDefaultFunctionDefinitionAttributes(
    llvm::AttrBuilder &attrs) {
  getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
                               /*for call*/ false, attrs);
  GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
}

static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
                                   const LangOptions &LangOpts,
                                   const NoBuiltinAttr *NBA = nullptr) {
  auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
    SmallString<32> AttributeName;
    AttributeName += "no-builtin-";
    AttributeName += BuiltinName;
    FuncAttrs.addAttribute(AttributeName);
  };
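  // e.g. -fno-builtin-memcpy results in the IR attribute "no-builtin-memcpy".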

  // First, handle the language options passed through -fno-builtin.
  if (LangOpts.NoBuiltin) {
    // -fno-builtin disables them all.
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // Then, add attributes for builtins specified through -fno-builtin-<name>.
  llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);

  // Now, let's check the __attribute__((no_builtin("..."))) attribute added
  // to the source.
  if (!NBA)
    return;

  // If there is a wildcard in the builtin names specified through the
  // attribute, disable them all.
  if (llvm::is_contained(NBA->builtinNames(), "*")) {
    FuncAttrs.addAttribute("no-builtins");
    return;
  }

  // And last, add the rest of the builtin names.
  llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
}

/// Construct the IR attribute list of a function or call.
///
/// When adding an attribute, please consider where it should be handled:
///
///   - getDefaultFunctionAttributes is for attributes that are essentially
///     part of the global target configuration (but perhaps can be
///     overridden on a per-function basis).  Adding attributes there
///     will cause them to also be set in frontends that build on Clang's
///     target-configuration logic, as well as for code defined in library
///     modules such as CUDA's libdevice.
///
///   - ConstructAttributeList builds on top of getDefaultFunctionAttributes
///     and adds declaration-specific, convention-specific, and
///     frontend-specific logic.  The last is of particular importance:
///     attributes that restrict how the frontend generates code must be
///     added here rather than getDefaultFunctionAttributes.
///
void CodeGenModule::ConstructAttributeList(
    StringRef Name, const CGFunctionInfo &FI, CGCalleeInfo CalleeInfo,
    llvm::AttributeList &AttrList, unsigned &CallingConv, bool AttrOnCallSite) {
  llvm::AttrBuilder FuncAttrs;
  llvm::AttrBuilder RetAttrs;

  // Collect function IR attributes from the CC lowering.
  // We'll collect the parameter and result attributes later.
  CallingConv = FI.getEffectiveCallingConvention();
  if (FI.isNoReturn())
    FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
  if (FI.isCmseNSCall())
    FuncAttrs.addAttribute("cmse_nonsecure_call");

  // Collect function IR attributes from the callee prototype if we have one.
  AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
                                     CalleeInfo.getCalleeFunctionProtoType());

  const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();

  bool HasOptnone = false;
  // The NoBuiltinAttr attached to the target FunctionDecl.
  const NoBuiltinAttr *NBA = nullptr;

  // Collect function IR attributes based on declaration-specific
  // information.
  // FIXME: handle sseregparm someday...
  if (TargetDecl) {
    if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
    if (TargetDecl->hasAttr<NoThrowAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    if (TargetDecl->hasAttr<NoReturnAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
    if (TargetDecl->hasAttr<ColdAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Cold);
    if (TargetDecl->hasAttr<NoDuplicateAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
    if (TargetDecl->hasAttr<ConvergentAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::Convergent);

    if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
      AddAttributesFromFunctionProtoType(
          getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
      if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
        // A sane operator new returns a non-aliasing pointer.
        auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
        if (getCodeGenOpts().AssumeSaneOperatorNew &&
            (Kind == OO_New || Kind == OO_Array_New))
          RetAttrs.addAttribute(llvm::Attribute::NoAlias);
      }
      const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
      const bool IsVirtualCall = MD && MD->isVirtual();
      // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
      // virtual function. These attributes are not inherited by overloads.
      if (!(AttrOnCallSite && IsVirtualCall)) {
        if (Fn->isNoReturn())
          FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
        NBA = Fn->getAttr<NoBuiltinAttr>();
      }
    }

    // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
    if (TargetDecl->hasAttr<ConstAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<PureAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
      FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
      FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
    }
    if (TargetDecl->hasAttr<RestrictAttr>())
      RetAttrs.addAttribute(llvm::Attribute::NoAlias);
    if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
      FuncAttrs.addAttribute("no_caller_saved_registers");
    if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);

    HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
    if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
      Optional<unsigned> NumElemsParam;
      if (AllocSize->getNumElemsParam().isValid())
        NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
      FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
                                 NumElemsParam);
    }

    if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
      if (getLangOpts().OpenCLVersion <= 120) {
        // OpenCL v1.2 work groups are always uniform.
        FuncAttrs.addAttribute("uniform-work-group-size", "true");
      } else {
        // In OpenCL v2.0, work groups may or may not be uniform. The
        // '-cl-uniform-work-group-size' compile option hints to the compiler
        // that the global work-size is a multiple of the work-group size
        // specified to clEnqueueNDRangeKernel (i.e. work groups are uniform).
        FuncAttrs.addAttribute("uniform-work-group-size",
                               llvm::toStringRef(CodeGenOpts.UniformWGSize));
      }
    }
  }

  // Attach "no-builtins" attributes to:
  // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
  // * definitions: "no-builtins" or "no-builtin-<name>" only.
  // The attributes can come from:
  // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
  // * FunctionDecl attributes: __attribute__((no_builtin(...)))
  addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);

  // Collect function IR attributes based on global settings.
  getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);

  // Override some default IR attributes based on declaration-specific
  // information.
  if (TargetDecl) {
    if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
      FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
      FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
    if (TargetDecl->hasAttr<NoSplitStackAttr>())
      FuncAttrs.removeAttribute("split-stack");

    // Add NonLazyBind attribute to function declarations when -fno-plt
    // is used.
    // FIXME: what if we just haven't processed the function definition
    // yet, or if it's an external definition like C99 inline?
    if (CodeGenOpts.NoPLT) {
      if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
        if (!Fn->isDefined() && !AttrOnCallSite) {
          FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
        }
      }
    }
  }

  // Collect non-call-site function IR attributes from declaration-specific
  // information.
  if (!AttrOnCallSite) {
    if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
      FuncAttrs.addAttribute("cmse_nonsecure_entry");

    // Decide whether we need to disable tail calls.
    auto shouldDisableTailCalls = [&] {
      // Should this be honored in getDefaultFunctionAttributes?
      if (CodeGenOpts.DisableTailCalls)
        return true;

      if (!TargetDecl)
        return false;

      if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
          TargetDecl->hasAttr<AnyX86InterruptAttr>())
        return true;

      if (CodeGenOpts.NoEscapingBlockTailCalls) {
        if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
          if (!BD->doesNotEscape())
            return true;
      }

      return false;
    };
    FuncAttrs.addAttribute("disable-tail-calls",
                           llvm::toStringRef(shouldDisableTailCalls()));

    // CPU/feature overrides.  addDefaultFunctionDefinitionAttributes
    // handles these separately to set them based on the global defaults.
    GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
  }

  // Collect attributes from arguments and return values.
  ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);

  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();
  switch (RetAI.getKind()) {
  case ABIArgInfo::Extend:
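    // e.g. a 'signed short' result gets 'signext' and an 'unsigned short'
    // result gets 'zeroext', telling callers how the value was widened.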
    if (RetAI.isSignExt())
      RetAttrs.addAttribute(llvm::Attribute::SExt);
    else
      RetAttrs.addAttribute(llvm::Attribute::ZExt);
    LLVM_FALLTHROUGH;
  case ABIArgInfo::Direct:
    if (RetAI.getInReg())
      RetAttrs.addAttribute(llvm::Attribute::InReg);
    break;
  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::InAlloca:
  case ABIArgInfo::Indirect: {
    // inalloca and sret disable readnone and readonly
    FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
        .removeAttribute(llvm::Attribute::ReadNone);
    break;
  }

  case ABIArgInfo::CoerceAndExpand:
    break;

  case ABIArgInfo::Expand:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
    QualType PTy = RefTy->getPointeeType();
    if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
      RetAttrs.addDereferenceableAttr(
          getMinimumObjectSize(PTy).getQuantity());
    if (getContext().getTargetAddressSpace(PTy) == 0 &&
        !CodeGenOpts.NullPointerIsValid)
      RetAttrs.addAttribute(llvm::Attribute::NonNull);
    if (PTy->isObjectType()) {
      llvm::Align Alignment =
          getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
      RetAttrs.addAlignmentAttr(Alignment);
    }
  }

  bool hasUsedSRet = false;
  SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());

  // Attach attributes to sret.
  if (IRFunctionArgs.hasSRetArg()) {
    llvm::AttrBuilder SRETAttrs;
    SRETAttrs.addAttribute(llvm::Attribute::StructRet);
    hasUsedSRet = true;
    if (RetAI.getInReg())
      SRETAttrs.addAttribute(llvm::Attribute::InReg);
    SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
    ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
  }

  // Attach attributes to inalloca argument.
  if (IRFunctionArgs.hasInallocaArg()) {
    llvm::AttrBuilder Attrs;
    Attrs.addAttribute(llvm::Attribute::InAlloca);
    ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
        llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs;

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg()) {
        ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
            llvm::AttributeSet::get(
                getLLVMContext(),
                llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
      }
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (AI.isSignExt())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else
        Attrs.addAttribute(llvm::Attribute::ZExt);
      LLVM_FALLTHROUGH;
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      break;

    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
          .removeAttribute(llvm::Attribute::ReadNone);
      continue;
    }

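    // e.g. an 'int &' parameter is lowered to a pointer argument carrying
    // dereferenceable(4), align 4, and (when null pointers are not valid in
    // its address space) nonnull.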
    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(
            getMinimumObjectSize(PTy).getQuantity());
      if (getContext().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        Attrs.addAttribute(llvm::Attribute::NonNull);
      if (PTy->isObjectType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
        Attrs.addAlignmentAttr(Alignment);
      }
    }

    switch (FI.getExtParameterInfo(ArgNo).getABI()) {
    case ParameterABI::Ordinary:
      break;

    case ParameterABI::SwiftIndirectResult: {
      // Add 'sret' if we haven't already used it for something, but
      // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addAttribute(llvm::Attribute::StructRet);
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);

      // Add 'dereferenceable' and 'alignment'.
      auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.first.getQuantity());
        Attrs.addAlignmentAttr(info.second.getAsAlign());
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;
    }

    if (FI.getExtParameterInfo(ArgNo).isNoEscape())
      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] =
            llvm::AttributeSet::get(getLLVMContext(), Attrs);
    }
  }
  assert(ArgNo == FI.arg_size());

  AttrList = llvm::AttributeList::get(
      getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
      llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
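///
/// For example, a K&R-style 'float' parameter arrives promoted to 'double'
/// and a 'short' arrives as 'int'; we truncate or fp-cast back here.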
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

/// Returns the attribute (either the parameter attribute or the function
/// attribute) that declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

namespace {
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  Address Temp;
  Address Arg;
  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
    CGF.Builder.CreateStore(errorValue, Arg);
  }
};
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value.  TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
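  // (The canonical example is 'main', which falls off the end without a
  // return statement and is defined to return zero.)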
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  if (IRFunctionArgs.hasInallocaArg()) {
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
                        FI.getArgStructAlignment());

    assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
  }

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
  }

  // Track whether we received the parameter as a pointer (indirect, byval, or
  // inalloca).  If we already have a pointer, EmitParmDecl doesn't need to
  // copy it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration.  This usually
  // entails copying one or more LLVM IR arguments into an alloca.  Don't push
  // any cleanups or do anything that might unwind.  We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
    // We are converting from ABIArgInfo type to VarDecl type directly, unless
    // the parameter is promoted. In this case we convert to
    // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
    QualType Ty = isPromoted ? info_it->type : Arg->getType();
    assert(hasScalarEvaluationKind(Ty) ==
           hasScalarEvaluationKind(Arg->getType()));

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      Address V =
          Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
      if (ArgI.getInAllocaIndirect())
        V = Address(Builder.CreateLoad(V),
                    getContext().getTypeAlignInChars(Ty));
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      Address ParamAddr =
          Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          Builder.CreateMemCpy(
              AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
              ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
              llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
            EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      auto AI = Fn->getArg(FirstIRArg);
      llvm::Type *LTy = ConvertType(Arg->getType());

      // Prepare parameter attributes. So far, only attributes for pointer
      // parameters are prepared. See
      // http://llvm.org/docs/LangRef.html#paramattrs.
      if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
          ArgI.getCoerceToType()->isPointerTy()) {
        assert(NumIRArgs == 1);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          // Set `nonnull` attribute if any.
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()) &&
              !CGM.getCodeGenOpts().NullPointerIsValid)
            AI->addAttr(llvm::Attribute::NonNull);

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
              getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
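            // e.g. 'void f(int a[static 4])' marks 'a' dereferenceable(16)
            // on a target with 4-byte ints.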
            if (ArrTy->getSizeModifier() == ArrayType::Static) {
              QualType ETy = ArrTy->getElementType();
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs;
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                    ArrSize);
                AI->addAttrs(Attrs);
              } else if (getContext().getTargetInfo().getNullPointerValue(
                             ETy.getAddressSpace()) == 0 &&
                         !CGM.getCodeGenOpts().NullPointerIsValid) {
                AI->addAttr(llvm::Attribute::NonNull);
              }
            }
          } else if (const auto *ArrTy =
                         getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == VariableArrayType::Static &&
                !getContext().getTargetAddressSpace(ArrTy->getElementType()) &&
                !CGM.getCodeGenOpts().NullPointerIsValid)
              AI->addAttr(llvm::Attribute::NonNull);
          }

          // Set `align` attribute if any.
          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
            // If the alignment-assumption sanitizer is enabled, we do *not*
            // add the alignment attribute here; instead we emit a normal
            // alignment assumption so that the UBSan check can still fire.
            llvm::ConstantInt *AlignmentCI =
                cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
            unsigned AlignmentInt =
                AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
            if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
              AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
              AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
                  llvm::Align(AlignmentInt)));
            }
          }
        }

        // Set 'noalias' if an argument type has the `restrict` qualifier.
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);
      }

      // Prepare the argument value. If we have the trivial case, handle it
      // with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);

        // LLVM expects swifterror parameters to be used in very restricted
        // ways. Copy the value into a less-restricted temporary.
        llvm::Value *V = AI;
        if (FI.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          QualType pointeeTy = Ty->getPointeeType();
          assert(pointeeTy->isPointerType());
          Address temp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
          llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
          Builder.CreateStore(incomingErrorValue, temp);
          V = temp.getPointer();

          // Push a cleanup to copy the value back at the end of the function.
          // The convention does not guarantee that the value will be written
          // back if the function exits with an unwind exception.
          EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
        }

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ParamValue::forDirect(V));
        break;
      }

      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                     Arg->getName());

      // Pointer to store into.
      Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
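      // e.g. two i64 IR arguments named "x.coerce0" and "x.coerce1" are
      // reassembled here into a temporary of type { i64, i64 }.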
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::Type *DstTy = Ptr.getElementType();
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);

        Address AddrToStoreInto = Address::invalid();
        if (SrcSize <= DstSize) {
          AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
        } else {
          AddrToStoreInto =
              CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
        }

        assert(STy->getNumElements() == NumIRArgs);
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          auto AI = Fn->getArg(FirstIRArg + i);
          AI->setName(Arg->getName() + ".coerce" + Twine(i));
          Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
          Builder.CreateStore(AI, EltPtr);
        }

        if (SrcSize > DstSize) {
          Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
        }

      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
            EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      // Reconstruct into a temporary.
      Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      ArgVals.push_back(ParamValue::forIndirect(alloca));

      auto coercionType = ArgI.getCoerceAndExpandType();
      alloca = Builder.CreateElementBitCast(alloca, coercionType);

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;

        auto eltAddr = Builder.CreateStructGEP(alloca, i);
        auto elt = Fn->getArg(argIndex++);
        Builder.CreateStore(elt, eltAddr);
      }
      assert(argIndex == FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      LValue LV = MakeAddrLValue(Alloca, Ty);
      ArgVals.push_back(ParamValue::forIndirect(Alloca));

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }

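  // If the callee destroys its arguments left-to-right (e.g. the Microsoft
  // C++ ABI), emit the parameter declarations in reverse order so that their
  // destructor cleanups are pushed in the matching order.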
  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}
2727
2728 /// Try to emit a fused autorelease of a return result.
tryEmitFusedAutoreleaseOfResult(CodeGenFunction & CGF,llvm::Value * result)2729 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
2730 llvm::Value *result) {
2731 // We must be immediately followed the cast.
2732 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
2733 if (BB->empty()) return nullptr;
2734 if (&BB->back() != result) return nullptr;
2735
2736 llvm::Type *resultType = result->getType();
2737
2738 // result is in a BasicBlock and is therefore an Instruction.
2739 llvm::Instruction *generator = cast<llvm::Instruction>(result);
2740
2741 SmallVector<llvm::Instruction *, 4> InstsToKill;
2742
2743 // Look for:
2744 // %generator = bitcast %type1* %generator2 to %type2*
2745 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
2746 // We would have emitted this as a constant if the operand weren't
2747 // an Instruction.
2748 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
2749
2750 // Require the generator to be immediately followed by the cast.
2751 if (generator->getNextNode() != bitcast)
2752 return nullptr;
2753
2754 InstsToKill.push_back(bitcast);
2755 }
2756
2757 // Look for:
2758 // %generator = call i8* @objc_retain(i8* %originalResult)
2759 // or
2760 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
2761 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
2762 if (!call) return nullptr;
2763
2764 bool doRetainAutorelease;
2765
2766 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
2767 doRetainAutorelease = true;
2768 } else if (call->getCalledOperand() ==
2769 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
2770 doRetainAutorelease = false;
2771
2772 // If we emitted an assembly marker for this call (and the
2773 // ARCEntrypoints field should have been set if so), go looking
2774 // for that call. If we can't find it, we can't do this
2775 // optimization. But it should always be the immediately previous
2776 // instruction, unless we needed bitcasts around the call.
2777 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
2778 llvm::Instruction *prev = call->getPrevNode();
2779 assert(prev);
2780 if (isa<llvm::BitCastInst>(prev)) {
2781 prev = prev->getPrevNode();
2782 assert(prev);
2783 }
2784 assert(isa<llvm::CallInst>(prev));
2785 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
2786 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
2787 InstsToKill.push_back(prev);
2788 }
2789 } else {
2790 return nullptr;
2791 }
2792
2793 result = call->getArgOperand(0);
2794 InstsToKill.push_back(call);
2795
2796 // Keep killing bitcasts, for sanity. Note that we no longer care
2797 // about precise ordering as long as there's exactly one use.
2798 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
2799 if (!bitcast->hasOneUse()) break;
2800 InstsToKill.push_back(bitcast);
2801 result = bitcast->getOperand(0);
2802 }
2803
2804 // Delete all the unnecessary instructions, from latest to earliest.
2805 for (auto *I : InstsToKill)
2806 I->eraseFromParent();
2807
2808 // Do the fused retain/autorelease if we were asked to.
2809 if (doRetainAutorelease)
2810 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
2811
2812 // Cast back to the result type.
2813 return CGF.Builder.CreateBitCast(result, resultType);
2814 }
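// For example (an illustrative sketch, not IR taken from actual compiler
// output), a return sequence such as:
//   %1 = call i8* @objc_retain(i8* %0)
//   ret i8* %1
// has its retain deleted and the fused entrypoint used instead:
//   %1 = call i8* @objc_retainAutoreleaseReturnValue(i8* %0)
//   ret i8* %1
// saving one runtime call on the return path.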
2815
2816 /// If this is a +1 of the value of an immutable 'self', remove it.
2817 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
2818 llvm::Value *result) {
2819 // This is only applicable to a method with an immutable 'self'.
2820 const ObjCMethodDecl *method =
2821 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
2822 if (!method) return nullptr;
2823 const VarDecl *self = method->getSelfDecl();
2824 if (!self->getType().isConstQualified()) return nullptr;
2825
2826 // Look for a retain call.
2827 llvm::CallInst *retainCall =
2828 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
2829 if (!retainCall || retainCall->getCalledOperand() !=
2830 CGF.CGM.getObjCEntrypoints().objc_retain)
2831 return nullptr;
2832
2833 // Look for an ordinary load of 'self'.
2834 llvm::Value *retainedValue = retainCall->getArgOperand(0);
2835 llvm::LoadInst *load =
2836 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
2837 if (!load || load->isAtomic() || load->isVolatile() ||
2838 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
2839 return nullptr;
2840
2841 // Okay! Burn it all down. This relies for correctness on the
2842 // assumption that the retain is emitted as part of the return and
2843 // that thereafter everything is used "linearly".
2844 llvm::Type *resultType = result->getType();
2845 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
2846 assert(retainCall->use_empty());
2847 retainCall->eraseFromParent();
2848 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
2849
2850 return CGF.Builder.CreateBitCast(load, resultType);
2851 }
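// For example (an illustrative sketch; assumes ARC), in a method such as:
//   - (instancetype)withDefaults { ...; return self; }
// 'self' is const, so the objc_retain emitted for the returned 'self' is
// deleted and the plain load of 'self' is returned instead. This is what
// keeps a 'return self' during -dealloc from producing a stray
// retain/autorelease pair.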
2852
2853 /// Emit an ARC autorelease of the result of a function.
2854 ///
2855 /// \return the value to actually return from the function
2856 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
2857 llvm::Value *result) {
2858 // If we're returning 'self', kill the initial retain. This is a
2859 // heuristic attempt to "encourage correctness" in the really unfortunate
2860 // case where we have a return of self during a dealloc and we desperately
2861 // need to avoid the possible autorelease.
2862 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
2863 return self;
2864
2865 // At -O0, try to emit a fused retain/autorelease.
2866 if (CGF.shouldUseFusedARCCalls())
2867 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
2868 return fused;
2869
2870 return CGF.EmitARCAutoreleaseReturnValue(result);
2871 }
2872
2873 /// Heuristically search for a dominating store to the return-value slot.
2874 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
2875 // Check if a User is a store whose pointer operand is the ReturnValue.
2876 // We are looking for stores to the ReturnValue, not for stores of the
2877 // ReturnValue to some other location.
2878 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
2879 auto *SI = dyn_cast<llvm::StoreInst>(U);
2880 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
2881 return nullptr;
2882 // These aren't actually possible for non-coerced returns, and we
2883 // only care about non-coerced returns on this code path.
2884 assert(!SI->isAtomic() && !SI->isVolatile());
2885 return SI;
2886 };
2887 // If there are multiple uses of the return-value slot, just check
2888 // for something immediately preceding the IP. Sometimes this can
2889 // happen with how we generate implicit-returns; it can also happen
2890 // with noreturn cleanups.
2891 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
2892 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2893 if (IP->empty()) return nullptr;
2894 llvm::Instruction *I = &IP->back();
2895
2896 // Skip lifetime markers
2897 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
2898 IE = IP->rend();
2899 II != IE; ++II) {
2900 if (llvm::IntrinsicInst *Intrinsic =
2901 dyn_cast<llvm::IntrinsicInst>(&*II)) {
2902 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
2903 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
2904 ++II;
2905 if (II == IE)
2906 break;
2907 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
2908 continue;
2909 }
2910 }
2911 I = &*II;
2912 break;
2913 }
2914
2915 return GetStoreIfValid(I);
2916 }
2917
2918 llvm::StoreInst *store =
2919 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
2920 if (!store) return nullptr;
2921
2922 // Now do a quick-and-dirty dominance check: just walk up the
2923 // single-predecessor chain from the current insertion point.
2924 llvm::BasicBlock *StoreBB = store->getParent();
2925 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
2926 while (IP != StoreBB) {
2927 if (!(IP = IP->getSinglePredecessor()))
2928 return nullptr;
2929 }
2930
2931 // Okay, the store's basic block dominates the insertion point; we
2932 // can do our thing.
2933 return store;
2934 }
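// In the common single-store case this matches IR of the form (an
// illustrative sketch; names are hypothetical):
//   %retval = alloca i32
//   ...
//   store i32 %x, i32* %retval   ; the dominating store
//   <insertion point>
// letting the epilogue return %x directly instead of reloading %retval.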
2935
2936 // Helper functions for EmitCMSEClearRecord
2937
2938 // Set the bits corresponding to a field having width `BitWidth` and located at
2939 // offset `BitOffset` (from the least significant bit) within a storage unit of
2940 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
2941 // Use little-endian layout, i.e. `Bits[0]` is the LSB.
2942 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
2943 int BitWidth, int CharWidth) {
2944 assert(CharWidth <= 64);
2945 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
2946
2947 int Pos = 0;
2948 if (BitOffset >= CharWidth) {
2949 Pos += BitOffset / CharWidth;
2950 BitOffset = BitOffset % CharWidth;
2951 }
2952
2953 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
2954 if (BitOffset + BitWidth >= CharWidth) {
2955 Bits[Pos++] |= (Used << BitOffset) & Used;
2956 BitWidth -= CharWidth - BitOffset;
2957 BitOffset = 0;
2958 }
2959
2960 while (BitWidth >= CharWidth) {
2961 Bits[Pos++] = Used;
2962 BitWidth -= CharWidth;
2963 }
2964
2965 if (BitWidth > 0)
2966 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
2967 }
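// Worked example (illustrative): with CharWidth == 8, BitOffset == 3 and
// BitWidth == 11 on a two-byte storage unit, the first step sets
// Bits[0] |= 0xF8 (the top five bits of byte 0) and the tail step sets
// Bits[1] |= 0x3F (the low six bits of byte 1), marking bits [3, 14) used.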
2968
2969 // Set the bits corresponding to a field having width `BitWidth` and located at
2970 // offset `BitOffset` (from the least significant bit) within a storage unit of
2971 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
2972 // `Bits` corresponds to one target byte. Use target endian layout.
2973 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
2974 int StorageSize, int BitOffset, int BitWidth,
2975 int CharWidth, bool BigEndian) {
2976
2977 SmallVector<uint64_t, 8> TmpBits(StorageSize);
2978 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
2979
2980 if (BigEndian)
2981 std::reverse(TmpBits.begin(), TmpBits.end());
2982
2983 for (uint64_t V : TmpBits)
2984 Bits[StorageOffset++] |= V;
2985 }
2986
2987 static void setUsedBits(CodeGenModule &, QualType, int,
2988 SmallVectorImpl<uint64_t> &);
2989
2990 // Set the bits in `Bits`, which correspond to the value representations of
2991 // the actual members of the record type `RTy`. Note that this function does
2992 // not handle base classes, virtual tables, etc., since they cannot occur in
2993 // CMSE function arguments or return values. The bit mask corresponds to the
2994 // target memory layout, i.e. it is endian-dependent.
2995 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
2996 SmallVectorImpl<uint64_t> &Bits) {
2997 ASTContext &Context = CGM.getContext();
2998 int CharWidth = Context.getCharWidth();
2999 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3000 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3001 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3002
3003 int Idx = 0;
3004 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3005 const FieldDecl *F = *I;
3006
3007 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3008 F->getType()->isIncompleteArrayType())
3009 continue;
3010
3011 if (F->isBitField()) {
3012 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3013 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3014 BFI.StorageSize / CharWidth, BFI.Offset,
3015 BFI.Size, CharWidth,
3016 CGM.getDataLayout().isBigEndian());
3017 continue;
3018 }
3019
3020 setUsedBits(CGM, F->getType(),
3021 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3022 }
3023 }
3024
3025 // Set the bits in `Bits`, which correspond to the value representations of
3026 // the elements of an array type `ATy`.
3027 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3028 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3029 const ASTContext &Context = CGM.getContext();
3030
3031 QualType ETy = Context.getBaseElementType(ATy);
3032 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3033 SmallVector<uint64_t, 4> TmpBits(Size);
3034 setUsedBits(CGM, ETy, 0, TmpBits);
3035
3036 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3037 auto Src = TmpBits.begin();
3038 auto Dst = Bits.begin() + Offset + I * Size;
3039 for (int J = 0; J < Size; ++J)
3040 *Dst++ |= *Src++;
3041 }
3042 }
3043
3044 // Set the bits in `Bits`, which correspond to the value representations of
3045 // the type `QTy`.
3046 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3047 SmallVectorImpl<uint64_t> &Bits) {
3048 if (const auto *RTy = QTy->getAs<RecordType>())
3049 return setUsedBits(CGM, RTy, Offset, Bits);
3050
3051 ASTContext &Context = CGM.getContext();
3052 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3053 return setUsedBits(CGM, ATy, Offset, Bits);
3054
3055 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3056 if (Size <= 0)
3057 return;
3058
3059 std::fill_n(Bits.begin() + Offset, Size,
3060 (uint64_t(1) << Context.getCharWidth()) - 1);
3061 }
3062
3063 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3064 int Pos, int Size, int CharWidth,
3065 bool BigEndian) {
3066 assert(Size > 0);
3067 uint64_t Mask = 0;
3068 if (BigEndian) {
3069 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3070 ++P)
3071 Mask = (Mask << CharWidth) | *P;
3072 } else {
3073 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3074 do
3075 Mask = (Mask << CharWidth) | *--P;
3076 while (P != End);
3077 }
3078 return Mask;
3079 }
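// For example (illustrative): with Bits == {0xF8, 0x3F}, Pos == 0,
// Size == 2 and CharWidth == 8, a little-endian target yields
// Mask == 0x3FF8, while a big-endian target yields Mask == 0xF83F.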
3080
3081 // Emit code to clear the bits in a record that aren't part of any
3082 // user-declared member, when the record is used as a function return value.
3083 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3084 llvm::IntegerType *ITy,
3085 QualType QTy) {
3086 assert(Src->getType() == ITy);
3087 assert(ITy->getScalarSizeInBits() <= 64);
3088
3089 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3090 int Size = DataLayout.getTypeStoreSize(ITy);
3091 SmallVector<uint64_t, 4> Bits(Size);
3092 setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
3093
3094 int CharWidth = CGM.getContext().getCharWidth();
3095 uint64_t Mask =
3096 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3097
3098 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3099 }
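// For example (an illustrative sketch; assumes a little-endian target),
// for a return of:
//   struct S { short a; char b; };   // 4 bytes, 1 trailing padding byte
// lowered to an i32, the used-byte mask is 0x00FFFFFF and the padding is
// cleared with:
//   %cmse.clear = and i32 %ret, 16777215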
3100
3101 // Emit code to clear the bits in a record that aren't part of any
3102 // user-declared member, when the record is used as a function argument.
3103 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3104 llvm::ArrayType *ATy,
3105 QualType QTy) {
3106 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3107 int Size = DataLayout.getTypeStoreSize(ATy);
3108 SmallVector<uint64_t, 16> Bits(Size);
3109 setUsedBits(CGM, QTy->getAs<RecordType>(), 0, Bits);
3110
3111 // Clear each element of the LLVM array.
3112 int CharWidth = CGM.getContext().getCharWidth();
3113 int CharsPerElt =
3114 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3115 int MaskIndex = 0;
3116 llvm::Value *R = llvm::UndefValue::get(ATy);
3117 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3118 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3119 DataLayout.isBigEndian());
3120 MaskIndex += CharsPerElt;
3121 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3122 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3123 R = Builder.CreateInsertValue(R, T1, I);
3124 }
3125
3126 return R;
3127 }
3128
3129 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3130 bool EmitRetDbgLoc,
3131 SourceLocation EndLoc) {
3132 if (FI.isNoReturn()) {
3133 // Noreturn functions don't return.
3134 EmitUnreachable(EndLoc);
3135 return;
3136 }
3137
3138 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3139 // Naked functions don't have epilogues.
3140 Builder.CreateUnreachable();
3141 return;
3142 }
3143
3144 // Functions with no result always return void.
3145 if (!ReturnValue.isValid()) {
3146 Builder.CreateRetVoid();
3147 return;
3148 }
3149
3150 llvm::DebugLoc RetDbgLoc;
3151 llvm::Value *RV = nullptr;
3152 QualType RetTy = FI.getReturnType();
3153 const ABIArgInfo &RetAI = FI.getReturnInfo();
3154
3155 switch (RetAI.getKind()) {
3156 case ABIArgInfo::InAlloca:
3157 // Aggregates get evaluated directly into the destination. Sometimes we
3158 // need to return the sret value in a register, though.
3159 assert(hasAggregateEvaluationKind(RetTy));
3160 if (RetAI.getInAllocaSRet()) {
3161 llvm::Function::arg_iterator EI = CurFn->arg_end();
3162 --EI;
3163 llvm::Value *ArgStruct = &*EI;
3164 llvm::Value *SRet = Builder.CreateStructGEP(
3165 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3166 RV = Builder.CreateAlignedLoad(SRet, getPointerAlign(), "sret");
3167 }
3168 break;
3169
3170 case ABIArgInfo::Indirect: {
3171 auto AI = CurFn->arg_begin();
3172 if (RetAI.isSRetAfterThis())
3173 ++AI;
3174 switch (getEvaluationKind(RetTy)) {
3175 case TEK_Complex: {
3176 ComplexPairTy RT =
3177 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3178 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3179 /*isInit*/ true);
3180 break;
3181 }
3182 case TEK_Aggregate:
3183 // Do nothing; aggregates get evaluated directly into the destination.
3184 break;
3185 case TEK_Scalar:
3186 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3187 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3188 /*isInit*/ true);
3189 break;
3190 }
3191 break;
3192 }
3193
3194 case ABIArgInfo::Extend:
3195 case ABIArgInfo::Direct:
3196 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3197 RetAI.getDirectOffset() == 0) {
3198 // The internal return value temp will always have
3199 // pointer-to-return-type type; just do a load.
3200
3201 // If there is a dominating store to ReturnValue, we can elide
3202 // the load, zap the store, and usually zap the alloca.
3203 if (llvm::StoreInst *SI =
3204 findDominatingStoreToReturnValue(*this)) {
3205 // Reuse the debug location from the store unless there is
3206 // cleanup code to be emitted between the store and return
3207 // instruction.
3208 if (EmitRetDbgLoc && !AutoreleaseResult)
3209 RetDbgLoc = SI->getDebugLoc();
3210 // Get the stored value and nuke the now-dead store.
3211 RV = SI->getValueOperand();
3212 SI->eraseFromParent();
3213
3214 // Otherwise, we have to do a simple load.
3215 } else {
3216 RV = Builder.CreateLoad(ReturnValue);
3217 }
3218 } else {
3219 // If the value is offset in memory, apply the offset now.
3220 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3221
3222 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3223 }
3224
3225 // In ARC, end functions that return a retainable type with a call
3226 // to objc_autoreleaseReturnValue.
3227 if (AutoreleaseResult) {
3228 #ifndef NDEBUG
3229 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3230 // been stripped of its typedefs, so we cannot use RetTy here. Get the
3231 // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
3232 // from CurCodeDecl or BlockInfo.
3233 QualType RT;
3234
3235 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3236 RT = FD->getReturnType();
3237 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3238 RT = MD->getReturnType();
3239 else if (isa<BlockDecl>(CurCodeDecl))
3240 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3241 else
3242 llvm_unreachable("Unexpected function/method type");
3243
3244 assert(getLangOpts().ObjCAutoRefCount &&
3245 !FI.isReturnsRetained() &&
3246 RT->isObjCRetainableType());
3247 #endif
3248 RV = emitAutoreleaseOfResult(*this, RV);
3249 }
3250
3251 break;
3252
3253 case ABIArgInfo::Ignore:
3254 break;
3255
3256 case ABIArgInfo::CoerceAndExpand: {
3257 auto coercionType = RetAI.getCoerceAndExpandType();
3258
3259 // Load all of the coerced elements out into results.
3260 llvm::SmallVector<llvm::Value*, 4> results;
3261 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3262 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3263 auto coercedEltType = coercionType->getElementType(i);
3264 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3265 continue;
3266
3267 auto eltAddr = Builder.CreateStructGEP(addr, i);
3268 auto elt = Builder.CreateLoad(eltAddr);
3269 results.push_back(elt);
3270 }
3271
3272 // If we have one result, it's the single direct result type.
3273 if (results.size() == 1) {
3274 RV = results[0];
3275
3276 // Otherwise, we need to make a first-class aggregate.
3277 } else {
3278 // Construct a return type that lacks padding elements.
3279 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3280
3281 RV = llvm::UndefValue::get(returnType);
3282 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3283 RV = Builder.CreateInsertValue(RV, results[i], i);
3284 }
3285 }
3286 break;
3287 }
3288
3289 case ABIArgInfo::Expand:
3290 llvm_unreachable("Invalid ABI kind for return argument");
3291 }
3292
3293 llvm::Instruction *Ret;
3294 if (RV) {
3295 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3296 // For certain return types, clear padding bits, as they may reveal
3297 // sensitive information.
3298 // Small struct/union types are passed as integers.
3299 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3300 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3301 RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3302 }
3303 EmitReturnValueCheck(RV);
3304 Ret = Builder.CreateRet(RV);
3305 } else {
3306 Ret = Builder.CreateRetVoid();
3307 }
3308
3309 if (RetDbgLoc)
3310 Ret->setDebugLoc(std::move(RetDbgLoc));
3311 }
3312
3313 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3314 // A current decl may not be available when emitting vtable thunks.
3315 if (!CurCodeDecl)
3316 return;
3317
3318 // If the return block isn't reachable, neither is this check, so don't emit
3319 // it.
3320 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3321 return;
3322
3323 ReturnsNonNullAttr *RetNNAttr = nullptr;
3324 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3325 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3326
3327 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3328 return;
3329
3330 // Prefer the returns_nonnull attribute if it's present.
3331 SourceLocation AttrLoc;
3332 SanitizerMask CheckKind;
3333 SanitizerHandler Handler;
3334 if (RetNNAttr) {
3335 assert(!requiresReturnValueNullabilityCheck() &&
3336 "Cannot check nullability and the nonnull attribute");
3337 AttrLoc = RetNNAttr->getLocation();
3338 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3339 Handler = SanitizerHandler::NonnullReturn;
3340 } else {
3341 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3342 if (auto *TSI = DD->getTypeSourceInfo())
3343 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3344 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3345 CheckKind = SanitizerKind::NullabilityReturn;
3346 Handler = SanitizerHandler::NullabilityReturn;
3347 }
3348
3349 SanitizerScope SanScope(this);
3350
3351 // Make sure the "return" source location is valid. If we're checking a
3352 // nullability annotation, make sure the preconditions for the check are met.
3353 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3354 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3355 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3356 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3357 if (requiresReturnValueNullabilityCheck())
3358 CanNullCheck =
3359 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3360 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3361 EmitBlock(Check);
3362
3363 // Now do the null check.
3364 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3365 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3366 llvm::Value *DynamicData[] = {SLocPtr};
3367 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3368
3369 EmitBlock(NoCheck);
3370
3371 #ifndef NDEBUG
3372 // The return location should not be used after the check has been emitted.
3373 ReturnLocation = Address::invalid();
3374 #endif
3375 }
3376
3377 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3378 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3379 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3380 }
3381
3382 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3383 QualType Ty) {
3384 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3385 // placeholders.
3386 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3387 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3388 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3389
3390 // FIXME: When we generate this IR in one pass, we shouldn't need
3391 // this win32-specific alignment hack.
3392 CharUnits Align = CharUnits::fromQuantity(4);
3393 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3394
3395 return AggValueSlot::forAddr(Address(Placeholder, Align),
3396 Ty.getQualifiers(),
3397 AggValueSlot::IsNotDestructed,
3398 AggValueSlot::DoesNotNeedGCBarriers,
3399 AggValueSlot::IsNotAliased,
3400 AggValueSlot::DoesNotOverlap);
3401 }
3402
3403 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3404 const VarDecl *param,
3405 SourceLocation loc) {
3406 // StartFunction converted the ABI-lowered parameter(s) into a
3407 // local alloca. We need to turn that into an r-value suitable
3408 // for EmitCall.
3409 Address local = GetAddrOfLocalVar(param);
3410
3411 QualType type = param->getType();
3412
3413 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3414 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3415 }
3416
3417 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3418 // but the argument needs to be the original pointer.
3419 if (type->isReferenceType()) {
3420 args.add(RValue::get(Builder.CreateLoad(local)), type);
3421
3422 // In ARC, move out of consumed arguments so that the release cleanup
3423 // entered by StartFunction doesn't cause an over-release. This isn't
3424 // optimal -O0 code generation, but it should get cleaned up when
3425 // optimization is enabled. This also assumes that delegate calls are
3426 // performed exactly once for a set of arguments, but that should be safe.
3427 } else if (getLangOpts().ObjCAutoRefCount &&
3428 param->hasAttr<NSConsumedAttr>() &&
3429 type->isObjCRetainableType()) {
3430 llvm::Value *ptr = Builder.CreateLoad(local);
3431 auto null =
3432 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3433 Builder.CreateStore(null, local);
3434 args.add(RValue::get(ptr), type);
3435
3436 // For the most part, we just need to load the alloca, except that
3437 // aggregate r-values are actually pointers to temporaries.
3438 } else {
3439 args.add(convertTempToRValue(local, type, loc), type);
3440 }
3441
3442 // Deactivate the cleanup for the callee-destructed param that was pushed.
3443 if (hasAggregateEvaluationKind(type) && !CurFuncIsThunk &&
3444 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3445 param->needsDestruction(getContext())) {
3446 EHScopeStack::stable_iterator cleanup =
3447 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3448 assert(cleanup.isValid() &&
3449 "cleanup for callee-destructed param not recorded");
3450 // This unreachable is a temporary marker which will be removed later.
3451 llvm::Instruction *isActive = Builder.CreateUnreachable();
3452 args.addArgCleanupDeactivation(cleanup, isActive);
3453 }
3454 }
3455
3456 static bool isProvablyNull(llvm::Value *addr) {
3457 return isa<llvm::ConstantPointerNull>(addr);
3458 }
3459
3460 /// Emit the actual writing-back of a writeback.
3461 static void emitWriteback(CodeGenFunction &CGF,
3462 const CallArgList::Writeback &writeback) {
3463 const LValue &srcLV = writeback.Source;
3464 Address srcAddr = srcLV.getAddress(CGF);
3465 assert(!isProvablyNull(srcAddr.getPointer()) &&
3466 "shouldn't have writeback for provably null argument");
3467
3468 llvm::BasicBlock *contBB = nullptr;
3469
3470 // If the argument wasn't provably non-null, we need to null check
3471 // before doing the store.
3472 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3473 CGF.CGM.getDataLayout());
3474 if (!provablyNonNull) {
3475 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3476 contBB = CGF.createBasicBlock("icr.done");
3477
3478 llvm::Value *isNull =
3479 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3480 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3481 CGF.EmitBlock(writebackBB);
3482 }
3483
3484 // Load the value to writeback.
3485 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3486
3487 // Cast it back, in case we're writing an id to a Foo* or something.
3488 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3489 "icr.writeback-cast");
3490
3491 // Perform the writeback.
3492
3493 // If we have a "to use" value, it's something we need to emit a use
3494 // of. This has to be carefully threaded in: if it's done after the
3495 // release it's potentially undefined behavior (and the optimizer
3496 // will ignore it), and if it happens before the retain then the
3497 // optimizer could move the release there.
3498 if (writeback.ToUse) {
3499 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3500
3501 // Retain the new value. No need to block-copy here: the block's
3502 // being passed up the stack.
3503 value = CGF.EmitARCRetainNonBlock(value);
3504
3505 // Emit the intrinsic use here.
3506 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3507
3508 // Load the old value (primitively).
3509 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3510
3511 // Put the new value in place (primitively).
3512 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3513
3514 // Release the old value.
3515 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3516
3517 // Otherwise, we can just do a normal lvalue store.
3518 } else {
3519 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3520 }
3521
3522 // Jump to the continuation block.
3523 if (!provablyNonNull)
3524 CGF.EmitBlock(contBB);
3525 }
3526
3527 static void emitWritebacks(CodeGenFunction &CGF,
3528 const CallArgList &args) {
3529 for (const auto &I : args.writebacks())
3530 emitWriteback(CGF, I);
3531 }
3532
3533 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3534 const CallArgList &CallArgs) {
3535 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3536 CallArgs.getCleanupsToDeactivate();
3537 // Iterate in reverse to increase the likelihood of popping the cleanup.
3538 for (const auto &I : llvm::reverse(Cleanups)) {
3539 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3540 I.IsActiveIP->eraseFromParent();
3541 }
3542 }
3543
3544 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3545 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3546 if (uop->getOpcode() == UO_AddrOf)
3547 return uop->getSubExpr();
3548 return nullptr;
3549 }
3550
3551 /// Emit an argument that's being passed call-by-writeback. That is,
3552 /// we are passing the address of an __autoreleased temporary; it
3553 /// might be copy-initialized with the current value of the given
3554 /// address, but it will definitely be copied out of after the call.
3555 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3556 const ObjCIndirectCopyRestoreExpr *CRE) {
3557 LValue srcLV;
3558
3559 // Make an optimistic effort to emit the address as an l-value.
3560 // This can fail if the argument expression is more complicated.
3561 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3562 srcLV = CGF.EmitLValue(lvExpr);
3563
3564 // Otherwise, just emit it as a scalar.
3565 } else {
3566 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3567
3568 QualType srcAddrType =
3569 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3570 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3571 }
3572 Address srcAddr = srcLV.getAddress(CGF);
3573
3574 // The dest and src types don't necessarily match in LLVM terms
3575 // because of the crazy ObjC compatibility rules.
3576
3577 llvm::PointerType *destType =
3578 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3579
3580 // If the address is a constant null, just pass the appropriate null.
3581 if (isProvablyNull(srcAddr.getPointer())) {
3582 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3583 CRE->getType());
3584 return;
3585 }
3586
3587 // Create the temporary.
3588 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3589 CGF.getPointerAlign(),
3590 "icr.temp");
3591 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3592 // and that cleanup will be conditional if we can't prove that the l-value
3593 // isn't null, so we need to register a dominating point so that the cleanups
3594 // system will make valid IR.
3595 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3596
3597 // Zero-initialize it if we're not doing a copy-initialization.
3598 bool shouldCopy = CRE->shouldCopy();
3599 if (!shouldCopy) {
3600 llvm::Value *null =
3601 llvm::ConstantPointerNull::get(
3602 cast<llvm::PointerType>(destType->getElementType()));
3603 CGF.Builder.CreateStore(null, temp);
3604 }
3605
3606 llvm::BasicBlock *contBB = nullptr;
3607 llvm::BasicBlock *originBB = nullptr;
3608
3609 // If the address is *not* known to be non-null, we need to switch.
3610 llvm::Value *finalArgument;
3611
3612 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3613 CGF.CGM.getDataLayout());
3614 if (provablyNonNull) {
3615 finalArgument = temp.getPointer();
3616 } else {
3617 llvm::Value *isNull =
3618 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3619
3620 finalArgument = CGF.Builder.CreateSelect(isNull,
3621 llvm::ConstantPointerNull::get(destType),
3622 temp.getPointer(), "icr.argument");
3623
3624 // If we need to copy, then the load has to be conditional, which
3625 // means we need control flow.
3626 if (shouldCopy) {
3627 originBB = CGF.Builder.GetInsertBlock();
3628 contBB = CGF.createBasicBlock("icr.cont");
3629 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3630 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3631 CGF.EmitBlock(copyBB);
3632 condEval.begin(CGF);
3633 }
3634 }
3635
3636 llvm::Value *valueToUse = nullptr;
3637
3638 // Perform a copy if necessary.
3639 if (shouldCopy) {
3640 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3641 assert(srcRV.isScalar());
3642
3643 llvm::Value *src = srcRV.getScalarVal();
3644 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3645 "icr.cast");
3646
3647 // Use an ordinary store, not a store-to-lvalue.
3648 CGF.Builder.CreateStore(src, temp);
3649
3650 // If optimization is enabled, and the value was held in a
3651 // __strong variable, we need to tell the optimizer that this
3652 // value has to stay alive until we're doing the store back.
3653 // This is because the temporary is effectively unretained,
3654 // and so otherwise we can violate the high-level semantics.
3655 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3656 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3657 valueToUse = src;
3658 }
3659 }
3660
3661 // Finish the control flow if we needed it.
3662 if (shouldCopy && !provablyNonNull) {
3663 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3664 CGF.EmitBlock(contBB);
3665
3666 // Make a phi for the value to intrinsically use.
3667 if (valueToUse) {
3668 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3669 "icr.to-use");
3670 phiToUse->addIncoming(valueToUse, copyBB);
3671 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3672 originBB);
3673 valueToUse = phiToUse;
3674 }
3675
3676 condEval.end(CGF);
3677 }
3678
3679 args.addWriteback(srcLV, temp, valueToUse);
3680 args.add(RValue::get(finalArgument), CRE->getType());
3681 }
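// For example (an illustrative sketch; assumes ARC, and the selector name
// is hypothetical), for a call like:
//   NSError *err = nil;                 // __strong
//   [obj doThingAndReturnError:&err];   // parameter is NSError *__autoreleasing *
// the caller passes the address of the "icr.temp" temporary instead
// (copy-initialized from 'err' when shouldCopy is set), and the writeback
// emitted above stores the temporary's value back into 'err' after the call.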
3682
3683 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3684 assert(!StackBase);
3685
3686 // Save the stack.
3687 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3688 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3689 }
3690
3691 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3692 if (StackBase) {
3693 // Restore the stack after the call.
3694 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3695 CGF.Builder.CreateCall(F, StackBase);
3696 }
3697 }
3698
3699 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3700 SourceLocation ArgLoc,
3701 AbstractCallee AC,
3702 unsigned ParmNum) {
3703 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3704 SanOpts.has(SanitizerKind::NullabilityArg)))
3705 return;
3706
3707 // The param decl may be missing in a variadic function.
3708 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3709 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3710
3711 // Prefer the nonnull attribute if it's present.
3712 const NonNullAttr *NNAttr = nullptr;
3713 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3714 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3715
3716 bool CanCheckNullability = false;
3717 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3718 auto Nullability = PVD->getType()->getNullability(getContext());
3719 CanCheckNullability = Nullability &&
3720 *Nullability == NullabilityKind::NonNull &&
3721 PVD->getTypeSourceInfo();
3722 }
3723
3724 if (!NNAttr && !CanCheckNullability)
3725 return;
3726
3727 SourceLocation AttrLoc;
3728 SanitizerMask CheckKind;
3729 SanitizerHandler Handler;
3730 if (NNAttr) {
3731 AttrLoc = NNAttr->getLocation();
3732 CheckKind = SanitizerKind::NonnullAttribute;
3733 Handler = SanitizerHandler::NonnullArg;
3734 } else {
3735 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
3736 CheckKind = SanitizerKind::NullabilityArg;
3737 Handler = SanitizerHandler::NullabilityArg;
3738 }
3739
3740 SanitizerScope SanScope(this);
3741 assert(RV.isScalar());
3742 llvm::Value *V = RV.getScalarVal();
3743 llvm::Value *Cond =
3744 Builder.CreateICmpNE(V, llvm::Constant::getNullValue(V->getType()));
3745 llvm::Constant *StaticData[] = {
3746 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
3747 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
3748 };
3749 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
3750 }
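// For example (an illustrative sketch), given a callee declared as:
//   void f(int *p) __attribute__((nonnull(1)));
// a call site compiled with -fsanitize=nonnull-attribute emits an
// 'icmp ne' of the argument against null and branches to the nonnull-arg
// diagnostic handler when the comparison fails.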
3751
3752 void CodeGenFunction::EmitCallArgs(
3753 CallArgList &Args, ArrayRef<QualType> ArgTypes,
3754 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
3755 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
3756 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
3757
3758 // We *have* to evaluate arguments from right to left in the MS C++ ABI,
3759 // because arguments are destroyed left to right in the callee. As a special
3760 // case, there are certain language constructs that require left-to-right
3761 // evaluation, and in those cases we consider the evaluation order requirement
3762 // to trump the "destruction order is reverse construction order" guarantee.
3763 bool LeftToRight =
3764 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
3765 ? Order == EvaluationOrder::ForceLeftToRight
3766 : Order != EvaluationOrder::ForceRightToLeft;
3767
3768 auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
3769 RValue EmittedArg) {
3770 if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
3771 return;
3772 auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
3773 if (PS == nullptr)
3774 return;
3775
3776 const auto &Context = getContext();
3777 auto SizeTy = Context.getSizeType();
3778 auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
3779 assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
3780 llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
3781 EmittedArg.getScalarVal(),
3782 PS->isDynamic());
3783 Args.add(RValue::get(V), SizeTy);
3784 // If we're emitting args in reverse, be sure to do so with
3785 // pass_object_size, as well.
3786 if (!LeftToRight)
3787 std::swap(Args.back(), *(&Args.back() - 1));
3788 };
3789
3790 // Insert a stack save if we're going to need any inalloca args.
3791 bool HasInAllocaArgs = false;
3792 if (CGM.getTarget().getCXXABI().isMicrosoft()) {
3793 for (ArrayRef<QualType>::iterator I = ArgTypes.begin(), E = ArgTypes.end();
3794 I != E && !HasInAllocaArgs; ++I)
3795 HasInAllocaArgs = isInAllocaArgument(CGM.getCXXABI(), *I);
3796 if (HasInAllocaArgs) {
3797 assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
3798 Args.allocateArgumentMemory(*this);
3799 }
3800 }
3801
3802 // Evaluate each argument in the appropriate order.
3803 size_t CallArgsStart = Args.size();
3804 for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
3805 unsigned Idx = LeftToRight ? I : E - I - 1;
3806 CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
3807 unsigned InitialArgSize = Args.size();
3808 // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
3809 // the argument and parameter match or the objc method is parameterized.
3810 assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
3811 getContext().hasSameUnqualifiedType((*Arg)->getType(),
3812 ArgTypes[Idx]) ||
3813 (isa<ObjCMethodDecl>(AC.getDecl()) &&
3814 isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
3815 "Argument and parameter types don't match");
3816 EmitCallArg(Args, *Arg, ArgTypes[Idx]);
3817 // In particular, we depend on it being the last arg in Args, and the
3818 // objectsize bits depend on there only being one arg if !LeftToRight.
3819 assert(InitialArgSize + 1 == Args.size() &&
3820 "The code below depends on only adding one arg per EmitCallArg");
3821 (void)InitialArgSize;
3822 // Since pointer arguments are never emitted as LValues, it is safe to
3823 // emit the non-null argument check for r-values only.
3824 if (!Args.back().hasLValue()) {
3825 RValue RVArg = Args.back().getKnownRValue();
3826 EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
3827 ParamsToSkip + Idx);
3828 // @llvm.objectsize should never have side-effects and shouldn't need
3829 // destruction/cleanups, so we can safely "emit" it after its arg,
3830 // regardless of right-to-leftness.
3831 MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
3832 }
3833 }
3834
3835 if (!LeftToRight) {
3836 // Un-reverse the arguments we just evaluated so they match up with the LLVM
3837 // IR function.
3838 std::reverse(Args.begin() + CallArgsStart, Args.end());
3839 }
3840 }
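// For example (illustrative), for a call f(g(), h()) under the Microsoft
// C++ ABI, the argument loop above evaluates h() before g() (right to
// left), and the final std::reverse restores source order so CallArgList
// still lines up with the IR function's parameter list.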
3841
3842 namespace {
3843
3844 struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
3845 DestroyUnpassedArg(Address Addr, QualType Ty)
3846 : Addr(Addr), Ty(Ty) {}
3847
3848 Address Addr;
3849 QualType Ty;
3850
3851 void Emit(CodeGenFunction &CGF, Flags flags) override {
3852 QualType::DestructionKind DtorKind = Ty.isDestructedType();
3853 if (DtorKind == QualType::DK_cxx_destructor) {
3854 const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
3855 assert(!Dtor->isTrivial());
3856 CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
3857 /*Delegating=*/false, Addr, Ty);
3858 } else {
3859 CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
3860 }
3861 }
3862 };
3863
3864 struct DisableDebugLocationUpdates {
3865 CodeGenFunction &CGF;
3866 bool disabledDebugInfo;
3867 DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
3868 if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
3869 CGF.disableDebugInfo();
3870 }
3871 ~DisableDebugLocationUpdates() {
3872 if (disabledDebugInfo)
3873 CGF.enableDebugInfo();
3874 }
3875 };
3876
3877 } // end anonymous namespace
3878
3879 RValue CallArg::getRValue(CodeGenFunction &CGF) const {
3880 if (!HasLV)
3881 return RV;
3882 LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
3883 CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
3884 LV.isVolatile());
3885 IsUsed = true;
3886 return RValue::getAggregate(Copy.getAddress(CGF));
3887 }
3888
3889 void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
3890 LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
3891 if (!HasLV && RV.isScalar())
3892 CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
3893 else if (!HasLV && RV.isComplex())
3894 CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
3895 else {
3896 auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
3897 LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
3898 // We assume that call args are never copied into subobjects.
3899 CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
3900 HasLV ? LV.isVolatileQualified()
3901 : RV.isVolatileQualified());
3902 }
3903 IsUsed = true;
3904 }
3905
3906 void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
3907 QualType type) {
3908 DisableDebugLocationUpdates Dis(*this, E);
3909 if (const ObjCIndirectCopyRestoreExpr *CRE
3910 = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
3911 assert(getLangOpts().ObjCAutoRefCount);
3912 return emitWritebackArg(*this, args, CRE);
3913 }
3914
3915 assert(type->isReferenceType() == E->isGLValue() &&
3916 "reference binding to unmaterialized r-value!");
3917
3918 if (E->isGLValue()) {
3919 assert(E->getObjectKind() == OK_Ordinary);
3920 return args.add(EmitReferenceBindingToExpr(E), type);
3921 }
3922
3923 bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);
3924
3925 // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
3926 // However, we still have to push an EH-only cleanup in case we unwind before
3927 // we make it to the call.
3928 if (HasAggregateEvalKind &&
3929 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
3930 // If we're using inalloca, use the argument memory. Otherwise, use a
3931 // temporary.
3932 AggValueSlot Slot;
3933 if (args.isUsingInAlloca())
3934 Slot = createPlaceholderSlot(*this, type);
3935 else
3936 Slot = CreateAggTemp(type, "agg.tmp");
3937
3938 bool DestroyedInCallee = true, NeedsEHCleanup = true;
3939 if (const auto *RD = type->getAsCXXRecordDecl())
3940 DestroyedInCallee = RD->hasNonTrivialDestructor();
3941 else
3942 NeedsEHCleanup = needsEHCleanup(type.isDestructedType());
3943
3944 if (DestroyedInCallee)
3945 Slot.setExternallyDestructed();
3946
3947 EmitAggExpr(E, Slot);
3948 RValue RV = Slot.asRValue();
3949 args.add(RV, type);
3950
3951 if (DestroyedInCallee && NeedsEHCleanup) {
3952 // Create a no-op GEP between the placeholder and the cleanup so we can
3953 // RAUW it successfully. It also serves as a marker of the first
3954 // instruction where the cleanup is active.
3955 pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
3956 type);
3957 // This unreachable is a temporary marker which will be removed later.
3958 llvm::Instruction *IsActive = Builder.CreateUnreachable();
3959 args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
3960 }
3961 return;
3962 }
3963
3964 if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
3965 cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
3966 LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
3967 assert(L.isSimple());
3968 args.addUncopiedAggregate(L, type);
3969 return;
3970 }
3971
3972 args.add(EmitAnyExprToTemp(E), type);
3973 }
3974
3975 QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
3976 // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
3977 // implicitly widens null pointer constants that are arguments to varargs
3978 // functions to pointer-sized ints.
3979 if (!getTarget().getTriple().isOSWindows())
3980 return Arg->getType();
3981
3982 if (Arg->getType()->isIntegerType() &&
3983 getContext().getTypeSize(Arg->getType()) <
3984 getContext().getTargetInfo().getPointerWidth(0) &&
3985 Arg->isNullPointerConstant(getContext(),
3986 Expr::NPC_ValueDependentIsNotNull)) {
3987 return getContext().getIntPtrType();
3988 }
3989
3990 return Arg->getType();
3991 }
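// For example (an illustrative sketch; assumes Win64, where NULL is plain
// 0), in:
//   printf("%p", NULL);
// the 32-bit null constant is widened to the pointer-sized intptr type so
// the callee's va_arg of a pointer reads a fully zeroed slot.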
3992
3993 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
3994 // optimizer it can aggressively ignore unwind edges.
3995 void
3996 CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
3997 if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3998 !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
3999 Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
4000 CGM.getNoObjCARCExceptionsMetadata());
4001 }
4002
4003 /// Emits a call to the given no-arguments nounwind runtime function.
4004 llvm::CallInst *
4005 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4006 const llvm::Twine &name) {
4007 return EmitNounwindRuntimeCall(callee, None, name);
4008 }
4009
4010 /// Emits a call to the given nounwind runtime function.
4011 llvm::CallInst *
4012 CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
4013 ArrayRef<llvm::Value *> args,
4014 const llvm::Twine &name) {
4015 llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
4016 call->setDoesNotThrow();
4017 return call;
4018 }
4019
4020 /// Emits a simple call (never an invoke) to the given no-arguments
4021 /// runtime function.
4022 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4023 const llvm::Twine &name) {
4024 return EmitRuntimeCall(callee, None, name);
4025 }
4026
4027 // Calls which may throw must have operand bundles indicating which funclet
4028 // they are nested within.
4029 SmallVector<llvm::OperandBundleDef, 1>
4030 CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
4031 SmallVector<llvm::OperandBundleDef, 1> BundleList;
4032 // There is no need for a funclet operand bundle if we aren't inside a
4033 // funclet.
4034 if (!CurrentFuncletPad)
4035 return BundleList;
4036
4037 // Skip intrinsics which cannot throw.
4038 auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
4039 if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
4040 return BundleList;
4041
4042 BundleList.emplace_back("funclet", CurrentFuncletPad);
4043 return BundleList;
4044 }
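// For example (an illustrative sketch), a runtime call made inside a
// catchpad funclet carries the bundle:
//   %0 = catchpad within %cs [...]
//   call void @helper() [ "funclet"(token %0) ]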
4045
4046 /// Emits a simple call (never an invoke) to the given runtime function.
4047 llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
4048 ArrayRef<llvm::Value *> args,
4049 const llvm::Twine &name) {
4050 llvm::CallInst *call = Builder.CreateCall(
4051 callee, args, getBundlesForFunclet(callee.getCallee()), name);
4052 call->setCallingConv(getRuntimeCC());
4053 return call;
4054 }
4055
4056 /// Emits a call or invoke to the given noreturn runtime function.
4057 void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
4058 llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
4059 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4060 getBundlesForFunclet(callee.getCallee());
4061
4062 if (getInvokeDest()) {
4063 llvm::InvokeInst *invoke =
4064 Builder.CreateInvoke(callee,
4065 getUnreachableBlock(),
4066 getInvokeDest(),
4067 args,
4068 BundleList);
4069 invoke->setDoesNotReturn();
4070 invoke->setCallingConv(getRuntimeCC());
4071 } else {
4072 llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
4073 call->setDoesNotReturn();
4074 call->setCallingConv(getRuntimeCC());
4075 Builder.CreateUnreachable();
4076 }
4077 }
4078
4079 /// Emits a call or invoke instruction to the given nullary runtime function.
4080 llvm::CallBase *
4081 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4082 const Twine &name) {
4083 return EmitRuntimeCallOrInvoke(callee, None, name);
4084 }
4085
4086 /// Emits a call or invoke instruction to the given runtime function.
4087 llvm::CallBase *
4088 CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
4089 ArrayRef<llvm::Value *> args,
4090 const Twine &name) {
4091 llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
4092 call->setCallingConv(getRuntimeCC());
4093 return call;
4094 }
4095
4096 /// Emits a call or invoke instruction to the given function, depending
4097 /// on the current state of the EH stack.
4098 llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
4099 ArrayRef<llvm::Value *> Args,
4100 const Twine &Name) {
4101 llvm::BasicBlock *InvokeDest = getInvokeDest();
4102 SmallVector<llvm::OperandBundleDef, 1> BundleList =
4103 getBundlesForFunclet(Callee.getCallee());
4104
4105 llvm::CallBase *Inst;
4106 if (!InvokeDest)
4107 Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
4108 else {
4109 llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
4110 Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
4111 Name);
4112 EmitBlock(ContBB);
4113 }
4114
4115 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
4116 // optimizer it can aggressively ignore unwind edges.
4117 if (CGM.getLangOpts().ObjCAutoRefCount)
4118 AddObjCARCExceptionMetadata(Inst);
4119
4120 return Inst;
4121 }
4122
4123 void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
4124 llvm::Value *New) {
4125 DeferredReplacements.push_back(std::make_pair(Old, New));
4126 }
4127
4128 namespace {
4129
4130 /// Specify the given \p NewAlign as the alignment of the return-value
4131 /// attribute. If such an attribute already exists, raise it to the larger of the two.
4132 LLVM_NODISCARD llvm::AttributeList
4133 maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
4134 const llvm::AttributeList &Attrs,
4135 llvm::Align NewAlign) {
4136 llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
4137 if (CurAlign >= NewAlign)
4138 return Attrs;
4139 llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
4140 return Attrs
4141 .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
4142 llvm::Attribute::AttrKind::Alignment)
4143 .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
4144 }
4145
4146 template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
4147 protected:
4148 CodeGenFunction &CGF;
4149
4150 /// We do nothing if this is, or becomes, nullptr.
4151 const AlignedAttrTy *AA = nullptr;
4152
4153 llvm::Value *Alignment = nullptr; // May or may not be a constant.
4154 llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.
4155
4156 AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
4157 : CGF(CGF_) {
4158 if (!FuncDecl)
4159 return;
4160 AA = FuncDecl->getAttr<AlignedAttrTy>();
4161 }
4162
public:
  /// If we can, materialize the alignment as an attribute on the return
  /// value.
  LLVM_NODISCARD llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
      return Attrs;
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI)
      return Attrs;
    // We may legitimately have non-power-of-2 alignment here.
    // If so, this is UB land; emit it via `@llvm.assume` instead.
    if (!AlignmentCI->getValue().isPowerOf2())
      return Attrs;
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        CGF.getLLVMContext(), Attrs,
        llvm::Align(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA = nullptr; // We're done. Disallow doing anything else.
    return NewAttrs;
  }

  /// Emit the alignment assumption.
  /// This is the general fallback that we take if there is an offset, if the
  /// alignment is variable, or if we are sanitizing for alignment.
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr; // We're done. Disallow doing anything else.
  }
};

/// Helper data structure to emit `AssumeAlignedAttr`.
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
        OffsetCI = nullptr;
    }
  }
};

/// Helper data structure to emit `AllocAlignAttr`.
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // Alignment may or may not be a constant, and that is okay.
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};

} // namespace
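// For illustration (hypothetical declaration): given
//   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
// AssumeAlignedAttrEmitter turns the constant 64 into an 'align 64' return
// attribute on the call site, falling back to an @llvm.assume when there is
// an offset, a non-constant alignment, or the alignment sanitizer is enabled.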

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::CallBase **callOrInvoke,
                                 SourceLocation Loc) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes, so only
    // check in the case where we have both always_inline and target: without
    // always_inline the call may sit behind a run-time check for the proper
    // CPU features (and function-based code generation keeps the
    // feature-specific code out of the caller).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }
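  // E.g. calling a function declared
  //   __attribute__((always_inline, target("avx2"))) void f(void);
  // from a caller compiled without AVX2 is diagnosed here, since inlining the
  // body would let AVX2 instructions escape any feature check.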

#ifndef NDEBUG
  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
    // For an inalloca varargs function, we don't expect CallInfo to match the
    // function pointer's type, because the inalloca struct will have extra
    // fields in it for the varargs parameters. Code later in this function
    // bitcasts the function pointer to the type derived from CallInfo.
    //
    // In other cases, we assert that the types match up (until pointers stop
    // having pointee types).
    llvm::Type *TypeFromVal;
    if (Callee.isVirtual())
      TypeFromVal = Callee.getVirtualFunctionType();
    else
      TypeFromVal =
          Callee.getFunctionPointer()->getType()->getPointerElementType();
    assert(IRFuncTy == TypeFromVal);
  }
#endif

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }
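  // (Inalloca is used on 32-bit x86 Windows; e.g. passing a non-trivially-
  // copyable C++ object by value there materializes a single %argmem alloca
  // holding all such arguments, constructed in place and passed with the
  // 'inalloca' attribute.)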

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }
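  // E.g. for a function returning a large struct indirectly, the return slot
  // becomes a hidden pointer argument: the callee writes its result through
  // that pointer instead of returning it in registers.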

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its type
        // from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL, even if RV is located in the default or alloca address
          // space, we don't want to perform an address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() != IRFuncTy->
                        getParamType(FirstIRArg)->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          uint64_t ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = V->getType()->getPointerElementType()->getPointerTo(
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
            == ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
              Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }
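        // (Conceptually: the caller's error slot is copied into a fresh
        // swifterror alloca for the duration of the call and copied back in
        // the writeback after the call completes below.)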

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
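      // E.g. an argument coerced to { i64, i64 } is passed as two separate
      // i64 values rather than as one first-class aggregate.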
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may reveal
          // sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);
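      // Coerce-and-expand passes the value as the individual non-padding
      // elements of the coercion type; e.g. a value coerced to
      // { i64, [4 x i8], float } is passed as one i64 and one float, with the
      // i8 array treated as padding and skipped.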

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(
                (unsigned)layout->getAlignment().value(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand:
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator DI = DeclaredTy->element_begin(),
                                                 DE = DeclaredTy->element_end(),
                                                 FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };
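
  // E.g. a K&R-style declaration 'void f();' in C is called through a
  // variadic function pointer type that is a bitcast of the non-variadic
  // definition; when that definition is visible we call it directly instead.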

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // An inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->usesFPIntrin())
      // All calls within a strictfp function are marked strictfp
      Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if we are inside a nomerge-attributed
  // statement.
  if (InNoMergeAttributedStmt)
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoMerge);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs =
        Attrs.addAttribute(getLLVMContext(), llvm::AttributeList::FunctionIndex,
                           llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);
  }
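  // E.g. a nounwind callee outside any SEH __try is emitted as a plain
  // 'call'; inside a __try the same call becomes an 'invoke' so that
  // asynchronous exceptions still have an unwind edge to follow.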

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to manually
  // pop this cleanup later on. Being eager about this is OK, since this
  // temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate that
  // Control Flow Guard checks should not be added, even if the call is inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Suppress tail calls if requested.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove from function since CallBase::hasFnAttr additionally checks
      // attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expression emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      // FALLTHROUGH
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring the return value, make sure to still construct
      // the appropriate (undefined) return value for our caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the return value doesn't match, perform a bitcast to coerce it.
          // This can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}
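
// For a virtual callee this resolves the target at the call site; e.g.
// 'p->f()' loads the function pointer out of p's vtable and the call is then
// emitted as an indirect call through that pointer.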

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}