//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetBuiltins.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/Transforms/Utils/Local.h"
using namespace clang;
using namespace CodeGen;

/***/

unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
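/// For example, a function declared as 'const int f();' is arranged here with
/// a plain 'int' result type.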
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 /*instanceMethod=*/false,
                                 /*chainCall=*/false, None,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
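/// For example, a parameter declared as
///   void *p __attribute__((pass_object_size(0)))
/// is lowered to two arguments: the pointer itself, followed by a size_t
/// holding the object size computed at the call site.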
static void appendParameterTypes(const CodeGenTypes &CGT,
                                 SmallVectorImpl<CanQualType> &prefix,
              SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
                                 CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  return CGT.arrangeLLVMFunctionInfo(resultType, instanceMethod,
                                     /*chainCall=*/false, prefix,
                                     FTP->getExtInfo(), paramInfos,
                                     Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
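/// For example, when emitting a call through a member pointer of type
/// 'void (Foo::*)(int)', there is no concrete method declaration, so MD is
/// null, but RD (Foo) still supplies the 'this' type.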
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isInstance()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  argTypes.push_back(DeriveThisType(MD->getParent(), MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
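  // For example, in the Itanium C++ ABI, a base-object constructor of a class
  // with virtual bases receives an implicit VTT pointer here, inserted
  // immediately after 'this'.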
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD)
                               ? argTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, argTypes, extInfo,
                                 paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD)
                               ? ArgTypes.front()
                               : TheCXXABI.hasMostDerivedReturn(GD)
                                     ? CGM.getContext().VoidPtrTy
                                     : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }
  return arrangeLLVMFunctionInfo(ResultType, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTypes, Info,
                                 ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isInstance())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(
        noProto->getReturnType(), /*instanceMethod=*/false,
        /*chainCall=*/false, None, noProto->getExtInfo(), {},
        RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
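/// Every message send is arranged with two implicit leading arguments: the
/// receiver and the selector ('self' and '_cmd' within the method itself).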
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(
      GetReturnType(MD->getReturnType()), /*instanceMethod=*/false,
      /*chainCall=*/false, argTys, einfo, extParamInfos, required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(
      GetReturnType(returnType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
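/// This is used, for example, for thunks of variadic methods, whose variadic
/// arguments cannot legally be copied and must instead be forwarded to the
/// target on the caller's original stack.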
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/false,
                                 /*chainCall=*/false, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, /*instanceMethod=*/true,
                                 /*chainCall=*/false, ArgTys,
                                 FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
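/// For example, arranging a call to 'int printf(const char *, ...)' with
/// three arguments treats only the format string as required; the remaining
/// two arguments are passed using the variadic convention.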
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo()
                 .isNoProtoCallVariadic(args,
                                        cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     /*instanceMethod=*/false, chainCall,
                                     argTypes, fnType->getExtInfo(), paramInfos,
                                     required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 /*instanceMethod*/ false, /*chainCall*/ false,
                                 argTypes, proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false,
      /*chainCall=*/false, argTypes, FunctionType::ExtInfo(),
      /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(
      GetReturnType(resultType), /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(
      resultType, /*instanceMethod=*/false, /*chainCall=*/false,
      argTypes, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with fewer args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(
      GetReturnType(proto->getReturnType()), /*instanceMethod=*/true,
      /*chainCall=*/false, argTypes, info, paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(
      getContext().VoidTy, /*instanceMethod=*/false, /*chainCall=*/false,
      None, FunctionType::ExtInfo(), {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  return arrangeLLVMFunctionInfo(signature.getReturnType(),
                                 signature.isInstanceMethod(),
                                 signature.isChainCall(),
                                 argTypes,
                                 signature.getExtInfo(),
                                 paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &
CodeGenTypes::arrangeLLVMFunctionInfo(CanQualType resultType,
                                      bool instanceMethod,
                                      bool chainCall,
                                      ArrayRef<CanQualType> argTypes,
                                      FunctionType::ExtInfo info,
                     ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
                                      RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  CGFunctionInfo::Profile(ID, instanceMethod, chainCall, info, paramInfos,
                          required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, instanceMethod, chainCall, info,
                              paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC,
                                       bool instanceMethod,
                                       bool chainCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
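// For example, 'struct S { int a[2]; _Complex float c; };' expands to four
// scalar arguments: a[0], a[1], and the real and imaginary parts of c.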
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases - all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        for (const CXXBaseSpecifier &BS : CXXRD->bases())
          Bases.push_back(&BS);
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr =
        CGF.Builder.CreateConstGEP2_32(nullptr, BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    if (LV.isBitField())
      EmitStoreThroughLValue(RValue::get(&*AI++), LV);
    else
      EmitStoreOfScalar(&*AI++, LV);
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlignment(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
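/// For example, when loading 4 bytes out of 'struct { struct { int x; } a; }',
/// this dives down to the address of the inner 'int' rather than accessing
/// through the outer struct type.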
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
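/// For example, coercing an i64 to an i32 on a big-endian target shifts the
/// value right by 32 bits before truncating, keeping the same bytes that a
/// store/load pair through memory would keep.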
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
                                      CodeGenFunction &CGF) {
  llvm::Type *SrcTy = Src.getElementType();

  // If SrcTy and Ty are the same, just do a load.
  if (SrcTy == Ty)
    return CGF.Builder.CreateLoad(Src);

  llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);

  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
    Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
                                             DstSize.getFixedSize(), CGF);
    SrcTy = Src.getElementType();
  }

  llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);

  // If the source and destination are integer or pointer types, just do an
  // extension or truncation to the desired type.
  if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
      (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
    llvm::Value *Load = CGF.Builder.CreateLoad(Src);
    return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
  }

  // If load is legal, just bitcast the src pointer.
  if (!SrcSize.isScalable() && !DstSize.isScalable() &&
      SrcSize.getFixedSize() >= DstSize.getFixedSize()) {
    // Generally SrcSize is never greater than DstSize, since this means we are
    // losing bits. However, this can happen in cases where the structure has
    // additional padding, for example due to a user specified alignment.
    //
    // FIXME: Assert that we aren't truncating non-padding bits when we have
    // access to that information.
    Src = CGF.Builder.CreateBitCast(Src,
                                    Ty->getPointerTo(Src.getAddressSpace()));
    return CGF.Builder.CreateLoad(Src);
  }

  // If coercing a fixed vector to a scalable vector for ABI compatibility, and
  // the types match, use the llvm.experimental.vector.insert intrinsic to
  // perform the conversion.
  if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
    if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
      if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
        auto *Load = CGF.Builder.CreateLoad(Src);
        auto *UndefVec = llvm::UndefValue::get(ScalableDst);
        auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
        return CGF.Builder.CreateInsertVector(ScalableDst, UndefVec, Load, Zero,
                                              "castScalableSve");
      }
    }
  }

  // Otherwise do coercion through memory. This is stupid, but simple.
  Address Tmp =
      CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
  CGF.Builder.CreateMemCpy(
      Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
      Src.getAlignment().getAsAlign(),
      llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinSize()));
  return CGF.Builder.CreateLoad(Tmp);
}

// Function to store a first-class aggregate into memory. We prefer to
// store the elements rather than the aggregate to be more friendly to
// fast-isel.
// FIXME: Do we need to recurse here?
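// For example, a value of type '{ i32, float }' is stored as two scalar
// stores into the corresponding fields rather than one aggregate store.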
void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
                                         bool DestIsVolatile) {
  // Prefer scalar stores to first-class aggregate stores.
  if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
    for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
      Address EltPtr = Builder.CreateStructGEP(Dest, i);
      llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
      Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
    }
  } else {
    Builder.CreateStore(Val, Dest, DestIsVolatile);
  }
}

/// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
/// where the source and destination may have different types. The
/// destination is known to be aligned to \arg DstAlign bytes.
///
/// This safely handles the case when the src type is larger than the
/// destination type; the upper bits of the src will be lost.
static void CreateCoercedStore(llvm::Value *Src,
1318 Address Dst,
1319 bool DstIsVolatile,
1320 CodeGenFunction &CGF) {
1321 llvm::Type *SrcTy = Src->getType();
1322 llvm::Type *DstTy = Dst.getElementType();
1323 if (SrcTy == DstTy) {
1324 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1325 return;
1326 }
1327
1328 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1329
1330 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1331 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1332 SrcSize.getFixedSize(), CGF);
1333 DstTy = Dst.getElementType();
1334 }
1335
1336 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1337 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1338 if (SrcPtrTy && DstPtrTy &&
1339 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1340 Src = CGF.Builder.CreatePointerBitCastOrAddrSpaceCast(Src, DstTy);
1341 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1342 return;
1343 }
1344
1345 // If the source and destination are integer or pointer types, just do an
1346 // extension or truncation to the desired type.
1347 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1348 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1349 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1350 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1351 return;
1352 }
1353
1354 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1355
1356 // If store is legal, just bitcast the src pointer.
1357 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1358 isa<llvm::ScalableVectorType>(DstTy) ||
1359 SrcSize.getFixedSize() <= DstSize.getFixedSize()) {
1360 Dst = CGF.Builder.CreateElementBitCast(Dst, SrcTy);
1361 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1362 } else {
1363 // Otherwise do coercion through memory. This is stupid, but
1364 // simple.
1365
1366 // Generally SrcSize is never greater than DstSize, since this means we are
1367 // losing bits. However, this can happen in cases where the structure has
1368 // additional padding, for example due to a user specified alignment.
1369 //
1370 // FIXME: Assert that we aren't truncating non-padding bits when we have
1371 // access to that information.
1372 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1373 CGF.Builder.CreateStore(Src, Tmp);
1374 CGF.Builder.CreateMemCpy(
1375 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1376 Tmp.getAlignment().getAsAlign(),
1377 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedSize()));
1378 }
1379 }
1380
1381 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1382 const ABIArgInfo &info) {
1383 if (unsigned offset = info.getDirectOffset()) {
1384 addr = CGF.Builder.CreateElementBitCast(addr, CGF.Int8Ty);
1385 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1386 CharUnits::fromQuantity(offset));
1387 addr = CGF.Builder.CreateElementBitCast(addr, info.getCoerceToType());
1388 }
1389 return addr;
1390 }
1391
1392 namespace {
1393
1394 /// Encapsulates information about the way function arguments from
1395 /// CGFunctionInfo should be passed to the actual LLVM IR function.
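///
/// For illustration (a sketch; the actual lowering is ABI- and
/// target-dependent): a Clang signature such as
///
///   struct Big { int a[32]; };
///   Big f(int x, _Complex double c);
///
/// might lower to
///
///   void @f(%struct.Big* sret %agg.result, i32 %x, double %c.r, double %c.i)
///
/// in which case this mapping records the sret argument at IR index 0, maps
/// Clang argument 0 to one IR argument at index 1, and maps Clang argument 1
/// to two IR arguments starting at index 2.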
1396 class ClangToLLVMArgMapping {
1397 static const unsigned InvalidIndex = ~0U;
1398 unsigned InallocaArgNo;
1399 unsigned SRetArgNo;
1400 unsigned TotalIRArgs;
1401
1402 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1403 struct IRArgs {
1404 unsigned PaddingArgIndex;
1405 // Argument is expanded to IR arguments at positions
1406 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1407 unsigned FirstArgIndex;
1408 unsigned NumberOfArgs;
1409
1410 IRArgs()
1411 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1412 NumberOfArgs(0) {}
1413 };
1414
1415 SmallVector<IRArgs, 8> ArgInfo;
1416
1417 public:
1418 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1419 bool OnlyRequiredArgs = false)
1420 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1421 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1422 construct(Context, FI, OnlyRequiredArgs);
1423 }
1424
1425 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1426 unsigned getInallocaArgNo() const {
1427 assert(hasInallocaArg());
1428 return InallocaArgNo;
1429 }
1430
1431 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1432 unsigned getSRetArgNo() const {
1433 assert(hasSRetArg());
1434 return SRetArgNo;
1435 }
1436
1437 unsigned totalIRArgs() const { return TotalIRArgs; }
1438
1439 bool hasPaddingArg(unsigned ArgNo) const {
1440 assert(ArgNo < ArgInfo.size());
1441 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1442 }
1443 unsigned getPaddingArgNo(unsigned ArgNo) const {
1444 assert(hasPaddingArg(ArgNo));
1445 return ArgInfo[ArgNo].PaddingArgIndex;
1446 }
1447
1448 /// Returns the index of the first IR argument corresponding to ArgNo, and
1449 /// the number of IR arguments it expands to.
1450 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1451 assert(ArgNo < ArgInfo.size());
1452 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1453 ArgInfo[ArgNo].NumberOfArgs);
1454 }
1455
1456 private:
1457 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1458 bool OnlyRequiredArgs);
1459 };
1460
1461 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1462 const CGFunctionInfo &FI,
1463 bool OnlyRequiredArgs) {
1464 unsigned IRArgNo = 0;
1465 bool SwapThisWithSRet = false;
1466 const ABIArgInfo &RetAI = FI.getReturnInfo();
1467
1468 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1469 SwapThisWithSRet = RetAI.isSRetAfterThis();
1470 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1471 }
1472
1473 unsigned ArgNo = 0;
1474 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1475 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1476 ++I, ++ArgNo) {
1477 assert(I != FI.arg_end());
1478 QualType ArgType = I->type;
1479 const ABIArgInfo &AI = I->info;
1480 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1481 auto &IRArgs = ArgInfo[ArgNo];
1482
1483 if (AI.getPaddingType())
1484 IRArgs.PaddingArgIndex = IRArgNo++;
1485
1486 switch (AI.getKind()) {
1487 case ABIArgInfo::Extend:
1488 case ABIArgInfo::Direct: {
1489 // FIXME: handle sseregparm someday...
1490 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1491 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1492 IRArgs.NumberOfArgs = STy->getNumElements();
1493 } else {
1494 IRArgs.NumberOfArgs = 1;
1495 }
1496 break;
1497 }
1498 case ABIArgInfo::Indirect:
1499 case ABIArgInfo::IndirectAliased:
1500 IRArgs.NumberOfArgs = 1;
1501 break;
1502 case ABIArgInfo::Ignore:
1503 case ABIArgInfo::InAlloca:
1504 // ignore and inalloca don't have matching LLVM parameters.
1505 IRArgs.NumberOfArgs = 0;
1506 break;
1507 case ABIArgInfo::CoerceAndExpand:
1508 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1509 break;
1510 case ABIArgInfo::Expand:
1511 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1512 break;
1513 }
1514
1515 if (IRArgs.NumberOfArgs > 0) {
1516 IRArgs.FirstArgIndex = IRArgNo;
1517 IRArgNo += IRArgs.NumberOfArgs;
1518 }
1519
1520 // Skip over the sret parameter when it comes second. We already handled it
1521 // above.
1522 if (IRArgNo == 1 && SwapThisWithSRet)
1523 IRArgNo++;
1524 }
1525 assert(ArgNo == ArgInfo.size());
1526
1527 if (FI.usesInAlloca())
1528 InallocaArgNo = IRArgNo++;
1529
1530 TotalIRArgs = IRArgNo;
1531 }
1532 } // namespace
1533
1534 /***/
1535
1536 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1537 const auto &RI = FI.getReturnInfo();
1538 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1539 }
1540
1541 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1542 return ReturnTypeUsesSRet(FI) &&
1543 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1544 }
1545
1546 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1547 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1548 switch (BT->getKind()) {
1549 default:
1550 return false;
1551 case BuiltinType::Float:
1552 return getTarget().useObjCFPRetForRealType(TargetInfo::Float);
1553 case BuiltinType::Double:
1554 return getTarget().useObjCFPRetForRealType(TargetInfo::Double);
1555 case BuiltinType::LongDouble:
1556 return getTarget().useObjCFPRetForRealType(TargetInfo::LongDouble);
1557 }
1558 }
1559
1560 return false;
1561 }
1562
1563 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1564 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1565 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1566 if (BT->getKind() == BuiltinType::LongDouble)
1567 return getTarget().useObjCFP2RetForComplexLongDouble();
1568 }
1569 }
1570
1571 return false;
1572 }
1573
1574 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1575 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1576 return GetFunctionType(FI);
1577 }
1578
1579 llvm::FunctionType *
1580 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1581
1582 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1583 (void)Inserted;
1584 assert(Inserted && "Recursively being processed?");
1585
1586 llvm::Type *resultType = nullptr;
1587 const ABIArgInfo &retAI = FI.getReturnInfo();
1588 switch (retAI.getKind()) {
1589 case ABIArgInfo::Expand:
1590 case ABIArgInfo::IndirectAliased:
1591 llvm_unreachable("Invalid ABI kind for return argument");
1592
1593 case ABIArgInfo::Extend:
1594 case ABIArgInfo::Direct:
1595 resultType = retAI.getCoerceToType();
1596 break;
1597
1598 case ABIArgInfo::InAlloca:
1599 if (retAI.getInAllocaSRet()) {
1600 // sret things on win32 aren't void; they return the sret pointer.
1601 QualType ret = FI.getReturnType();
1602 llvm::Type *ty = ConvertType(ret);
1603 unsigned addressSpace = Context.getTargetAddressSpace(ret);
1604 resultType = llvm::PointerType::get(ty, addressSpace);
1605 } else {
1606 resultType = llvm::Type::getVoidTy(getLLVMContext());
1607 }
1608 break;
1609
1610 case ABIArgInfo::Indirect:
1611 case ABIArgInfo::Ignore:
1612 resultType = llvm::Type::getVoidTy(getLLVMContext());
1613 break;
1614
1615 case ABIArgInfo::CoerceAndExpand:
1616 resultType = retAI.getUnpaddedCoerceAndExpandType();
1617 break;
1618 }
1619
1620 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1621 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1622
1623 // Add type for sret argument.
1624 if (IRFunctionArgs.hasSRetArg()) {
1625 QualType Ret = FI.getReturnType();
1626 llvm::Type *Ty = ConvertType(Ret);
1627 unsigned AddressSpace = Context.getTargetAddressSpace(Ret);
1628 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1629 llvm::PointerType::get(Ty, AddressSpace);
1630 }
1631
1632 // Add type for inalloca argument.
1633 if (IRFunctionArgs.hasInallocaArg()) {
1634 auto ArgStruct = FI.getArgStruct();
1635 assert(ArgStruct);
1636 ArgTypes[IRFunctionArgs.getInallocaArgNo()] = ArgStruct->getPointerTo();
1637 }
1638
1639 // Add in all of the required arguments.
1640 unsigned ArgNo = 0;
1641 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1642 ie = it + FI.getNumRequiredArgs();
1643 for (; it != ie; ++it, ++ArgNo) {
1644 const ABIArgInfo &ArgInfo = it->info;
1645
1646 // Insert a padding type to ensure proper alignment.
1647 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1648 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1649 ArgInfo.getPaddingType();
1650
1651 unsigned FirstIRArg, NumIRArgs;
1652 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1653
1654 switch (ArgInfo.getKind()) {
1655 case ABIArgInfo::Ignore:
1656 case ABIArgInfo::InAlloca:
1657 assert(NumIRArgs == 0);
1658 break;
1659
1660 case ABIArgInfo::Indirect: {
1661 assert(NumIRArgs == 1);
1662 // indirect arguments are always on the stack, which is alloca addr space.
1663 llvm::Type *LTy = ConvertTypeForMem(it->type);
1664 ArgTypes[FirstIRArg] = LTy->getPointerTo(
1665 CGM.getDataLayout().getAllocaAddrSpace());
1666 break;
1667 }
1668 case ABIArgInfo::IndirectAliased: {
1669 assert(NumIRArgs == 1);
1670 llvm::Type *LTy = ConvertTypeForMem(it->type);
1671 ArgTypes[FirstIRArg] = LTy->getPointerTo(ArgInfo.getIndirectAddrSpace());
1672 break;
1673 }
1674 case ABIArgInfo::Extend:
1675 case ABIArgInfo::Direct: {
1676 // Fast-isel and the optimizer generally like scalar values better than
1677 // FCAs, so we flatten them if this is safe to do for this argument.
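// For example (a sketch; the coercion type is chosen by the ABI): an
// argument coerced to { i32, float } with getCanBeFlattened() set is
// passed as two IR parameters (i32, float) rather than one aggregate.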
1678 llvm::Type *argType = ArgInfo.getCoerceToType();
1679 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1680 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1681 assert(NumIRArgs == st->getNumElements());
1682 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1683 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1684 } else {
1685 assert(NumIRArgs == 1);
1686 ArgTypes[FirstIRArg] = argType;
1687 }
1688 break;
1689 }
1690
1691 case ABIArgInfo::CoerceAndExpand: {
1692 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1693 for (auto EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1694 *ArgTypesIter++ = EltTy;
1695 }
1696 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1697 break;
1698 }
1699
1700 case ABIArgInfo::Expand:
1701 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1702 getExpandedTypes(it->type, ArgTypesIter);
1703 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1704 break;
1705 }
1706 }
1707
1708 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1709 assert(Erased && "Not in set?");
1710
1711 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1712 }
1713
1714 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1715 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1716 const FunctionProtoType *FPT = MD->getType()->getAs<FunctionProtoType>();
1717
1718 if (!isFuncTypeConvertible(FPT))
1719 return llvm::StructType::get(getLLVMContext());
1720
1721 return GetFunctionType(GD);
1722 }
1723
1724 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1725 llvm::AttrBuilder &FuncAttrs,
1726 const FunctionProtoType *FPT) {
1727 if (!FPT)
1728 return;
1729
1730 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1731 FPT->isNothrow())
1732 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1733 }
1734
1735 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1736 QualType ReturnType) {
1737 // We can't just discard the return value for a record type with a
1738 // complex destructor or a non-trivially copyable type.
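// For example (a sketch): given 'struct S { ~S(); }; S f();', the result of
// a call to f() cannot simply be discarded because its destructor must run,
// whereas the result of a trivially copyable 'int g();' can be dropped.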
1739 if (const RecordType *RT =
1740 ReturnType.getCanonicalType()->getAs<RecordType>()) {
1741 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1742 return ClassDecl->hasTrivialDestructor();
1743 }
1744 return ReturnType.isTriviallyCopyableType(Context);
1745 }
1746
1747 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
1748 bool HasOptnone,
1749 bool AttrOnCallSite,
1750 llvm::AttrBuilder &FuncAttrs) {
1751 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1752 if (!HasOptnone) {
1753 if (CodeGenOpts.OptimizeSize)
1754 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1755 if (CodeGenOpts.OptimizeSize == 2)
1756 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1757 }
1758
1759 if (CodeGenOpts.DisableRedZone)
1760 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1761 if (CodeGenOpts.IndirectTlsSegRefs)
1762 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1763 if (CodeGenOpts.NoImplicitFloat)
1764 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1765
1766 if (AttrOnCallSite) {
1767 // Attributes that should go on the call site only.
1768 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1769 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1770 if (!CodeGenOpts.TrapFuncName.empty())
1771 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1772 } else {
1773 StringRef FpKind;
1774 switch (CodeGenOpts.getFramePointer()) {
1775 case CodeGenOptions::FramePointerKind::None:
1776 FpKind = "none";
1777 break;
1778 case CodeGenOptions::FramePointerKind::NonLeaf:
1779 FpKind = "non-leaf";
1780 break;
1781 case CodeGenOptions::FramePointerKind::All:
1782 FpKind = "all";
1783 break;
1784 }
1785 FuncAttrs.addAttribute("frame-pointer", FpKind);
1786
1787 if (CodeGenOpts.LessPreciseFPMAD)
1788 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1789
1790 if (CodeGenOpts.NullPointerIsValid)
1791 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1792
1793 if (CodeGenOpts.FPDenormalMode != llvm::DenormalMode::getIEEE())
1794 FuncAttrs.addAttribute("denormal-fp-math",
1795 CodeGenOpts.FPDenormalMode.str());
1796 if (CodeGenOpts.FP32DenormalMode != CodeGenOpts.FPDenormalMode) {
1797 FuncAttrs.addAttribute(
1798 "denormal-fp-math-f32",
1799 CodeGenOpts.FP32DenormalMode.str());
1800 }
1801
1802 if (LangOpts.getFPExceptionMode() == LangOptions::FPE_Ignore)
1803 FuncAttrs.addAttribute("no-trapping-math", "true");
1804
1805 // Strict (compliant) code is the default, so only add this attribute to
1806 // indicate that we are trying to workaround a problem case.
1807 if (!CodeGenOpts.StrictFloatCastOverflow)
1808 FuncAttrs.addAttribute("strict-float-cast-overflow", "false");
1809
1810 // TODO: Are these all needed?
1811 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1812 if (LangOpts.NoHonorInfs)
1813 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1814 if (LangOpts.NoHonorNaNs)
1815 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1816 if (LangOpts.UnsafeFPMath)
1817 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1818 if (CodeGenOpts.SoftFloat)
1819 FuncAttrs.addAttribute("use-soft-float", "true");
1820 FuncAttrs.addAttribute("stack-protector-buffer-size",
1821 llvm::utostr(CodeGenOpts.SSPBufferSize));
1822 if (LangOpts.NoSignedZero)
1823 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1824
1825 // TODO: Reciprocal estimate codegen options should apply to instructions?
1826 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1827 if (!Recips.empty())
1828 FuncAttrs.addAttribute("reciprocal-estimates",
1829 llvm::join(Recips, ","));
1830
1831 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1832 CodeGenOpts.PreferVectorWidth != "none")
1833 FuncAttrs.addAttribute("prefer-vector-width",
1834 CodeGenOpts.PreferVectorWidth);
1835
1836 if (CodeGenOpts.StackRealignment)
1837 FuncAttrs.addAttribute("stackrealign");
1838 if (CodeGenOpts.Backchain)
1839 FuncAttrs.addAttribute("backchain");
1840 if (CodeGenOpts.EnableSegmentedStacks)
1841 FuncAttrs.addAttribute("split-stack");
1842
1843 if (CodeGenOpts.SpeculativeLoadHardening)
1844 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1845 }
1846
1847 if (getLangOpts().assumeFunctionsAreConvergent()) {
1848 // Conservatively, mark all functions and calls in CUDA and OpenCL as
1849 // convergent (meaning, they may call an intrinsically convergent op, such
1850 // as __syncthreads() / barrier(), and so can't have certain optimizations
1851 // applied around them). LLVM will remove this attribute where it safely
1852 // can.
1853 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
1854 }
1855
1856 if (getLangOpts().CUDA && getLangOpts().CUDAIsDevice) {
1857 // Exceptions aren't supported in CUDA device code.
1858 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1859 }
1860
1861 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
1862 StringRef Var, Value;
1863 std::tie(Var, Value) = Attr.split('=');
1864 FuncAttrs.addAttribute(Var, Value);
1865 }
1866 }
1867
1868 void CodeGenModule::addDefaultFunctionDefinitionAttributes(llvm::Function &F) {
1869 llvm::AttrBuilder FuncAttrs;
1870 getDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
1871 /* AttrOnCallSite = */ false, FuncAttrs);
1872 // TODO: call GetCPUAndFeaturesAttributes?
1873 F.addAttributes(llvm::AttributeList::FunctionIndex, FuncAttrs);
1874 }
1875
1876 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
1877 llvm::AttrBuilder &attrs) {
1878 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
1879 /*for call*/ false, attrs);
1880 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
1881 }
1882
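/// Translate -fno-builtin and friends into IR string attributes. As a sketch
/// of the mapping implemented below: -fno-builtin, or a "*" wildcard inside
/// __attribute__((no_builtin(...))), produces the single attribute
/// "no-builtins", while -fno-builtin-memcpy produces "no-builtin-memcpy".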
1883 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
1884 const LangOptions &LangOpts,
1885 const NoBuiltinAttr *NBA = nullptr) {
1886 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
1887 SmallString<32> AttributeName;
1888 AttributeName += "no-builtin-";
1889 AttributeName += BuiltinName;
1890 FuncAttrs.addAttribute(AttributeName);
1891 };
1892
1893 // First, handle the language options passed through -fno-builtin.
1894 if (LangOpts.NoBuiltin) {
1895 // -fno-builtin disables them all.
1896 FuncAttrs.addAttribute("no-builtins");
1897 return;
1898 }
1899
1900 // Then, add attributes for builtins specified through -fno-builtin-<name>.
1901 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
1902
1903 // Now, let's check the __attribute__((no_builtin("...")) attribute added to
1904 // the source.
1905 if (!NBA)
1906 return;
1907
1908 // If there is a wildcard in the builtin names specified through the
1909 // attribute, disable them all.
1910 if (llvm::is_contained(NBA->builtinNames(), "*")) {
1911 FuncAttrs.addAttribute("no-builtins");
1912 return;
1913 }
1914
1915 // And last, add the rest of the builtin names.
1916 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
1917 }
1918
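/// Decide whether a value of Clang type QTy, lowered according to AI, can
/// safely be marked 'noundef'. As a sketch of the rules below: a plain 'int'
/// passed directly qualifies, while an aggregate is conservatively rejected
/// for now since its padding bytes may hold undef bits in LLVM IR.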
1919 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
1920 const llvm::DataLayout &DL, const ABIArgInfo &AI,
1921 bool CheckCoerce = true) {
1922 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
1923 if (AI.getKind() == ABIArgInfo::Indirect)
1924 return true;
1925 if (AI.getKind() == ABIArgInfo::Extend)
1926 return true;
1927 if (!DL.typeSizeEqualsStoreSize(Ty))
1928 // TODO: This will result in a modest number of values not marked noundef
1929 // when they could be. We care about values that *invisibly* contain undef
1930 // bits from the perspective of LLVM IR.
1931 return false;
1932 if (CheckCoerce && AI.canHaveCoerceToType()) {
1933 llvm::Type *CoerceTy = AI.getCoerceToType();
1934 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
1935 DL.getTypeSizeInBits(Ty)))
1936 // If we're coercing to a type with a greater size than the canonical one,
1937 // we're introducing new undef bits.
1938 // Coercing to a type of smaller or equal size is ok, as we know that
1939 // there's no internal padding (typeSizeEqualsStoreSize).
1940 return false;
1941 }
1942 if (QTy->isExtIntType())
1943 return true;
1944 if (QTy->isReferenceType())
1945 return true;
1946 if (QTy->isNullPtrType())
1947 return false;
1948 if (QTy->isMemberPointerType())
1949 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
1950 // now, never mark them.
1951 return false;
1952 if (QTy->isScalarType()) {
1953 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
1954 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
1955 return true;
1956 }
1957 if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
1958 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
1959 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
1960 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
1961 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
1962 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
1963
1964 // TODO: Some structs may be `noundef`, in specific situations.
1965 return false;
1966 }
1967
1968 /// Construct the IR attribute list of a function or call.
1969 ///
1970 /// When adding an attribute, please consider where it should be handled:
1971 ///
1972 /// - getDefaultFunctionAttributes is for attributes that are essentially
1973 /// part of the global target configuration (but perhaps can be
1974 /// overridden on a per-function basis). Adding attributes there
1975 /// will cause them to also be set in frontends that build on Clang's
1976 /// target-configuration logic, as well as for code defined in library
1977 /// modules such as CUDA's libdevice.
1978 ///
1979 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
1980 /// and adds declaration-specific, convention-specific, and
1981 /// frontend-specific logic. The last is of particular importance:
1982 /// attributes that restrict how the frontend generates code must be
1983 /// added here rather than getDefaultFunctionAttributes.
1984 ///
1985 void CodeGenModule::ConstructAttributeList(StringRef Name,
1986 const CGFunctionInfo &FI,
1987 CGCalleeInfo CalleeInfo,
1988 llvm::AttributeList &AttrList,
1989 unsigned &CallingConv,
1990 bool AttrOnCallSite, bool IsThunk) {
1991 llvm::AttrBuilder FuncAttrs;
1992 llvm::AttrBuilder RetAttrs;
1993
1994 // Collect function IR attributes from the CC lowering.
1995 // We'll collect the parameter and result attributes later.
1996 CallingConv = FI.getEffectiveCallingConvention();
1997 if (FI.isNoReturn())
1998 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
1999 if (FI.isCmseNSCall())
2000 FuncAttrs.addAttribute("cmse_nonsecure_call");
2001
2002 // Collect function IR attributes from the callee prototype if we have one.
2003 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2004 CalleeInfo.getCalleeFunctionProtoType());
2005
2006 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2007
2008 bool HasOptnone = false;
2009 // The NoBuiltinAttr attached to the target FunctionDecl.
2010 const NoBuiltinAttr *NBA = nullptr;
2011
2012 // Collect function IR attributes based on declaration-specific
2013 // information.
2014 // FIXME: handle sseregparm someday...
2015 if (TargetDecl) {
2016 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2017 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2018 if (TargetDecl->hasAttr<NoThrowAttr>())
2019 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2020 if (TargetDecl->hasAttr<NoReturnAttr>())
2021 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2022 if (TargetDecl->hasAttr<ColdAttr>())
2023 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2024 if (TargetDecl->hasAttr<HotAttr>())
2025 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2026 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2027 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2028 if (TargetDecl->hasAttr<ConvergentAttr>())
2029 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2030
2031 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2032 AddAttributesFromFunctionProtoType(
2033 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2034 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2035 // A sane operator new returns a non-aliasing pointer.
2036 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2037 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2038 (Kind == OO_New || Kind == OO_Array_New))
2039 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2040 }
2041 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2042 const bool IsVirtualCall = MD && MD->isVirtual();
2043 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2044 // virtual function. These attributes are not inherited by overriders.
2045 if (!(AttrOnCallSite && IsVirtualCall)) {
2046 if (Fn->isNoReturn())
2047 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2048 NBA = Fn->getAttr<NoBuiltinAttr>();
2049 }
2050 // Only place nomerge attribute on call sites, never functions. This
2051 // allows it to work on indirect virtual function calls.
2052 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2053 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2054
2055 // Add known guaranteed alignment for allocation functions.
2056 if (unsigned BuiltinID = Fn->getBuiltinID()) {
2057 switch (BuiltinID) {
2058 case Builtin::BIaligned_alloc:
2059 case Builtin::BIcalloc:
2060 case Builtin::BImalloc:
2061 case Builtin::BImemalign:
2062 case Builtin::BIrealloc:
2063 case Builtin::BIstrdup:
2064 case Builtin::BIstrndup:
2065 RetAttrs.addAlignmentAttr(Context.getTargetInfo().getNewAlign() /
2066 Context.getTargetInfo().getCharWidth());
2067 break;
2068 default:
2069 break;
2070 }
2071 }
2072 }
2073
2074 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2075 if (TargetDecl->hasAttr<ConstAttr>()) {
2076 FuncAttrs.addAttribute(llvm::Attribute::ReadNone);
2077 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2078 // gcc specifies that 'const' functions have greater restrictions than
2079 // 'pure' functions, so they also cannot have infinite loops.
2080 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2081 } else if (TargetDecl->hasAttr<PureAttr>()) {
2082 FuncAttrs.addAttribute(llvm::Attribute::ReadOnly);
2083 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2084 // gcc specifies that 'pure' functions cannot have infinite loops.
2085 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2086 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2087 FuncAttrs.addAttribute(llvm::Attribute::ArgMemOnly);
2088 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2089 }
2090 if (TargetDecl->hasAttr<RestrictAttr>())
2091 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2092 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2093 !CodeGenOpts.NullPointerIsValid)
2094 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2095 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2096 FuncAttrs.addAttribute("no_caller_saved_registers");
2097 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2098 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2099 if (TargetDecl->hasAttr<LeafAttr>())
2100 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2101
2102 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2103 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2104 Optional<unsigned> NumElemsParam;
2105 if (AllocSize->getNumElemsParam().isValid())
2106 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2107 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2108 NumElemsParam);
2109 }
2110
2111 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2112 if (getLangOpts().OpenCLVersion <= 120) {
2113 // In OpenCL v1.2, work groups are always uniform.
2114 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2115 } else {
2116 // In OpenCL v2.0, work groups may or may not be uniform. The
2117 // '-cl-uniform-work-group-size' compile option provides a hint
2118 // to the compiler that the global work-size is a multiple of
2119 // the work-group size specified to clEnqueueNDRangeKernel
2120 // (i.e. work groups are uniform).
2121 FuncAttrs.addAttribute("uniform-work-group-size",
2122 llvm::toStringRef(CodeGenOpts.UniformWGSize));
2123 }
2124 }
2125
2126 std::string AssumptionValueStr;
2127 for (AssumptionAttr *AssumptionA :
2128 TargetDecl->specific_attrs<AssumptionAttr>()) {
2129 std::string AS = AssumptionA->getAssumption().str();
2130 if (!AS.empty() && !AssumptionValueStr.empty())
2131 AssumptionValueStr += ",";
2132 AssumptionValueStr += AS;
2133 }
2134
2135 if (!AssumptionValueStr.empty())
2136 FuncAttrs.addAttribute(llvm::AssumptionAttrKey, AssumptionValueStr);
2137 }
2138
2139 // Attach "no-builtins" attributes to:
2140 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2141 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2142 // The attributes can come from:
2143 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2144 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2145 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2146
2147 // Collect function IR attributes based on global settings.
2148 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2149
2150 // Override some default IR attributes based on declaration-specific
2151 // information.
2152 if (TargetDecl) {
2153 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2154 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2155 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2156 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2157 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2158 FuncAttrs.removeAttribute("split-stack");
2159
2160 // Add NonLazyBind attribute to function declarations when -fno-plt
2161 // is used.
2162 // FIXME: what if we just haven't processed the function definition
2163 // yet, or if it's an external definition like C99 inline?
2164 if (CodeGenOpts.NoPLT) {
2165 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2166 if (!Fn->isDefined() && !AttrOnCallSite) {
2167 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2168 }
2169 }
2170 }
2171 }
2172
2173 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2174 // functions with -funique-internal-linkage-names.
2175 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2176 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2177 if (this->getFunctionLinkage(Fn) == llvm::GlobalValue::InternalLinkage)
2178 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2179 "selected");
2180 }
2181 }
2182
2183 // Collect non-call-site function IR attributes from declaration-specific
2184 // information.
2185 if (!AttrOnCallSite) {
2186 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2187 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2188
2189 // Whether tail calls are enabled.
2190 auto shouldDisableTailCalls = [&] {
2191 // Should this be honored in getDefaultFunctionAttributes?
2192 if (CodeGenOpts.DisableTailCalls)
2193 return true;
2194
2195 if (!TargetDecl)
2196 return false;
2197
2198 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2199 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2200 return true;
2201
2202 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2203 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2204 if (!BD->doesNotEscape())
2205 return true;
2206 }
2207
2208 return false;
2209 };
2210 if (shouldDisableTailCalls())
2211 FuncAttrs.addAttribute("disable-tail-calls", "true");
2212
2213 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2214 // handles these separately to set them based on the global defaults.
2215 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2216 }
2217
2218 // Collect attributes from arguments and return values.
2219 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2220
2221 QualType RetTy = FI.getReturnType();
2222 const ABIArgInfo &RetAI = FI.getReturnInfo();
2223 const llvm::DataLayout &DL = getDataLayout();
2224
2225 // C++ explicitly makes returning undefined values UB. C's rule only applies
2226 // to used values, so we never mark them noundef for now.
2227 bool HasStrictReturn = getLangOpts().CPlusPlus;
2228 if (TargetDecl) {
2229 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl))
2230 HasStrictReturn &= !FDecl->isExternC();
2231 else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl))
2232 // Function pointer
2233 HasStrictReturn &= !VDecl->isExternC();
2234 }
2235
2236 // We don't want to be too aggressive with the return checking, unless
2237 // it's explicit in the code opts or we're using an appropriate sanitizer.
2238 // Try to respect what the programmer intended.
2239 HasStrictReturn &= getCodeGenOpts().StrictReturn ||
2240 !MayDropFunctionReturn(getContext(), RetTy) ||
2241 getLangOpts().Sanitize.has(SanitizerKind::Memory) ||
2242 getLangOpts().Sanitize.has(SanitizerKind::Return);
2243
2244 // Determine if the return type could be partially undef
2245 if (CodeGenOpts.EnableNoundefAttrs && HasStrictReturn) {
2246 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2247 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2248 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2249 }
2250
2251 switch (RetAI.getKind()) {
2252 case ABIArgInfo::Extend:
2253 if (RetAI.isSignExt())
2254 RetAttrs.addAttribute(llvm::Attribute::SExt);
2255 else
2256 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2257 LLVM_FALLTHROUGH;
2258 case ABIArgInfo::Direct:
2259 if (RetAI.getInReg())
2260 RetAttrs.addAttribute(llvm::Attribute::InReg);
2261 break;
2262 case ABIArgInfo::Ignore:
2263 break;
2264
2265 case ABIArgInfo::InAlloca:
2266 case ABIArgInfo::Indirect: {
2267 // inalloca and sret disable readnone and readonly
2268 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2269 .removeAttribute(llvm::Attribute::ReadNone);
2270 break;
2271 }
2272
2273 case ABIArgInfo::CoerceAndExpand:
2274 break;
2275
2276 case ABIArgInfo::Expand:
2277 case ABIArgInfo::IndirectAliased:
2278 llvm_unreachable("Invalid ABI kind for return argument");
2279 }
2280
2281 if (!IsThunk) {
2282 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2283 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2284 QualType PTy = RefTy->getPointeeType();
2285 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2286 RetAttrs.addDereferenceableAttr(
2287 getMinimumObjectSize(PTy).getQuantity());
2288 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2289 !CodeGenOpts.NullPointerIsValid)
2290 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2291 if (PTy->isObjectType()) {
2292 llvm::Align Alignment =
2293 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2294 RetAttrs.addAlignmentAttr(Alignment);
2295 }
2296 }
2297 }
2298
2299 bool hasUsedSRet = false;
2300 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2301
2302 // Attach attributes to sret.
2303 if (IRFunctionArgs.hasSRetArg()) {
2304 llvm::AttrBuilder SRETAttrs;
2305 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2306 hasUsedSRet = true;
2307 if (RetAI.getInReg())
2308 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2309 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2310 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2311 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2312 }
2313
2314 // Attach attributes to inalloca argument.
2315 if (IRFunctionArgs.hasInallocaArg()) {
2316 llvm::AttrBuilder Attrs;
2317 Attrs.addInAllocaAttr(FI.getArgStruct());
2318 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2319 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2320 }
2321
2322 // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2323 // unless this is a thunk function.
2324 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2325 if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
2326 !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
2327 auto IRArgs = IRFunctionArgs.getIRArgs(0);
2328
2329 assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");
2330
2331 llvm::AttrBuilder Attrs;
2332
2333 QualType ThisTy =
2334 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType();
2335
2336 if (!CodeGenOpts.NullPointerIsValid &&
2337 getContext().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
2338 Attrs.addAttribute(llvm::Attribute::NonNull);
2339 Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
2340 } else {
2341 // FIXME dereferenceable should be correct here, regardless of
2342 // NullPointerIsValid. However, dereferenceable currently does not always
2343 // respect NullPointerIsValid and may imply nonnull and break the program.
2344 // See https://reviews.llvm.org/D66618 for discussions.
2345 Attrs.addDereferenceableOrNullAttr(
2346 getMinimumObjectSize(
2347 FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
2348 .getQuantity());
2349 }
2350
2351 llvm::Align Alignment =
2352 getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
2353 /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
2354 .getAsAlign();
2355 Attrs.addAlignmentAttr(Alignment);
2356
2357 ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
2358 }
2359
2360 unsigned ArgNo = 0;
2361 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
2362 E = FI.arg_end();
2363 I != E; ++I, ++ArgNo) {
2364 QualType ParamType = I->type;
2365 const ABIArgInfo &AI = I->info;
2366 llvm::AttrBuilder Attrs;
2367
2368 // Add attribute for padding argument, if necessary.
2369 if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
2370 if (AI.getPaddingInReg()) {
2371 ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
2372 llvm::AttributeSet::get(
2373 getLLVMContext(),
2374 llvm::AttrBuilder().addAttribute(llvm::Attribute::InReg));
2375 }
2376 }
2377
2378 // Decide whether the argument we're handling could be partially undef
2379 bool ArgNoUndef = DetermineNoUndef(ParamType, getTypes(), DL, AI);
2380 if (CodeGenOpts.EnableNoundefAttrs && ArgNoUndef)
2381 Attrs.addAttribute(llvm::Attribute::NoUndef);
2382
2383 // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
2384 // have the corresponding parameter variable. It doesn't make
2385 // sense to do it here because parameters are so messed up.
2386 switch (AI.getKind()) {
2387 case ABIArgInfo::Extend:
2388 if (AI.isSignExt())
2389 Attrs.addAttribute(llvm::Attribute::SExt);
2390 else
2391 Attrs.addAttribute(llvm::Attribute::ZExt);
2392 LLVM_FALLTHROUGH;
2393 case ABIArgInfo::Direct:
2394 if (ArgNo == 0 && FI.isChainCall())
2395 Attrs.addAttribute(llvm::Attribute::Nest);
2396 else if (AI.getInReg())
2397 Attrs.addAttribute(llvm::Attribute::InReg);
2398 Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));
2399 break;
2400
2401 case ABIArgInfo::Indirect: {
2402 if (AI.getInReg())
2403 Attrs.addAttribute(llvm::Attribute::InReg);
2404
2405 if (AI.getIndirectByVal())
2406 Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));
2407
2408 auto *Decl = ParamType->getAsRecordDecl();
2409 if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
2410 Decl->getArgPassingRestrictions() == RecordDecl::APK_CanPassInRegs)
2411 // When calling the function, the pointer passed in will be the only
2412 // reference to the underlying object. Mark it accordingly.
2413 Attrs.addAttribute(llvm::Attribute::NoAlias);
2414
2415 // TODO: We could add the byref attribute if not byval, but it would
2416 // require updating many testcases.
2417
2418 CharUnits Align = AI.getIndirectAlign();
2419
2420 // In a byval argument, it is important that the required
2421 // alignment of the type is honored, as LLVM might be creating a
2422 // *new* stack object, and needs to know what alignment to give
2423 // it. (Sometimes it can deduce a sensible alignment on its own,
2424 // but not if clang decides it must emit a packed struct, or the
2425 // user specifies increased alignment requirements.)
2426 //
2427 // This is different from indirect *not* byval, where the object
2428 // exists already, and the align attribute is purely
2429 // informative.
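//
// For instance (a sketch): a 'struct alignas(32) V' passed byval must
// carry 'byval align 32' so that the implicit stack copy LLVM creates
// for the argument is given 32-byte alignment.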
2430 assert(!Align.isZero());
2431
2432 // For now, only add this when we have a byval argument.
2433 // TODO: be less lazy about updating test cases.
2434 if (AI.getIndirectByVal())
2435 Attrs.addAlignmentAttr(Align.getQuantity());
2436
2437 // byval disables readnone and readonly.
2438 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2439 .removeAttribute(llvm::Attribute::ReadNone);
2440
2441 break;
2442 }
2443 case ABIArgInfo::IndirectAliased: {
2444 CharUnits Align = AI.getIndirectAlign();
2445 Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
2446 Attrs.addAlignmentAttr(Align.getQuantity());
2447 break;
2448 }
2449 case ABIArgInfo::Ignore:
2450 case ABIArgInfo::Expand:
2451 case ABIArgInfo::CoerceAndExpand:
2452 break;
2453
2454 case ABIArgInfo::InAlloca:
2455 // inalloca disables readnone and readonly.
2456 FuncAttrs.removeAttribute(llvm::Attribute::ReadOnly)
2457 .removeAttribute(llvm::Attribute::ReadNone);
2458 continue;
2459 }
2460
2461 if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
2462 QualType PTy = RefTy->getPointeeType();
2463 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2464 Attrs.addDereferenceableAttr(
2465 getMinimumObjectSize(PTy).getQuantity());
2466 if (getContext().getTargetAddressSpace(PTy) == 0 &&
2467 !CodeGenOpts.NullPointerIsValid)
2468 Attrs.addAttribute(llvm::Attribute::NonNull);
2469 if (PTy->isObjectType()) {
2470 llvm::Align Alignment =
2471 getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
2472 Attrs.addAlignmentAttr(Alignment);
2473 }
2474 }
2475
2476 switch (FI.getExtParameterInfo(ArgNo).getABI()) {
2477 case ParameterABI::Ordinary:
2478 break;
2479
2480 case ParameterABI::SwiftIndirectResult: {
2481 // Add 'sret' if we haven't already used it for something, but
2482 // only if the result is void.
2483 if (!hasUsedSRet && RetTy->isVoidType()) {
2484 Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
2485 hasUsedSRet = true;
2486 }
2487
2488 // Add 'noalias' in either case.
2489 Attrs.addAttribute(llvm::Attribute::NoAlias);
2490
2491 // Add 'dereferenceable' and 'alignment'.
2492 auto PTy = ParamType->getPointeeType();
2493 if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
2494 auto info = getContext().getTypeInfoInChars(PTy);
2495 Attrs.addDereferenceableAttr(info.Width.getQuantity());
2496 Attrs.addAlignmentAttr(info.Align.getAsAlign());
2497 }
2498 break;
2499 }
2500
2501 case ParameterABI::SwiftErrorResult:
2502 Attrs.addAttribute(llvm::Attribute::SwiftError);
2503 break;
2504
2505 case ParameterABI::SwiftContext:
2506 Attrs.addAttribute(llvm::Attribute::SwiftSelf);
2507 break;
2508 }
2509
2510 if (FI.getExtParameterInfo(ArgNo).isNoEscape())
2511 Attrs.addAttribute(llvm::Attribute::NoCapture);
2512
2513 if (Attrs.hasAttributes()) {
2514 unsigned FirstIRArg, NumIRArgs;
2515 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2516 for (unsigned i = 0; i < NumIRArgs; i++)
2517 ArgAttrs[FirstIRArg + i] =
2518 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2519 }
2520 }
2521 assert(ArgNo == FI.arg_size());
2522
2523 AttrList = llvm::AttributeList::get(
2524 getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
2525 llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
2526 }
2527
2528 /// An argument came in as a promoted argument; demote it back to its
2529 /// declared type.
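///
/// For example (a sketch): in a K&R-style definition 'void f(x) float x; {}'
/// the argument arrives promoted to double per the default argument
/// promotions, and the prolog narrows it back to float with an fptrunc;
/// integer promotions (e.g. short to int) are undone with a trunc.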
2530 static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
2531 const VarDecl *var,
2532 llvm::Value *value) {
2533 llvm::Type *varType = CGF.ConvertType(var->getType());
2534
2535 // This can happen with promotions that actually don't change the
2536 // underlying type, like the enum promotions.
2537 if (value->getType() == varType) return value;
2538
2539 assert((varType->isIntegerTy() || varType->isFloatingPointTy())
2540 && "unexpected promotion type");
2541
2542 if (isa<llvm::IntegerType>(varType))
2543 return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");
2544
2545 return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
2546 }
2547
2548 /// Returns the attribute (either a parameter attribute or a function
2549 /// attribute) that declares argument ArgNo to be non-null.
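///
/// For example (a sketch): with
///   void f(void *p, void *q) __attribute__((nonnull(2)));
/// the function-level attribute declares the second parameter non-null,
/// while the parameter-level form
///   void g(void *p __attribute__((nonnull)));
/// attaches the attribute to the ParmVarDecl itself and is checked first.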
2550 static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
2551 QualType ArgType, unsigned ArgNo) {
2552 // FIXME: __attribute__((nonnull)) can also be applied to:
2553 // - references to pointers, where the pointee is known to be
2554 // nonnull (apparently a Clang extension)
2555 // - transparent unions containing pointers
2556 // In the former case, LLVM IR cannot represent the constraint. In
2557 // the latter case, we have no guarantee that the transparent union
2558 // is in fact passed as a pointer.
2559 if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
2560 return nullptr;
2561 // First, check attribute on parameter itself.
2562 if (PVD) {
2563 if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
2564 return ParmNNAttr;
2565 }
2566 // Check function attributes.
2567 if (!FD)
2568 return nullptr;
2569 for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
2570 if (NNAttr->isNonNull(ArgNo))
2571 return NNAttr;
2572 }
2573 return nullptr;
2574 }
2575
2576 namespace {
2577 struct CopyBackSwiftError final : EHScopeStack::Cleanup {
2578 Address Temp;
2579 Address Arg;
2580 CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
2581 void Emit(CodeGenFunction &CGF, Flags flags) override {
2582 llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
2583 CGF.Builder.CreateStore(errorValue, Arg);
2584 }
2585 };
2586 }
2587
2588 void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
2589 llvm::Function *Fn,
2590 const FunctionArgList &Args) {
2591 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
2592 // Naked functions don't have prologues.
2593 return;
2594
2595 // If this is an implicit-return-zero function, go ahead and
2596 // initialize the return value. TODO: it might be nice to have
2597 // a more general mechanism for this that didn't require synthesized
2598 // return statements.
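// For example, 'int main() {}' has an implicit 'return 0;' when control
// falls off the end; storing zero into the return slot up front gives that
// behavior without synthesizing a return statement.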
2599 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
2600 if (FD->hasImplicitReturnZero()) {
2601 QualType RetTy = FD->getReturnType().getUnqualifiedType();
2602 llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
2603 llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
2604 Builder.CreateStore(Zero, ReturnValue);
2605 }
2606 }
2607
2608 // FIXME: We no longer need the types from FunctionArgList; lift up and
2609 // simplify.
2610
2611 ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
2612 assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());
2613
2614 // If we're using inalloca, all the memory arguments are GEPs off of the last
2615 // parameter, which is a pointer to the complete memory area.
2616 Address ArgStruct = Address::invalid();
2617 if (IRFunctionArgs.hasInallocaArg()) {
2618 ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
2619 FI.getArgStructAlignment());
2620
2621 assert(ArgStruct.getType() == FI.getArgStruct()->getPointerTo());
2622 }
2623
2624 // Name the struct return parameter.
2625 if (IRFunctionArgs.hasSRetArg()) {
2626 auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
2627 AI->setName("agg.result");
2628 AI->addAttr(llvm::Attribute::NoAlias);
2629 }
2630
2631 // Track if we received the parameter as a pointer (indirect, byval, or
2632 // inalloca). If we already have a pointer, EmitParmDecl doesn't need to
2633 // copy it into a local alloca for us.
2634 SmallVector<ParamValue, 16> ArgVals;
2635 ArgVals.reserve(Args.size());
2636
2637 // Create a pointer value for every parameter declaration. This usually
2638 // entails copying one or more LLVM IR arguments into an alloca. Don't push
2639 // any cleanups or do anything that might unwind. We do that separately, so
2640 // we can push the cleanups in the correct order for the ABI.
2641 assert(FI.arg_size() == Args.size() &&
2642 "Mismatch between function signature & arguments.");
2643 unsigned ArgNo = 0;
2644 CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
2645 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
2646 i != e; ++i, ++info_it, ++ArgNo) {
2647 const VarDecl *Arg = *i;
2648 const ABIArgInfo &ArgI = info_it->info;
2649
2650 bool isPromoted =
2651 isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
2652 // We are converting from ABIArgInfo type to VarDecl type directly, unless
2653 // the parameter is promoted. In this case we convert to
2654 // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
2655 QualType Ty = isPromoted ? info_it->type : Arg->getType();
2656 assert(hasScalarEvaluationKind(Ty) ==
2657 hasScalarEvaluationKind(Arg->getType()));
2658
2659 unsigned FirstIRArg, NumIRArgs;
2660 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
2661
2662 switch (ArgI.getKind()) {
2663 case ABIArgInfo::InAlloca: {
2664 assert(NumIRArgs == 0);
2665 auto FieldIndex = ArgI.getInAllocaFieldIndex();
2666 Address V =
2667 Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
2668 if (ArgI.getInAllocaIndirect())
2669 V = Address(Builder.CreateLoad(V),
2670 getContext().getTypeAlignInChars(Ty));
2671 ArgVals.push_back(ParamValue::forIndirect(V));
2672 break;
2673 }
2674
2675 case ABIArgInfo::Indirect:
2676 case ABIArgInfo::IndirectAliased: {
2677 assert(NumIRArgs == 1);
2678 Address ParamAddr =
2679 Address(Fn->getArg(FirstIRArg), ArgI.getIndirectAlign());
2680
2681 if (!hasScalarEvaluationKind(Ty)) {
2682 // Aggregates and complex variables are accessed by reference. All we
2683 // need to do is realign the value, if requested. Also, if the address
2684 // may be aliased, copy it to ensure that the parameter variable is
2685 // mutable and has a unique address, as C requires.
2686 Address V = ParamAddr;
2687 if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
2688 Address AlignedTemp = CreateMemTemp(Ty, "coerce");
2689
2690 // Copy from the incoming argument pointer to the temporary with the
2691 // appropriate alignment.
2692 //
2693 // FIXME: We should have a common utility for generating an aggregate
2694 // copy.
2695 CharUnits Size = getContext().getTypeSizeInChars(Ty);
2696 Builder.CreateMemCpy(
2697 AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
2698 ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
2699 llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
2700 V = AlignedTemp;
2701 }
2702 ArgVals.push_back(ParamValue::forIndirect(V));
2703 } else {
2704 // Load scalar value from indirect argument.
2705 llvm::Value *V =
2706 EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());
2707
2708 if (isPromoted)
2709 V = emitArgumentDemotion(*this, Arg, V);
2710 ArgVals.push_back(ParamValue::forDirect(V));
2711 }
2712 break;
2713 }
2714
2715 case ABIArgInfo::Extend:
2716 case ABIArgInfo::Direct: {
2717 auto AI = Fn->getArg(FirstIRArg);
2718 llvm::Type *LTy = ConvertType(Arg->getType());
2719
2720 // Prepare parameter attributes. So far, only attributes for pointer
2721 // parameters are prepared. See
2722 // http://llvm.org/docs/LangRef.html#paramattrs.
2723 if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
2724 ArgI.getCoerceToType()->isPointerTy()) {
2725 assert(NumIRArgs == 1);
2726
2727 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
2728 // Set `nonnull` attribute if any.
2729 if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
2730 PVD->getFunctionScopeIndex()) &&
2731 !CGM.getCodeGenOpts().NullPointerIsValid)
2732 AI->addAttr(llvm::Attribute::NonNull);
2733
2734 QualType OTy = PVD->getOriginalType();
2735 if (const auto *ArrTy =
2736 getContext().getAsConstantArrayType(OTy)) {
2737 // A C99 array parameter declaration with the static keyword also
2738 // indicates dereferenceability, and if the size is constant we can
2739 // use the dereferenceable attribute (which requires the size in
2740 // bytes).
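// For example (a sketch, assuming 4-byte ints): given
//   void f(int a[static 4]);
// the pointer parameter receives an alignment attribute plus
// dereferenceable(16), i.e. 4 elements * 4 bytes each.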
2741 if (ArrTy->getSizeModifier() == ArrayType::Static) {
2742 QualType ETy = ArrTy->getElementType();
2743 llvm::Align Alignment =
2744 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2745 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2746 uint64_t ArrSize = ArrTy->getSize().getZExtValue();
2747 if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
2748 ArrSize) {
2749 llvm::AttrBuilder Attrs;
2750 Attrs.addDereferenceableAttr(
2751 getContext().getTypeSizeInChars(ETy).getQuantity() *
2752 ArrSize);
2753 AI->addAttrs(Attrs);
2754 } else if (getContext().getTargetInfo().getNullPointerValue(
2755 ETy.getAddressSpace()) == 0 &&
2756 !CGM.getCodeGenOpts().NullPointerIsValid) {
2757 AI->addAttr(llvm::Attribute::NonNull);
2758 }
2759 }
2760 } else if (const auto *ArrTy =
2761 getContext().getAsVariableArrayType(OTy)) {
2762 // For C99 VLAs with the static keyword, we don't know the size so
2763 // we can't use the dereferenceable attribute, but in addrspace(0)
2764 // we know that it must be nonnull.
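// For example, `int a[static n]` yields only `align 4` plus `nonnull`
// (assuming a 4-byte int in addrspace(0) with null-pointer validity not
// enabled); no `dereferenceable` is possible since the extent is unknown
// at compile time.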
2765 if (ArrTy->getSizeModifier() == VariableArrayType::Static) {
2766 QualType ETy = ArrTy->getElementType();
2767 llvm::Align Alignment =
2768 CGM.getNaturalTypeAlignment(ETy).getAsAlign();
2769 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(Alignment));
2770 if (!getContext().getTargetAddressSpace(ETy) &&
2771 !CGM.getCodeGenOpts().NullPointerIsValid)
2772 AI->addAttr(llvm::Attribute::NonNull);
2773 }
2774 }
2775
2776 // Set `align` attribute if any.
2777 const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
2778 if (!AVAttr)
2779 if (const auto *TOTy = dyn_cast<TypedefType>(OTy))
2780 AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
2781 if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
2782 // If the alignment-assumption sanitizer is enabled, we do *not* add the
2783 // alignment attribute here; instead we emit a normal alignment
2784 // assumption so that the UBSan check can fire.
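// For example (hypothetical typedef, for illustration only):
//   typedef double *aligned_double_ptr __attribute__((align_value(64)));
// a parameter of type aligned_double_ptr gets an `align 64` parameter
// attribute, unless an equal or larger alignment is already present.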
2785 llvm::ConstantInt *AlignmentCI =
2786 cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
2787 unsigned AlignmentInt =
2788 AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
2789 if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
2790 AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
2791 AI->addAttrs(llvm::AttrBuilder().addAlignmentAttr(
2792 llvm::Align(AlignmentInt)));
2793 }
2794 }
2795 }
2796
2797 // Set 'noalias' if an argument type has the `restrict` qualifier.
2798 if (Arg->getType().isRestrictQualified())
2799 AI->addAttr(llvm::Attribute::NoAlias);
2800 }
2801
2802 // Prepare the argument value. If we have the trivial case, handle it
2803 // with no muss and fuss.
2804 if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
2805 ArgI.getCoerceToType() == ConvertType(Ty) &&
2806 ArgI.getDirectOffset() == 0) {
2807 assert(NumIRArgs == 1);
2808
2809 // LLVM expects swifterror parameters to be used in very restricted
2810 // ways. Copy the value into a less-restricted temporary.
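// A rough sketch of what this spill produces for a swifterror parameter
// %err of IR type %T** (names illustrative):
//   %swifterror.temp = alloca %T*
//   %0 = load %T*, %T** %err
//   store %T* %0, %T** %swifterror.temp
// The body then uses %swifterror.temp, and the cleanup pushed below
// copies the value back into %err on function exit.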
2811 llvm::Value *V = AI;
2812 if (FI.getExtParameterInfo(ArgNo).getABI()
2813 == ParameterABI::SwiftErrorResult) {
2814 QualType pointeeTy = Ty->getPointeeType();
2815 assert(pointeeTy->isPointerType());
2816 Address temp =
2817 CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
2818 Address arg = Address(V, getContext().getTypeAlignInChars(pointeeTy));
2819 llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
2820 Builder.CreateStore(incomingErrorValue, temp);
2821 V = temp.getPointer();
2822
2823 // Push a cleanup to copy the value back at the end of the function.
2824 // The convention does not guarantee that the value will be written
2825 // back if the function exits with an unwind exception.
2826 EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
2827 }
2828
2829 // Ensure the argument is the correct type.
2830 if (V->getType() != ArgI.getCoerceToType())
2831 V = Builder.CreateBitCast(V, ArgI.getCoerceToType());
2832
2833 if (isPromoted)
2834 V = emitArgumentDemotion(*this, Arg, V);
2835
2836 // Because of merging of function types from multiple decls it is
2837 // possible for the type of an argument to not match the corresponding
2838 // type in the function type. Since we are generating code for the callee
2839 // here, add a cast to the argument type.
2840 llvm::Type *LTy = ConvertType(Arg->getType());
2841 if (V->getType() != LTy)
2842 V = Builder.CreateBitCast(V, LTy);
2843
2844 ArgVals.push_back(ParamValue::forDirect(V));
2845 break;
2846 }
2847
2848 // VLST (vector-length-specific type) arguments are coerced to VLATs
2849 // (vector-length-agnostic types) at the function boundary for ABI
2850 // consistency. If this is a VLST that was coerced to a VLAT at the
2851 // function boundary and the types match up, use
2852 // llvm.experimental.vector.extract to convert back to the original VLST.
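// For example (assuming -msve-vector-bits=512), an argument of a type
// declared as
//   typedef svint32_t fixed_int32_t __attribute__((arm_sve_vector_bits(512)));
// arrives as <vscale x 4 x i32> and is converted back to <16 x i32> with
//   call <16 x i32> @llvm.experimental.vector.extract(... %coerce, i64 0)
// (intrinsic name abbreviated).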
2853 if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
2854 auto *Coerced = Fn->getArg(FirstIRArg);
2855 if (auto *VecTyFrom =
2856 dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
2857 if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
2858 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
2859
2860 assert(NumIRArgs == 1);
2861 Coerced->setName(Arg->getName() + ".coerce");
2862 ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
2863 VecTyTo, Coerced, Zero, "castFixedSve")));
2864 break;
2865 }
2866 }
2867 }
2868
2869 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
2870 Arg->getName());
2871
2872 // Pointer to store into.
2873 Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);
2874
2875 // Fast-isel and the optimizer generally like scalar values better than
2876 // FCAs, so we flatten them if this is safe to do for this argument.
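// For example, an argument coerced to { double, double } arrives as two
// scalar IR arguments named <arg>.coerce0 and <arg>.coerce1, which are
// stored back into the temporary field by field below.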
2877 llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
2878 if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
2879 STy->getNumElements() > 1) {
2880 uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(STy);
2881 llvm::Type *DstTy = Ptr.getElementType();
2882 uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(DstTy);
2883
2884 Address AddrToStoreInto = Address::invalid();
2885 if (SrcSize <= DstSize) {
2886 AddrToStoreInto = Builder.CreateElementBitCast(Ptr, STy);
2887 } else {
2888 AddrToStoreInto =
2889 CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
2890 }
2891
2892 assert(STy->getNumElements() == NumIRArgs);
2893 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
2894 auto AI = Fn->getArg(FirstIRArg + i);
2895 AI->setName(Arg->getName() + ".coerce" + Twine(i));
2896 Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
2897 Builder.CreateStore(AI, EltPtr);
2898 }
2899
2900 if (SrcSize > DstSize) {
2901 Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
2902 }
2903
2904 } else {
2905 // Simple case, just do a coerced store of the argument into the alloca.
2906 assert(NumIRArgs == 1);
2907 auto AI = Fn->getArg(FirstIRArg);
2908 AI->setName(Arg->getName() + ".coerce");
2909 CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
2910 }
2911
2912 // Match to what EmitParmDecl is expecting for this type.
2913 if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
2914 llvm::Value *V =
2915 EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
2916 if (isPromoted)
2917 V = emitArgumentDemotion(*this, Arg, V);
2918 ArgVals.push_back(ParamValue::forDirect(V));
2919 } else {
2920 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2921 }
2922 break;
2923 }
2924
2925 case ABIArgInfo::CoerceAndExpand: {
2926 // Reconstruct into a temporary.
2927 Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2928 ArgVals.push_back(ParamValue::forIndirect(alloca));
2929
2930 auto coercionType = ArgI.getCoerceAndExpandType();
2931 alloca = Builder.CreateElementBitCast(alloca, coercionType);
2932
2933 unsigned argIndex = FirstIRArg;
2934 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
2935 llvm::Type *eltType = coercionType->getElementType(i);
2936 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
2937 continue;
2938
2939 auto eltAddr = Builder.CreateStructGEP(alloca, i);
2940 auto elt = Fn->getArg(argIndex++);
2941 Builder.CreateStore(elt, eltAddr);
2942 }
2943 assert(argIndex == FirstIRArg + NumIRArgs);
2944 break;
2945 }
2946
2947 case ABIArgInfo::Expand: {
2948 // If this structure was expanded into multiple arguments then
2949 // we need to create a temporary and reconstruct it from the
2950 // arguments.
2951 Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
2952 LValue LV = MakeAddrLValue(Alloca, Ty);
2953 ArgVals.push_back(ParamValue::forIndirect(Alloca));
2954
2955 auto FnArgIter = Fn->arg_begin() + FirstIRArg;
2956 ExpandTypeFromArgs(Ty, LV, FnArgIter);
2957 assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
2958 for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
2959 auto AI = Fn->getArg(FirstIRArg + i);
2960 AI->setName(Arg->getName() + "." + Twine(i));
2961 }
2962 break;
2963 }
2964
2965 case ABIArgInfo::Ignore:
2966 assert(NumIRArgs == 0);
2967 // Initialize the local variable appropriately.
2968 if (!hasScalarEvaluationKind(Ty)) {
2969 ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
2970 } else {
2971 llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
2972 ArgVals.push_back(ParamValue::forDirect(U));
2973 }
2974 break;
2975 }
2976 }
2977
2978 if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
2979 for (int I = Args.size() - 1; I >= 0; --I)
2980 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2981 } else {
2982 for (unsigned I = 0, E = Args.size(); I != E; ++I)
2983 EmitParmDecl(*Args[I], ArgVals[I], I + 1);
2984 }
2985 }
2986
2987 static void eraseUnusedBitCasts(llvm::Instruction *insn) {
2988 while (insn->use_empty()) {
2989 llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
2990 if (!bitcast) return;
2991
2992 // This is "safe" because we would have used a ConstantExpr otherwise.
2993 insn = cast<llvm::Instruction>(bitcast->getOperand(0));
2994 bitcast->eraseFromParent();
2995 }
2996 }
2997
2998 /// Try to emit a fused autorelease of a return result.
2999 static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
3000 llvm::Value *result) {
3001 // 'result' must be the last instruction in the current block.
3002 llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
3003 if (BB->empty()) return nullptr;
3004 if (&BB->back() != result) return nullptr;
3005
3006 llvm::Type *resultType = result->getType();
3007
3008 // result is in a BasicBlock and is therefore an Instruction.
3009 llvm::Instruction *generator = cast<llvm::Instruction>(result);
3010
3011 SmallVector<llvm::Instruction *, 4> InstsToKill;
3012
3013 // Look for:
3014 // %generator = bitcast %type1* %generator2 to %type2*
3015 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
3016 // We would have emitted this as a constant if the operand weren't
3017 // an Instruction.
3018 generator = cast<llvm::Instruction>(bitcast->getOperand(0));
3019
3020 // Require the generator to be immediately followed by the cast.
3021 if (generator->getNextNode() != bitcast)
3022 return nullptr;
3023
3024 InstsToKill.push_back(bitcast);
3025 }
3026
3027 // Look for:
3028 // %generator = call i8* @objc_retain(i8* %originalResult)
3029 // or
3030 // %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
3031 llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
3032 if (!call) return nullptr;
3033
3034 bool doRetainAutorelease;
3035
3036 if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
3037 doRetainAutorelease = true;
3038 } else if (call->getCalledOperand() ==
3039 CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
3040 doRetainAutorelease = false;
3041
3042 // If we emitted an assembly marker for this call (and the
3043 // ARCEntrypoints field should have been set if so), go looking
3044 // for that call. If we can't find it, we can't do this
3045 // optimization. But it should always be the immediately previous
3046 // instruction, unless we needed bitcasts around the call.
3047 if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
3048 llvm::Instruction *prev = call->getPrevNode();
3049 assert(prev);
3050 if (isa<llvm::BitCastInst>(prev)) {
3051 prev = prev->getPrevNode();
3052 assert(prev);
3053 }
3054 assert(isa<llvm::CallInst>(prev));
3055 assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
3056 CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
3057 InstsToKill.push_back(prev);
3058 }
3059 } else {
3060 return nullptr;
3061 }
3062
3063 result = call->getArgOperand(0);
3064 InstsToKill.push_back(call);
3065
3066 // Keep killing bitcasts, for sanity. Note that we no longer care
3067 // about precise ordering as long as there's exactly one use.
3068 while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
3069 if (!bitcast->hasOneUse()) break;
3070 InstsToKill.push_back(bitcast);
3071 result = bitcast->getOperand(0);
3072 }
3073
3074 // Delete all the unnecessary instructions, from latest to earliest.
3075 for (auto *I : InstsToKill)
3076 I->eraseFromParent();
3077
3078 // Do the fused retain/autorelease if we were asked to.
3079 if (doRetainAutorelease)
3080 result = CGF.EmitARCRetainAutoreleaseReturnValue(result);
3081
3082 // Cast back to the result type.
3083 return CGF.Builder.CreateBitCast(result, resultType);
3084 }
3085
3086 /// If this is a +1 of the value of an immutable 'self', remove it.
3087 static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
3088 llvm::Value *result) {
3089 // This is only applicable to a method with an immutable 'self'.
3090 const ObjCMethodDecl *method =
3091 dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
3092 if (!method) return nullptr;
3093 const VarDecl *self = method->getSelfDecl();
3094 if (!self->getType().isConstQualified()) return nullptr;
3095
3096 // Look for a retain call.
3097 llvm::CallInst *retainCall =
3098 dyn_cast<llvm::CallInst>(result->stripPointerCasts());
3099 if (!retainCall || retainCall->getCalledOperand() !=
3100 CGF.CGM.getObjCEntrypoints().objc_retain)
3101 return nullptr;
3102
3103 // Look for an ordinary load of 'self'.
3104 llvm::Value *retainedValue = retainCall->getArgOperand(0);
3105 llvm::LoadInst *load =
3106 dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
3107 if (!load || load->isAtomic() || load->isVolatile() ||
3108 load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
3109 return nullptr;
3110
3111 // Okay! Burn it all down. This relies for correctness on the
3112 // assumption that the retain is emitted as part of the return and
3113 // that thereafter everything is used "linearly".
3114 llvm::Type *resultType = result->getType();
3115 eraseUnusedBitCasts(cast<llvm::Instruction>(result));
3116 assert(retainCall->use_empty());
3117 retainCall->eraseFromParent();
3118 eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));
3119
3120 return CGF.Builder.CreateBitCast(load, resultType);
3121 }
3122
3123 /// Emit an ARC autorelease of the result of a function.
3124 ///
3125 /// \return the value to actually return from the function
3126 static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
3127 llvm::Value *result) {
3128 // If we're returning 'self', kill the initial retain. This is a
3129 // heuristic attempt to "encourage correctness" in the really unfortunate
3130 // case where we have a return of self during a dealloc and we desperately
3131 // need to avoid the possible autorelease.
3132 if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
3133 return self;
3134
3135 // At -O0, try to emit a fused retain/autorelease.
3136 if (CGF.shouldUseFusedARCCalls())
3137 if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
3138 return fused;
3139
3140 return CGF.EmitARCAutoreleaseReturnValue(result);
3141 }
3142
3143 /// Heuristically search for a dominating store to the return-value slot.
3144 static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
3145 // Check if a User is a store whose pointer operand is the ReturnValue.
3146 // We are looking for stores to the ReturnValue, not for stores of the
3147 // ReturnValue to some other location.
3148 auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
3149 auto *SI = dyn_cast<llvm::StoreInst>(U);
3150 if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer())
3151 return nullptr;
3152 // These aren't actually possible for non-coerced returns, and we
3153 // only care about non-coerced returns on this code path.
3154 assert(!SI->isAtomic() && !SI->isVolatile());
3155 return SI;
3156 };
3157 // If there are multiple uses of the return-value slot, just check
3158 // for something immediately preceding the IP. Sometimes this can
3159 // happen with how we generate implicit-returns; it can also happen
3160 // with noreturn cleanups.
3161 if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
3162 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3163 if (IP->empty()) return nullptr;
3164 llvm::Instruction *I = &IP->back();
3165
3166 // Skip lifetime markers
3167 for (llvm::BasicBlock::reverse_iterator II = IP->rbegin(),
3168 IE = IP->rend();
3169 II != IE; ++II) {
3170 if (llvm::IntrinsicInst *Intrinsic =
3171 dyn_cast<llvm::IntrinsicInst>(&*II)) {
3172 if (Intrinsic->getIntrinsicID() == llvm::Intrinsic::lifetime_end) {
3173 const llvm::Value *CastAddr = Intrinsic->getArgOperand(1);
3174 ++II;
3175 if (II == IE)
3176 break;
3177 if (isa<llvm::BitCastInst>(&*II) && (CastAddr == &*II))
3178 continue;
3179 }
3180 }
3181 I = &*II;
3182 break;
3183 }
3184
3185 return GetStoreIfValid(I);
3186 }
3187
3188 llvm::StoreInst *store =
3189 GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
3190 if (!store) return nullptr;
3191
3192 // Now do a quick-and-dirty dominance check: just walk up the
3193 // single-predecessors chain from the current insertion point.
3194 llvm::BasicBlock *StoreBB = store->getParent();
3195 llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
3196 while (IP != StoreBB) {
3197 if (!(IP = IP->getSinglePredecessor()))
3198 return nullptr;
3199 }
3200
3201 // Okay, the store's basic block dominates the insertion point; we
3202 // can do our thing.
3203 return store;
3204 }
3205
3206 // Helper functions for EmitCMSEClearRecord
3207
3208 // Set the bits corresponding to a field having width `BitWidth` and located at
3209 // offset `BitOffset` (from the least significant bit) within a storage unit of
3210 // `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
3211 // Use little-endian layout, i.e.`Bits[0]` is the LSB.
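// For example, with CharWidth == 8, BitOffset == 4 and BitWidth == 8, this
// sets Bits[0] |= 0xF0 and Bits[1] |= 0x0F.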
3212 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
3213 int BitWidth, int CharWidth) {
3214 assert(CharWidth <= 64);
3215 assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);
3216
3217 int Pos = 0;
3218 if (BitOffset >= CharWidth) {
3219 Pos += BitOffset / CharWidth;
3220 BitOffset = BitOffset % CharWidth;
3221 }
3222
3223 const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
3224 if (BitOffset + BitWidth >= CharWidth) {
3225 Bits[Pos++] |= (Used << BitOffset) & Used;
3226 BitWidth -= CharWidth - BitOffset;
3227 BitOffset = 0;
3228 }
3229
3230 while (BitWidth >= CharWidth) {
3231 Bits[Pos++] = Used;
3232 BitWidth -= CharWidth;
3233 }
3234
3235 if (BitWidth > 0)
3236 Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
3237 }
3238
3239 // Set the bits corresponding to a field having width `BitWidth` and located at
3240 // offset `BitOffset` (from the least significant bit) within a storage unit of
3241 // `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
3242 // `Bits` corresponds to one target byte. Use target endian layout.
3243 static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
3244 int StorageSize, int BitOffset, int BitWidth,
3245 int CharWidth, bool BigEndian) {
3246
3247 SmallVector<uint64_t, 8> TmpBits(StorageSize);
3248 setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);
3249
3250 if (BigEndian)
3251 std::reverse(TmpBits.begin(), TmpBits.end());
3252
3253 for (uint64_t V : TmpBits)
3254 Bits[StorageOffset++] |= V;
3255 }
3256
3257 static void setUsedBits(CodeGenModule &, QualType, int,
3258 SmallVectorImpl<uint64_t> &);
3259
3260 // Set the bits in `Bits`, which correspond to the value representations of
3261 // the actual members of the record type `RTy`. Note that this function does
3262 // not handle base classes, virtual tables, etc., since they cannot appear
3263 // in CMSE function arguments or return values. The bit mask corresponds to
3264 // the target memory layout, i.e. it is endian dependent.
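// For example, for `struct S { char c; int i; }` under a typical ILP32/LP64
// ABI (4-byte int, 3 padding bytes after `c`), bytes 0 and 4..7 of `Bits`
// are marked used while the padding bytes 1..3 stay clear.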
3265 static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
3266 SmallVectorImpl<uint64_t> &Bits) {
3267 ASTContext &Context = CGM.getContext();
3268 int CharWidth = Context.getCharWidth();
3269 const RecordDecl *RD = RTy->getDecl()->getDefinition();
3270 const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
3271 const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);
3272
3273 int Idx = 0;
3274 for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
3275 const FieldDecl *F = *I;
3276
3277 if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
3278 F->getType()->isIncompleteArrayType())
3279 continue;
3280
3281 if (F->isBitField()) {
3282 const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
3283 setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
3284 BFI.StorageSize / CharWidth, BFI.Offset,
3285 BFI.Size, CharWidth,
3286 CGM.getDataLayout().isBigEndian());
3287 continue;
3288 }
3289
3290 setUsedBits(CGM, F->getType(),
3291 Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
3292 }
3293 }
3294
3295 // Set the bits in `Bits`, which correspond to the value representations of
3296 // the elements of an array type `ATy`.
3297 static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
3298 int Offset, SmallVectorImpl<uint64_t> &Bits) {
3299 const ASTContext &Context = CGM.getContext();
3300
3301 QualType ETy = Context.getBaseElementType(ATy);
3302 int Size = Context.getTypeSizeInChars(ETy).getQuantity();
3303 SmallVector<uint64_t, 4> TmpBits(Size);
3304 setUsedBits(CGM, ETy, 0, TmpBits);
3305
3306 for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
3307 auto Src = TmpBits.begin();
3308 auto Dst = Bits.begin() + Offset + I * Size;
3309 for (int J = 0; J < Size; ++J)
3310 *Dst++ |= *Src++;
3311 }
3312 }
3313
3314 // Set the bits in `Bits`, which correspond to the value representations of
3315 // the type `QTy`.
3316 static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
3317 SmallVectorImpl<uint64_t> &Bits) {
3318 if (const auto *RTy = QTy->getAs<RecordType>())
3319 return setUsedBits(CGM, RTy, Offset, Bits);
3320
3321 ASTContext &Context = CGM.getContext();
3322 if (const auto *ATy = Context.getAsConstantArrayType(QTy))
3323 return setUsedBits(CGM, ATy, Offset, Bits);
3324
3325 int Size = Context.getTypeSizeInChars(QTy).getQuantity();
3326 if (Size <= 0)
3327 return;
3328
3329 std::fill_n(Bits.begin() + Offset, Size,
3330 (uint64_t(1) << Context.getCharWidth()) - 1);
3331 }
3332
3333 static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
3334 int Pos, int Size, int CharWidth,
3335 bool BigEndian) {
3336 assert(Size > 0);
3337 uint64_t Mask = 0;
3338 if (BigEndian) {
3339 for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
3340 ++P)
3341 Mask = (Mask << CharWidth) | *P;
3342 } else {
3343 auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
3344 do
3345 Mask = (Mask << CharWidth) | *--P;
3346 while (P != End);
3347 }
3348 return Mask;
3349 }
3350
3351 // Emit code to clear the bits of a record that aren't part of any
3352 // user-declared member, when the record is returned from a function.
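// For example, returning the `struct S { char c; int i; }` above as an i64
// on a little-endian target masks the return value with
// 0xFFFFFFFF000000FF, clearing the three padding bytes (a sketch; the
// exact lowering is target dependent).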
3353 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3354 llvm::IntegerType *ITy,
3355 QualType QTy) {
3356 assert(Src->getType() == ITy);
3357 assert(ITy->getScalarSizeInBits() <= 64);
3358
3359 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3360 int Size = DataLayout.getTypeStoreSize(ITy);
3361 SmallVector<uint64_t, 4> Bits(Size);
3362 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3363
3364 int CharWidth = CGM.getContext().getCharWidth();
3365 uint64_t Mask =
3366 buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());
3367
3368 return Builder.CreateAnd(Src, Mask, "cmse.clear");
3369 }
3370
3371 // Emit code to clear the bits of a record that aren't part of any
3372 // user-declared member, when the record is passed as a function argument.
3373 llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
3374 llvm::ArrayType *ATy,
3375 QualType QTy) {
3376 const llvm::DataLayout &DataLayout = CGM.getDataLayout();
3377 int Size = DataLayout.getTypeStoreSize(ATy);
3378 SmallVector<uint64_t, 16> Bits(Size);
3379 setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);
3380
3381 // Clear each element of the LLVM array.
3382 int CharWidth = CGM.getContext().getCharWidth();
3383 int CharsPerElt =
3384 ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
3385 int MaskIndex = 0;
3386 llvm::Value *R = llvm::UndefValue::get(ATy);
3387 for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
3388 uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
3389 DataLayout.isBigEndian());
3390 MaskIndex += CharsPerElt;
3391 llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
3392 llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
3393 R = Builder.CreateInsertValue(R, T1, I);
3394 }
3395
3396 return R;
3397 }
3398
3399 void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
3400 bool EmitRetDbgLoc,
3401 SourceLocation EndLoc) {
3402 if (FI.isNoReturn()) {
3403 // Noreturn functions don't return.
3404 EmitUnreachable(EndLoc);
3405 return;
3406 }
3407
3408 if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
3409 // Naked functions don't have epilogues.
3410 Builder.CreateUnreachable();
3411 return;
3412 }
3413
3414 // Functions with no result always return void.
3415 if (!ReturnValue.isValid()) {
3416 Builder.CreateRetVoid();
3417 return;
3418 }
3419
3420 llvm::DebugLoc RetDbgLoc;
3421 llvm::Value *RV = nullptr;
3422 QualType RetTy = FI.getReturnType();
3423 const ABIArgInfo &RetAI = FI.getReturnInfo();
3424
3425 switch (RetAI.getKind()) {
3426 case ABIArgInfo::InAlloca:
3427 // Aggregates get evaluated directly into the destination. Sometimes we
3428 // need to return the sret value in a register, though.
3429 assert(hasAggregateEvaluationKind(RetTy));
3430 if (RetAI.getInAllocaSRet()) {
3431 llvm::Function::arg_iterator EI = CurFn->arg_end();
3432 --EI;
3433 llvm::Value *ArgStruct = &*EI;
3434 llvm::Value *SRet = Builder.CreateStructGEP(
3435 nullptr, ArgStruct, RetAI.getInAllocaFieldIndex());
3436 llvm::Type *Ty =
3437 cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
3438 RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
3439 }
3440 break;
3441
3442 case ABIArgInfo::Indirect: {
3443 auto AI = CurFn->arg_begin();
3444 if (RetAI.isSRetAfterThis())
3445 ++AI;
3446 switch (getEvaluationKind(RetTy)) {
3447 case TEK_Complex: {
3448 ComplexPairTy RT =
3449 EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
3450 EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
3451 /*isInit*/ true);
3452 break;
3453 }
3454 case TEK_Aggregate:
3455 // Do nothing; aggregates get evaluated directly into the destination.
3456 break;
3457 case TEK_Scalar:
3458 EmitStoreOfScalar(Builder.CreateLoad(ReturnValue),
3459 MakeNaturalAlignAddrLValue(&*AI, RetTy),
3460 /*isInit*/ true);
3461 break;
3462 }
3463 break;
3464 }
3465
3466 case ABIArgInfo::Extend:
3467 case ABIArgInfo::Direct:
3468 if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
3469 RetAI.getDirectOffset() == 0) {
3470 // The internal return value temp always will have pointer-to-return-type
3471 // type, just do a load.
3472
3473 // If there is a dominating store to ReturnValue, we can elide
3474 // the load, zap the store, and usually zap the alloca.
3475 if (llvm::StoreInst *SI =
3476 findDominatingStoreToReturnValue(*this)) {
3477 // Reuse the debug location from the store unless there is
3478 // cleanup code to be emitted between the store and return
3479 // instruction.
3480 if (EmitRetDbgLoc && !AutoreleaseResult)
3481 RetDbgLoc = SI->getDebugLoc();
3482 // Get the stored value and nuke the now-dead store.
3483 RV = SI->getValueOperand();
3484 SI->eraseFromParent();
3485
3486 // Otherwise, we have to do a simple load.
3487 } else {
3488 RV = Builder.CreateLoad(ReturnValue);
3489 }
3490 } else {
3491 // If the value is offset in memory, apply the offset now.
3492 Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);
3493
3494 RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
3495 }
3496
3497 // In ARC, end functions that return a retainable type with a call
3498 // to objc_autoreleaseReturnValue.
3499 if (AutoreleaseResult) {
3500 #ifndef NDEBUG
3501 // Type::isObjCRetainableType has to be called on a QualType that hasn't
3502 // been stripped of the typedefs, so we cannot use RetTy here. Get the
3503 // original return type from the FunctionDecl, ObjCMethodDecl, or
3504 // BlockDecl instead.
3505 QualType RT;
3506
3507 if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
3508 RT = FD->getReturnType();
3509 else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
3510 RT = MD->getReturnType();
3511 else if (isa<BlockDecl>(CurCodeDecl))
3512 RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
3513 else
3514 llvm_unreachable("Unexpected function/method type");
3515
3516 assert(getLangOpts().ObjCAutoRefCount &&
3517 !FI.isReturnsRetained() &&
3518 RT->isObjCRetainableType());
3519 #endif
3520 RV = emitAutoreleaseOfResult(*this, RV);
3521 }
3522
3523 break;
3524
3525 case ABIArgInfo::Ignore:
3526 break;
3527
3528 case ABIArgInfo::CoerceAndExpand: {
3529 auto coercionType = RetAI.getCoerceAndExpandType();
3530
3531 // Load all of the coerced elements out into results.
3532 llvm::SmallVector<llvm::Value*, 4> results;
3533 Address addr = Builder.CreateElementBitCast(ReturnValue, coercionType);
3534 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
3535 auto coercedEltType = coercionType->getElementType(i);
3536 if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
3537 continue;
3538
3539 auto eltAddr = Builder.CreateStructGEP(addr, i);
3540 auto elt = Builder.CreateLoad(eltAddr);
3541 results.push_back(elt);
3542 }
3543
3544 // If we have one result, it's the single direct result type.
3545 if (results.size() == 1) {
3546 RV = results[0];
3547
3548 // Otherwise, we need to make a first-class aggregate.
3549 } else {
3550 // Construct a return type that lacks padding elements.
3551 llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();
3552
3553 RV = llvm::UndefValue::get(returnType);
3554 for (unsigned i = 0, e = results.size(); i != e; ++i) {
3555 RV = Builder.CreateInsertValue(RV, results[i], i);
3556 }
3557 }
3558 break;
3559 }
3560 case ABIArgInfo::Expand:
3561 case ABIArgInfo::IndirectAliased:
3562 llvm_unreachable("Invalid ABI kind for return argument");
3563 }
3564
3565 llvm::Instruction *Ret;
3566 if (RV) {
3567 if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
3568 // For certain return types, clear padding bits, as they may reveal
3569 // sensitive information.
3570 // Small struct/union types are passed as integers.
3571 auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
3572 if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
3573 RV = EmitCMSEClearRecord(RV, ITy, RetTy);
3574 }
3575 EmitReturnValueCheck(RV);
3576 Ret = Builder.CreateRet(RV);
3577 } else {
3578 Ret = Builder.CreateRetVoid();
3579 }
3580
3581 if (RetDbgLoc)
3582 Ret->setDebugLoc(std::move(RetDbgLoc));
3583 }
3584
3585 void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
3586 // A current decl may not be available when emitting vtable thunks.
3587 if (!CurCodeDecl)
3588 return;
3589
3590 // If the return block isn't reachable, neither is this check, so don't emit
3591 // it.
3592 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
3593 return;
3594
3595 ReturnsNonNullAttr *RetNNAttr = nullptr;
3596 if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
3597 RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();
3598
3599 if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
3600 return;
3601
3602 // Prefer the returns_nonnull attribute if it's present.
3603 SourceLocation AttrLoc;
3604 SanitizerMask CheckKind;
3605 SanitizerHandler Handler;
3606 if (RetNNAttr) {
3607 assert(!requiresReturnValueNullabilityCheck() &&
3608 "Cannot check nullability and the nonnull attribute");
3609 AttrLoc = RetNNAttr->getLocation();
3610 CheckKind = SanitizerKind::ReturnsNonnullAttribute;
3611 Handler = SanitizerHandler::NonnullReturn;
3612 } else {
3613 if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
3614 if (auto *TSI = DD->getTypeSourceInfo())
3615 if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
3616 AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
3617 CheckKind = SanitizerKind::NullabilityReturn;
3618 Handler = SanitizerHandler::NullabilityReturn;
3619 }
3620
3621 SanitizerScope SanScope(this);
3622
3623 // Make sure the "return" source location is valid. If we're checking a
3624 // nullability annotation, make sure the preconditions for the check are met.
3625 llvm::BasicBlock *Check = createBasicBlock("nullcheck");
3626 llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
3627 llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
3628 llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
3629 if (requiresReturnValueNullabilityCheck())
3630 CanNullCheck =
3631 Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
3632 Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
3633 EmitBlock(Check);
3634
3635 // Now do the null check.
3636 llvm::Value *Cond = Builder.CreateIsNotNull(RV);
3637 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
3638 llvm::Value *DynamicData[] = {SLocPtr};
3639 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);
3640
3641 EmitBlock(NoCheck);
3642
3643 #ifndef NDEBUG
3644 // The return location should not be used after the check has been emitted.
3645 ReturnLocation = Address::invalid();
3646 #endif
3647 }
3648
3649 static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
3650 const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
3651 return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
3652 }
3653
3654 static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
3655 QualType Ty) {
3656 // FIXME: Generate IR in one pass, rather than going back and fixing up these
3657 // placeholders.
3658 llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
3659 llvm::Type *IRPtrTy = IRTy->getPointerTo();
3660 llvm::Value *Placeholder = llvm::UndefValue::get(IRPtrTy->getPointerTo());
3661
3662 // FIXME: When we generate this IR in one pass, we shouldn't need
3663 // this win32-specific alignment hack.
3664 CharUnits Align = CharUnits::fromQuantity(4);
3665 Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);
3666
3667 return AggValueSlot::forAddr(Address(Placeholder, Align),
3668 Ty.getQualifiers(),
3669 AggValueSlot::IsNotDestructed,
3670 AggValueSlot::DoesNotNeedGCBarriers,
3671 AggValueSlot::IsNotAliased,
3672 AggValueSlot::DoesNotOverlap);
3673 }
3674
3675 void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
3676 const VarDecl *param,
3677 SourceLocation loc) {
3678 // StartFunction converted the ABI-lowered parameter(s) into a
3679 // local alloca. We need to turn that into an r-value suitable
3680 // for EmitCall.
3681 Address local = GetAddrOfLocalVar(param);
3682
3683 QualType type = param->getType();
3684
3685 if (isInAllocaArgument(CGM.getCXXABI(), type)) {
3686 CGM.ErrorUnsupported(param, "forwarded non-trivially copyable parameter");
3687 }
3688
3689 // GetAddrOfLocalVar returns a pointer-to-pointer for references,
3690 // but the argument needs to be the original pointer.
3691 if (type->isReferenceType()) {
3692 args.add(RValue::get(Builder.CreateLoad(local)), type);
3693
3694 // In ARC, move out of consumed arguments so that the release cleanup
3695 // entered by StartFunction doesn't cause an over-release. This isn't
3696 // optimal -O0 code generation, but it should get cleaned up when
3697 // optimization is enabled. This also assumes that delegate calls are
3698 // performed exactly once for a set of arguments, but that should be safe.
3699 } else if (getLangOpts().ObjCAutoRefCount &&
3700 param->hasAttr<NSConsumedAttr>() &&
3701 type->isObjCRetainableType()) {
3702 llvm::Value *ptr = Builder.CreateLoad(local);
3703 auto null =
3704 llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
3705 Builder.CreateStore(null, local);
3706 args.add(RValue::get(ptr), type);
3707
3708 // For the most part, we just need to load the alloca, except that
3709 // aggregate r-values are actually pointers to temporaries.
3710 } else {
3711 args.add(convertTempToRValue(local, type, loc), type);
3712 }
3713
3714 // Deactivate the cleanup for the callee-destructed param that was pushed.
3715 if (type->isRecordType() && !CurFuncIsThunk &&
3716 type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
3717 param->needsDestruction(getContext())) {
3718 EHScopeStack::stable_iterator cleanup =
3719 CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
3720 assert(cleanup.isValid() &&
3721 "cleanup for callee-destructed param not recorded");
3722 // This unreachable is a temporary marker which will be removed later.
3723 llvm::Instruction *isActive = Builder.CreateUnreachable();
3724 args.addArgCleanupDeactivation(cleanup, isActive);
3725 }
3726 }
3727
3728 static bool isProvablyNull(llvm::Value *addr) {
3729 return isa<llvm::ConstantPointerNull>(addr);
3730 }
3731
3732 /// Emit the actual writing-back of a writeback.
3733 static void emitWriteback(CodeGenFunction &CGF,
3734 const CallArgList::Writeback &writeback) {
3735 const LValue &srcLV = writeback.Source;
3736 Address srcAddr = srcLV.getAddress(CGF);
3737 assert(!isProvablyNull(srcAddr.getPointer()) &&
3738 "shouldn't have writeback for provably null argument");
3739
3740 llvm::BasicBlock *contBB = nullptr;
3741
3742 // If the argument wasn't provably non-null, we need to null check
3743 // before doing the store.
3744 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3745 CGF.CGM.getDataLayout());
3746 if (!provablyNonNull) {
3747 llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
3748 contBB = CGF.createBasicBlock("icr.done");
3749
3750 llvm::Value *isNull =
3751 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3752 CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
3753 CGF.EmitBlock(writebackBB);
3754 }
3755
3756 // Load the value to writeback.
3757 llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);
3758
3759 // Cast it back, in case we're writing an id to a Foo* or something.
3760 value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
3761 "icr.writeback-cast");
3762
3763 // Perform the writeback.
3764
3765 // If we have a "to use" value, it's something we need to emit a use
3766 // of. This has to be carefully threaded in: if it's done after the
3767 // release it's potentially undefined behavior (and the optimizer
3768 // will ignore it), and if it happens before the retain then the
3769 // optimizer could move the release there.
3770 if (writeback.ToUse) {
3771 assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);
3772
3773 // Retain the new value. No need to block-copy here: the block's
3774 // being passed up the stack.
3775 value = CGF.EmitARCRetainNonBlock(value);
3776
3777 // Emit the intrinsic use here.
3778 CGF.EmitARCIntrinsicUse(writeback.ToUse);
3779
3780 // Load the old value (primitively).
3781 llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());
3782
3783 // Put the new value in place (primitively).
3784 CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);
3785
3786 // Release the old value.
3787 CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());
3788
3789 // Otherwise, we can just do a normal lvalue store.
3790 } else {
3791 CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
3792 }
3793
3794 // Jump to the continuation block.
3795 if (!provablyNonNull)
3796 CGF.EmitBlock(contBB);
3797 }
3798
3799 static void emitWritebacks(CodeGenFunction &CGF,
3800 const CallArgList &args) {
3801 for (const auto &I : args.writebacks())
3802 emitWriteback(CGF, I);
3803 }
3804
3805 static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
3806 const CallArgList &CallArgs) {
3807 ArrayRef<CallArgList::CallArgCleanup> Cleanups =
3808 CallArgs.getCleanupsToDeactivate();
3809 // Iterate in reverse to increase the likelihood of popping the cleanup.
3810 for (const auto &I : llvm::reverse(Cleanups)) {
3811 CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
3812 I.IsActiveIP->eraseFromParent();
3813 }
3814 }
3815
3816 static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
3817 if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
3818 if (uop->getOpcode() == UO_AddrOf)
3819 return uop->getSubExpr();
3820 return nullptr;
3821 }
3822
3823 /// Emit an argument that's being passed call-by-writeback. That is,
3824 /// we are passing the address of an __autoreleased temporary; it
3825 /// might be copy-initialized with the current value of the given
3826 /// address, but it will definitely be copied out of after the call.
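/// A typical example: passing `&err` to a method taking `NSError **` (i.e.
/// `NSError * __autoreleasing *`) where `err` is __strong. We pass the
/// address of an __autoreleasing temporary and copy its value back into
/// `err` after the call.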
3827 static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
3828 const ObjCIndirectCopyRestoreExpr *CRE) {
3829 LValue srcLV;
3830
3831 // Make an optimistic effort to emit the address as an l-value.
3832 // This can fail if the argument expression is more complicated.
3833 if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
3834 srcLV = CGF.EmitLValue(lvExpr);
3835
3836 // Otherwise, just emit it as a scalar.
3837 } else {
3838 Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());
3839
3840 QualType srcAddrType =
3841 CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
3842 srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
3843 }
3844 Address srcAddr = srcLV.getAddress(CGF);
3845
3846 // The dest and src types don't necessarily match in LLVM terms
3847 // because of the crazy ObjC compatibility rules.
3848
3849 llvm::PointerType *destType =
3850 cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
3851
3852 // If the address is a constant null, just pass the appropriate null.
3853 if (isProvablyNull(srcAddr.getPointer())) {
3854 args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
3855 CRE->getType());
3856 return;
3857 }
3858
3859 // Create the temporary.
3860 Address temp = CGF.CreateTempAlloca(destType->getElementType(),
3861 CGF.getPointerAlign(),
3862 "icr.temp");
3863 // Loading an l-value can introduce a cleanup if the l-value is __weak,
3864 // and that cleanup will be conditional if we can't prove that the l-value
3865 // isn't null, so we need to register a dominating point so that the cleanups
3866 // system will make valid IR.
3867 CodeGenFunction::ConditionalEvaluation condEval(CGF);
3868
3869 // Zero-initialize it if we're not doing a copy-initialization.
3870 bool shouldCopy = CRE->shouldCopy();
3871 if (!shouldCopy) {
3872 llvm::Value *null =
3873 llvm::ConstantPointerNull::get(
3874 cast<llvm::PointerType>(destType->getElementType()));
3875 CGF.Builder.CreateStore(null, temp);
3876 }
3877
3878 llvm::BasicBlock *contBB = nullptr;
3879 llvm::BasicBlock *originBB = nullptr;
3880
3881 // If the address is *not* known to be non-null, we need to switch.
3882 llvm::Value *finalArgument;
3883
3884 bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
3885 CGF.CGM.getDataLayout());
3886 if (provablyNonNull) {
3887 finalArgument = temp.getPointer();
3888 } else {
3889 llvm::Value *isNull =
3890 CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
3891
3892 finalArgument = CGF.Builder.CreateSelect(isNull,
3893 llvm::ConstantPointerNull::get(destType),
3894 temp.getPointer(), "icr.argument");
3895
3896 // If we need to copy, then the load has to be conditional, which
3897 // means we need control flow.
3898 if (shouldCopy) {
3899 originBB = CGF.Builder.GetInsertBlock();
3900 contBB = CGF.createBasicBlock("icr.cont");
3901 llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
3902 CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
3903 CGF.EmitBlock(copyBB);
3904 condEval.begin(CGF);
3905 }
3906 }
3907
3908 llvm::Value *valueToUse = nullptr;
3909
3910 // Perform a copy if necessary.
3911 if (shouldCopy) {
3912 RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
3913 assert(srcRV.isScalar());
3914
3915 llvm::Value *src = srcRV.getScalarVal();
3916 src = CGF.Builder.CreateBitCast(src, destType->getElementType(),
3917 "icr.cast");
3918
3919 // Use an ordinary store, not a store-to-lvalue.
3920 CGF.Builder.CreateStore(src, temp);
3921
3922 // If optimization is enabled, and the value was held in a
3923 // __strong variable, we need to tell the optimizer that this
3924 // value has to stay alive until we're doing the store back.
3925 // This is because the temporary is effectively unretained,
3926 // and so otherwise we can violate the high-level semantics.
3927 if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
3928 srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
3929 valueToUse = src;
3930 }
3931 }
3932
3933 // Finish the control flow if we needed it.
3934 if (shouldCopy && !provablyNonNull) {
3935 llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
3936 CGF.EmitBlock(contBB);
3937
3938 // Make a phi for the value to intrinsically use.
3939 if (valueToUse) {
3940 llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
3941 "icr.to-use");
3942 phiToUse->addIncoming(valueToUse, copyBB);
3943 phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
3944 originBB);
3945 valueToUse = phiToUse;
3946 }
3947
3948 condEval.end(CGF);
3949 }
3950
3951 args.addWriteback(srcLV, temp, valueToUse);
3952 args.add(RValue::get(finalArgument), CRE->getType());
3953 }
3954
3955 void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
3956 assert(!StackBase);
3957
3958 // Save the stack.
3959 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stacksave);
3960 StackBase = CGF.Builder.CreateCall(F, {}, "inalloca.save");
3961 }
3962
3963 void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
3964 if (StackBase) {
3965 // Restore the stack after the call.
3966 llvm::Function *F = CGF.CGM.getIntrinsic(llvm::Intrinsic::stackrestore);
3967 CGF.Builder.CreateCall(F, StackBase);
3968 }
3969 }
3970
3971 void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
3972 SourceLocation ArgLoc,
3973 AbstractCallee AC,
3974 unsigned ParmNum) {
3975 if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
3976 SanOpts.has(SanitizerKind::NullabilityArg)))
3977 return;
3978
3979 // The param decl may be missing in a variadic function.
3980 auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
3981 unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;
3982
3983 // Prefer the nonnull attribute if it's present.
3984 const NonNullAttr *NNAttr = nullptr;
3985 if (SanOpts.has(SanitizerKind::NonnullAttribute))
3986 NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);
3987
3988 bool CanCheckNullability = false;
3989 if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
3990 auto Nullability = PVD->getType()->getNullability(getContext());
3991 CanCheckNullability = Nullability &&
3992 *Nullability == NullabilityKind::NonNull &&
3993 PVD->getTypeSourceInfo();
3994 }
3995
3996 if (!NNAttr && !CanCheckNullability)
3997 return;
3998
3999 SourceLocation AttrLoc;
4000 SanitizerMask CheckKind;
4001 SanitizerHandler Handler;
4002 if (NNAttr) {
4003 AttrLoc = NNAttr->getLocation();
4004 CheckKind = SanitizerKind::NonnullAttribute;
4005 Handler = SanitizerHandler::NonnullArg;
4006 } else {
4007 AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
4008 CheckKind = SanitizerKind::NullabilityArg;
4009 Handler = SanitizerHandler::NullabilityArg;
4010 }
4011
4012 SanitizerScope SanScope(this);
4013 llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
4014 llvm::Constant *StaticData[] = {
4015 EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
4016 llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
4017 };
4018 EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, None);
4019 }
4020
4021 // Check if the call is going to use the inalloca convention. This needs to
4022 // agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
4023 // later, so we can't check it directly.
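// For example, on 32-bit x86 under the MSVC ABI, an argument of class type
// with a non-trivial copy constructor must be constructed directly in the
// argument area, which is modeled with the inalloca attribute.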
4024 static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
4025 ArrayRef<QualType> ArgTypes) {
4026 // The Swift calling convention doesn't go through the target-specific
4027 // argument classification, so it never uses inalloca.
4028 // TODO: Consider limiting inalloca use to only calling conventions supported
4029 // by MSVC.
4030 if (ExplicitCC == CC_Swift)
4031 return false;
4032 if (!CGM.getTarget().getCXXABI().isMicrosoft())
4033 return false;
4034 return llvm::any_of(ArgTypes, [&](QualType Ty) {
4035 return isInAllocaArgument(CGM.getCXXABI(), Ty);
4036 });
4037 }
4038
4039 #ifndef NDEBUG
4040 // Determine whether the given argument is an Objective-C method
4041 // that may have type parameters in its signature.
4042 static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
4043 const DeclContext *dc = method->getDeclContext();
4044 if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
4045 return classDecl->getTypeParamListAsWritten();
4046 }
4047
4048 if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
4049 return catDecl->getTypeParamList();
4050 }
4051
4052 return false;
4053 }
4054 #endif
4055
4056 /// EmitCallArgs - Emit call arguments for a function.
4057 void CodeGenFunction::EmitCallArgs(
4058 CallArgList &Args, PrototypeWrapper Prototype,
4059 llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
4060 AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
4061 SmallVector<QualType, 16> ArgTypes;
4062
4063 assert((ParamsToSkip == 0 || Prototype.P) &&
4064 "Can't skip parameters if type info is not provided");
4065
4066 // This variable only captures *explicitly* written conventions, not those
4067 // applied by default via command line flags or target defaults, such as
4068 // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
4069 // require knowing if this is a C++ instance method or being able to see
4070 // unprototyped FunctionTypes.
4071 CallingConv ExplicitCC = CC_C;
4072
4073 // First, if a prototype was provided, use those argument types.
4074 bool IsVariadic = false;
4075 if (Prototype.P) {
4076 const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
4077 if (MD) {
4078 IsVariadic = MD->isVariadic();
4079 ExplicitCC = getCallingConventionForDecl(
4080 MD, CGM.getTarget().getTriple().isOSWindows());
4081 ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
4082 MD->param_type_end());
4083 } else {
4084 const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
4085 IsVariadic = FPT->isVariadic();
4086 ExplicitCC = FPT->getExtInfo().getCC();
4087 ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
4088 FPT->param_type_end());
4089 }
4090
4091 #ifndef NDEBUG
4092 // Check that the prototyped types match the argument expression types.
4093 bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
4094 CallExpr::const_arg_iterator Arg = ArgRange.begin();
4095 for (QualType Ty : ArgTypes) {
4096 assert(Arg != ArgRange.end() && "Running over edge of argument list!");
4097 assert(
4098 (isGenericMethod || Ty->isVariablyModifiedType() ||
4099 Ty.getNonReferenceType()->isObjCRetainableType() ||
4100 getContext()
4101 .getCanonicalType(Ty.getNonReferenceType())
4102 .getTypePtr() ==
4103 getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
4104 "type mismatch in call argument!");
4105 ++Arg;
4106 }
4107
4108 // Either we've emitted all the call args, or we have a call to variadic
4109 // function.
4110 assert((Arg == ArgRange.end() || IsVariadic) &&
4111 "Extra arguments in non-variadic function!");
4112 #endif
4113 }
4114
4115 // If we still have any arguments, emit them using the type of the argument.
4116 for (auto *A : llvm::make_range(std::next(ArgRange.begin(), ArgTypes.size()),
4117 ArgRange.end()))
4118 ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
4119 assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));
4120
4121 // We must evaluate arguments from right to left in the MS C++ ABI,
4122 // because arguments are destroyed left to right in the callee. As a special
4123 // case, there are certain language constructs that require left-to-right
4124 // evaluation, and in those cases we consider the evaluation order requirement
4125 // to trump the "destruction order is reverse construction order" guarantee.
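// For example, under the MS C++ ABI a call `f(a(), b())` normally evaluates
// b() before a(), so that the leftmost argument is constructed last and can
// be destroyed first by the callee.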
4126 bool LeftToRight =
4127 CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
4128 ? Order == EvaluationOrder::ForceLeftToRight
4129 : Order != EvaluationOrder::ForceRightToLeft;

  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
                                         RValue EmittedArg) {
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
      return;
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                     EmittedArg.getScalarVal(),
                                                     PS->isDynamic());
    Args.add(RValue::get(V), SizeTy);
    // If we're emitting args in reverse, be sure to do so with
    // pass_object_size as well.
    if (!LeftToRight)
      std::swap(Args.back(), *(&Args.back() - 1));
  };

  // Insert a stack save if we're going to need any inalloca args.
  if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
           "inalloca only supported on x86");
    Args.allocateArgumentMemory(*this);
  }

  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    unsigned InitialArgSize = Args.size();
    // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types
    // of the argument and parameter match or the ObjC method is parameterized.
    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
                                                ArgTypes[Idx]) ||
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
           "Argument and parameter types don't match");
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    // In particular, we depend on it being the last arg in Args, and the
    // objectsize bits depend on there only being one arg if !LeftToRight.
    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;
    // Since pointer arguments are never emitted as LValues, it is safe to
    // emit the non-null argument check for r-values only.
    if (!Args.back().hasLValue()) {
      RValue RVArg = Args.back().getKnownRValue();
      EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
                          ParamsToSkip + Idx);
      // @llvm.objectsize should never have side-effects and shouldn't need
      // destruction/cleanups, so we can safely "emit" it after its arg,
      // regardless of right-to-leftness.
      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
    }
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}

namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    QualType::DestructionKind DtorKind = Ty.isDestructedType();
    if (DtorKind == QualType::DK_cxx_destructor) {
      const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
      assert(!Dtor->isTrivial());
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*ForVirtualBase=*/false,
                                /*Delegating=*/false, Addr, Ty);
    } else {
      CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
    }
  }
};
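
// Hedged usage sketch (hypothetical class HasDtor with a non-trivial
// destructor, not part of this file): in the MS C++ ABI a by-value argument
// in
//   void callee(HasDtor);  callee(HasDtor());
// is destroyed by the callee, so the caller only registers this cleanup for
// the exceptional path, i.e. unwinding after the argument is constructed but
// before the call instruction executes.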

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace

RValue CallArg::getRValue(CodeGenFunction &CGF) const {
  if (!HasLV)
    return RV;
  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
                        LV.isVolatile());
  IsUsed = true;
  return RValue::getAggregate(Copy.getAddress(CGF));
}

void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
  if (!HasLV && RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
  else if (!HasLV && RV.isComplex())
    CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
  else {
    auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
    LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
    // We assume that call args are never copied into subobjects.
    CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
                          HasLV ? LV.isVolatileQualified()
                                : RV.isVolatileQualified());
  }
  IsUsed = true;
}

void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the
  // callee. However, we still have to push an EH-only cleanup in case we
  // unwind before we make it to the call.
  if (type->isRecordType() &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot;
    if (args.isUsingInAlloca())
      Slot = createPlaceholderSlot(*this, type);
    else
      Slot = CreateAggTemp(type, "agg.tmp");

    bool DestroyedInCallee = true, NeedsEHCleanup = true;
    if (const auto *RD = type->getAsCXXRecordDecl())
      DestroyedInCallee = RD->hasNonTrivialDestructor();
    else
      NeedsEHCleanup = needsEHCleanup(type.isDestructedType());

    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee && NeedsEHCleanup) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
                                              type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.getInnermostEHScope(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.addUncopiedAggregate(L, type);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(0) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}
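
// Illustrative sketch of why this matters (hypothetical caller, not part of
// this file): on Win64, system headers define NULL as plain 0, so in
//   printf("%p", NULL);
// the vararg would otherwise be passed as a 32-bit int; widening the null
// pointer constant to intptr_t keeps va_arg(ap, void *) in the callee from
// reading four bytes of garbage.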

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, None, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                         ArrayRef<llvm::Value *> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, None, name);
}

// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
SmallVector<llvm::OperandBundleDef, 1>
CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  // There is no need for a funclet operand bundle if we aren't inside a
  // funclet.
  if (!CurrentFuncletPad)
    return BundleList;

  // Skip intrinsics which cannot throw.
  auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts());
  if (CalleeFn && CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow())
    return BundleList;

  BundleList.emplace_back("funclet", CurrentFuncletPad);
  return BundleList;
}
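
// Sketch of the resulting IR (hand-written illustration): a call made while
// CurrentFuncletPad points at a cleanuppad is tagged like
//   %pad = cleanuppad within none []
//   call void @dtor(%struct.S* %obj) [ "funclet"(token %pad) ]
// so later EH preparation can attribute the call to its enclosing funclet.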

/// Emits a simple call (never an invoke) to the given runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 ArrayRef<llvm::Value *> args,
                                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(
      callee, args, getBundlesForFunclet(callee.getCallee()), name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
    llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(callee.getCallee());

  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
        Builder.CreateInvoke(callee,
                             getUnreachableBlock(),
                             getInvokeDest(),
                             args,
                             BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime function.
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, None, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                         ArrayRef<llvm::Value *> args,
                                         const Twine &name) {
  llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
                                                  ArrayRef<llvm::Value *> Args,
                                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(Callee.getCallee());

  llvm::CallBase *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}
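
// Hedged IR sketch of the two shapes EmitCallOrInvoke produces: with no
// active EH edge the result is a plain
//   %r = call i32 @f(i32 %x)
// while inside a scope with an invoke destination it becomes
//   %r = invoke i32 @f(i32 %x)
//           to label %invoke.cont unwind label %lpad
// with subsequent code emitted into %invoke.cont.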

void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(
      std::make_pair(llvm::WeakTrackingVH(Old), New));
}

namespace {

/// Specify the given \p NewAlign as the alignment of the return value
/// attribute. If such an attribute already exists, re-set it to the larger
/// of the two alignments.
LLVM_NODISCARD llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)
    return Attrs;
  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs
      .removeAttribute(Ctx, llvm::AttributeList::ReturnIndex,
                       llvm::Attribute::AttrKind::Alignment)
      .addAttribute(Ctx, llvm::AttributeList::ReturnIndex, AlignAttr);
}
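
// Worked example (illustrative, derived from the logic above): if Attrs
// already carries `align 16` on the return value and NewAlign is 32, the
// result carries `align 32`; if NewAlign were 8, Attrs would be returned
// unchanged because the existing alignment is already at least as strong.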

template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
protected:
  CodeGenFunction &CGF;

  /// We do nothing if this is, or becomes, nullptr.
  const AlignedAttrTy *AA = nullptr;

  llvm::Value *Alignment = nullptr;      // May or may not be a constant.
  llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.

  AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : CGF(CGF_) {
    if (!FuncDecl)
      return;
    AA = FuncDecl->getAttr<AlignedAttrTy>();
  }

public:
  /// If we can, materialize the alignment as an attribute on return value.
  LLVM_NODISCARD llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
      return Attrs;
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI)
      return Attrs;
    // We may legitimately have non-power-of-2 alignment here.
    // If so, this is UB land, emit it via `@llvm.assume` instead.
    if (!AlignmentCI->getValue().isPowerOf2())
      return Attrs;
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        CGF.getLLVMContext(), Attrs,
        llvm::Align(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA = nullptr; // We're done. Disallow doing anything else.
    return NewAttrs;
  }

  /// Emit alignment assumption.
  /// This is a general fallback that we take if either there is an offset,
  /// or the alignment is variable or we are sanitizing for alignment.
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr; // We're done. Disallow doing anything else.
  }
};

/// Helper data structure to emit `AssumeAlignedAttr`.
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
        OffsetCI = nullptr;
    }
  }
};
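
// Source-level sketch of what this emitter handles (hypothetical
// declaration, not from this file):
//   void *my_alloc(size_t n) __attribute__((assume_aligned(64)));
// With a constant power-of-two alignment and no offset, the call site simply
// gains an `align 64` return attribute; with an offset, or when sanitizing
// for alignment, the `@llvm.assume`-based fallback path is used instead.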

/// Helper data structure to emit `AllocAlignAttr`.
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // Alignment may or may not be a constant, and that is okay.
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};
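
// Illustrative counterpart for alloc_align (hypothetical declaration):
//   void *my_aligned_alloc(size_t align, size_t size)
//       __attribute__((alloc_align(1)));
// Here the alignment is whatever value was passed for parameter 1, so it is
// often non-constant and can only be surfaced via an alignment assumption.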

} // namespace

RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::CallBase **callOrInvoke, bool IsMustTail,
                                 SourceLocation Loc) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes,
    // so only check in the case where we have both always_inline and target
    // since otherwise we could be making a conditional call after a check for
    // the proper cpu features (and it won't cause code generation issues due
    // to function based code generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        TargetDecl->hasAttr<TargetAttr>())
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }

#ifndef NDEBUG
  if (!(CallInfo.isVariadic() && CallInfo.getArgStruct())) {
    // For an inalloca varargs function, we don't expect CallInfo to match the
    // function pointer's type, because the inalloca struct will have extra
    // fields in it for the varargs parameters. Code later in this function
    // bitcasts the function pointer to the type derived from CallInfo.
    //
    // In other cases, we assert that the types match up (until pointers stop
    // having pointee types).
    llvm::Type *TypeFromVal;
    if (Callee.isVirtual())
      TypeFromVal = Callee.getVirtualFunctionType();
    else
      TypeFromVal =
          Callee.getFunctionPointer()->getType()->getPointerElementType();
    assert(IRFuncTy == TypeFromVal);
  }
#endif

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, Align);
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        uint64_t size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        unsigned AS = Addr.getType()->getPointerAddressSpace();
        llvm::Type *MemType = ConvertTypeForMem(I->Ty)->getPointerTo(AS);
        // There are some cases where a trivial bitcast is not avoidable. The
        // definition of a type later in a translation unit may change its
        // type from {}* to (%struct.foo*)*.
        if (Addr.getType() != MemType)
          Addr = Builder.CreateBitCast(Addr, MemType);
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");
        IRCallArgs[FirstIRArg] = Addr.getPointer();

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;

        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          if (!ArgInfo.getIndirectByVal() ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((ArgInfo.getIndirectByVal() &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL even if RV is located in default or alloca address
          // space we don't want to perform address space cast for it.
          else if ((ArgInfo.getIndirectByVal() &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          IRCallArgs[FirstIRArg] = AI.getPointer();

          // Emit lifetime markers for the temporary alloca.
          uint64_t ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = V->getType()->getPointerElementType()->getPointerTo(
              CGM.getDataLayout().getAllocaAddrSpace());
          IRCallArgs[FirstIRArg] = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI() ==
            ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg =
              Address(V, getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        uint64_t SrcSize = CGM.getDataLayout().getTypeAllocSize(SrcTy);
        uint64_t DstSize = CGM.getDataLayout().getTypeAllocSize(STy);

        // If the source type is smaller than the destination type of the
        // coerce-to logic, copy the source value into a temp alloca the size
        // of the destination type to allow loading all of it. The bits past
        // the source value are left undef.
        if (SrcSize < DstSize) {
          Address TempAlloca
            = CreateTempAlloca(STy, Src.getAlignment(),
                               Src.getName() + ".coerce");
          Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
          Src = TempAlloca;
        } else {
          Src = Builder.CreateBitCast(Src,
                                      STy->getPointerTo(Src.getAddressSpace()));
        }

        assert(NumIRArgs == STy->getNumElements());
        for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
          Address EltPtr = Builder.CreateStructGEP(Src, i);
          llvm::Value *LI = Builder.CreateLoad(EltPtr);
          IRCallArgs[FirstIRArg + i] = LI;
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may
          // reveal sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlignment(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(
                (unsigned)layout->getAlignment().value(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = Builder.CreateElementBitCast(addr, coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    if (CallInfo.isVariadic()) {
      // When passing non-POD arguments by value to variadic functions, we will
      // end up with a variadic prototype and an inalloca call site. In such
      // cases, we can't do any parameter mismatch checks. Give up and bitcast
      // the callee.
      unsigned CalleeAS = CalleePtr->getType()->getPointerAddressSpace();
      CalleePtr =
          Builder.CreateBitCast(CalleePtr, IRFuncTy->getPointerTo(CalleeAS));
    } else {
      llvm::Type *LastParamTy =
          IRFuncTy->getParamType(IRFuncTy->getNumParams() - 1);
      if (Arg->getType() != LastParamTy) {
#ifndef NDEBUG
        // Assert that these structs have equivalent element types.
        llvm::StructType *FullTy = CallInfo.getArgStruct();
        llvm::StructType *DeclaredTy = cast<llvm::StructType>(
            cast<llvm::PointerType>(LastParamTy)->getElementType());
        assert(DeclaredTy->getNumElements() == FullTy->getNumElements());
        for (llvm::StructType::element_iterator
                 DI = DeclaredTy->element_begin(),
                 DE = DeclaredTy->element_end(), FI = FullTy->element_begin();
             DI != DE; ++DI, ++FI)
          assert(*DI == *FI);
#endif
        Arg = Builder.CreateBitCast(Arg, LastParamTy);
      }
    }
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }
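
  // Hedged C-level illustration of the unprototyped case this cleans up
  // (hypothetical translation unit, not from this file):
  //   void f();            /* no prototype */
  //   void g(void) { f(); }
  // The call is emitted through a cast of @f to a varargs function pointer
  // type; when the underlying function's parameter and return types line up,
  // the bitcast is stripped so the call targets the llvm::Function directly.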

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    // The inalloca argument can have a different type.
    if (IRFunctionArgs.hasInallocaArg() &&
        i == IRFunctionArgs.getInallocaArgNo())
      continue;
    if (i < IRFuncTy->getNumParams())
      assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
  }
#endif

  // Update the largest vector width if any arguments have vector types.
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
    if (auto *VT = dyn_cast<llvm::VectorType>(IRCallArgs[i]->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());
  }

  // Compute the calling convention and attributes.
  unsigned CallingConv;
  llvm::AttributeList Attrs;
  CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
                             Callee.getAbstractInfo(), Attrs, CallingConv,
                             /*AttrOnCallSite=*/true,
                             /*IsThunk=*/false);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addAttribute(getLLVMContext(),
                                 llvm::AttributeList::FunctionIndex,
                                 llvm::Attribute::StrictFP);

  // Add the call-site nomerge attribute if it exists.
  if (InNoMergeAttributedStmt)
    Attrs = Attrs.addAttribute(getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoMerge);

  // Apply some call-site-specific attributes.
  // TODO: work this into building the attribute set.

  // Apply always_inline to all calls within flatten functions.
  // FIXME: should this really take priority over __try, below?
  if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
      !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
    Attrs = Attrs.addAttribute(getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::AlwaysInline);
  }

  // Disable inlining inside SEH __try blocks.
  if (isSEHTryScope()) {
    Attrs = Attrs.addAttribute(getLLVMContext(),
                               llvm::AttributeList::FunctionIndex,
                               llvm::Attribute::NoInline);
  }

  // Decide whether to use a call or an invoke.
  bool CannotThrow;
  if (currentFunctionUsesSEHTry()) {
    // SEH cares about asynchronous exceptions, so everything can "throw."
    CannotThrow = false;
  } else if (isCleanupPadScope() &&
             EHPersonality::get(*this).isMSVCXXPersonality()) {
    // The MSVC++ personality will implicitly terminate the program if an
    // exception is thrown during a cleanup outside of a try/catch.
    // We don't need to model anything in IR to get this behavior.
    CannotThrow = true;
  } else {
    // Otherwise, nounwind call sites will never throw.
    CannotThrow = Attrs.hasFnAttribute(llvm::Attribute::NoUnwind);

    if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
      if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
        CannotThrow = true;
  }

  // If we made a temporary, be sure to clean up after ourselves. Note that we
  // can't depend on being inside of an ExprWithCleanups, so we need to
  // manually pop this cleanup later on. Being eager about this is OK, since
  // this temporary is 'invisible' outside of the callee.
  if (UnusedReturnSizePtr)
    pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
                                         UnusedReturnSizePtr);

  llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();

  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(CalleePtr);

  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
    if (FD->hasAttr<StrictFPAttr>())
      // All calls within a strictfp function are marked strictfp.
      Attrs = Attrs.addAttribute(getLLVMContext(),
                                 llvm::AttributeList::FunctionIndex,
                                 llvm::Attribute::StrictFP);

  AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
  Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
  Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);

  // Emit the actual call/invoke instruction.
  llvm::CallBase *CI;
  if (!InvokeDest) {
    CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
  } else {
    llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
    CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
                              BundleList);
    EmitBlock(Cont);
  }
  if (callOrInvoke)
    *callOrInvoke = CI;

  // If this is within a function that has the guard(nocf) attribute and is an
  // indirect call, add the "guard_nocf" attribute to this call to indicate
  // that Control Flow Guard checks should not be added, even if the call is
  // inlined.
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
    if (const auto *A = FD->getAttr<CFGuardAttr>()) {
      if (A->getGuard() == CFGuardAttr::GuardArg::nocf &&
          !CI->getCalledFunction())
        Attrs = Attrs.addAttribute(
            getLLVMContext(), llvm::AttributeList::FunctionIndex, "guard_nocf");
    }
  }

  // Apply the attributes and calling convention.
  CI->setAttributes(Attrs);
  CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));

  // Apply various metadata.

  if (!CI->getType()->isVoidTy())
    CI->setName("call");

  // Update the largest vector width from the return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CI->getType()))
    LargestVectorWidth =
        std::max((uint64_t)LargestVectorWidth,
                 VT->getPrimitiveSizeInBits().getKnownMinSize());

  // Insert instrumentation or attach profile metadata at indirect call sites.
  // For more details, see the comment before the definition of
  // IPVK_IndirectCallTarget in InstrProfData.inc.
  if (!CI->getCalledFunction())
    PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
                     CI, CalleePtr);

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(CI);

  // Set the tail call kind if necessary.
  if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
    if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
      Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
    else if (IsMustTail)
      Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
  }

  // Add metadata for calls to MSAllocator functions.
  if (getDebugInfo() && TargetDecl &&
      TargetDecl->hasAttr<MSAllocatorAttr>())
    getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);

  // 4. Finish the call.

  // If the call doesn't return, finish the basic block and clear the
  // insertion point; this allows the rest of IRGen to discard
  // unreachable code.
  if (CI->doesNotReturn()) {
    if (UnusedReturnSizePtr)
      PopCleanupBlock();

    // Strip away the noreturn attribute to better diagnose unreachable UB.
    if (SanOpts.has(SanitizerKind::Unreachable)) {
      // Also remove it from the function, since CallBase::hasFnAttr
      // additionally checks attributes of the called function.
      if (auto *F = CI->getCalledFunction())
        F->removeFnAttr(llvm::Attribute::NoReturn);
      CI->removeAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::NoReturn);

      // Avoid incompatibility with ASan which relies on the `noreturn`
      // attribute to insert handler calls.
      if (SanOpts.hasOneOf(SanitizerKind::Address |
                           SanitizerKind::KernelAddress)) {
        SanitizerScope SanScope(this);
        llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
        Builder.SetInsertPoint(CI);
        auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
        llvm::FunctionCallee Fn =
            CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
        EmitNounwindRuntimeCall(Fn);
      }
    }

    EmitUnreachable(Loc);
    Builder.ClearInsertionPoint();

    // FIXME: For now, emit a dummy basic block because expr emitters
    // generally are not ready to handle emitting expressions at unreachable
    // points.
    EnsureInsertPoint();

    // Return a reasonable RValue.
    return GetUndefRValue(RetTy);
  }

  // If this is a musttail call, return immediately. We do not branch to the
  // epilogue in this case.
  if (IsMustTail) {
    for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
         ++it) {
      EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
      if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
        CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
    }
    if (CI->getType()->isVoidTy())
      Builder.CreateRetVoid();
    else
      Builder.CreateRet(CI);
    Builder.ClearInsertionPoint();
    EnsureInsertPoint();
    return GetUndefRValue(RetTy);
  }
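
  // Hedged IR sketch of the musttail shape emitted above:
  //   %r = musttail call i32 @callee(i32 %x)
  //   ret i32 %r
  // A musttail call must be immediately followed by the return, which is why
  // no branch to the function epilogue is emitted here.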

  // Perform the swifterror writeback.
  if (swiftErrorTemp.isValid()) {
    llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
    Builder.CreateStore(errorResult, swiftErrorArg);
  }

  // Emit any call-associated writebacks immediately. Arguably this
  // should happen after any return-value munging.
  if (CallArgs.hasWritebacks())
    emitWritebacks(*this, CallArgs);

  // The stack cleanup for inalloca arguments has to run out of the normal
  // lexical order, so deactivate it and run it manually here.
  CallArgs.freeArgumentMemory(*this);

  // Extract the return value.
  RValue Ret = [&] {
    switch (RetAI.getKind()) {
    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = RetAI.getCoerceAndExpandType();

      Address addr = SRetPtr;
      addr = Builder.CreateElementBitCast(addr, coercionType);

      assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
      bool requiresExtract = isa<llvm::StructType>(CI->getType());

      unsigned unpaddedIndex = 0;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = CI;
        if (requiresExtract)
          elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
        else
          assert(unpaddedIndex == 0);
        Builder.CreateStore(elt, eltAddr);
      }
      LLVM_FALLTHROUGH;
    }

    case ABIArgInfo::InAlloca:
    case ABIArgInfo::Indirect: {
      RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
      if (UnusedReturnSizePtr)
        PopCleanupBlock();
      return ret;
    }

    case ABIArgInfo::Ignore:
      // If we are ignoring the callee's return value, make sure to construct
      // an appropriate placeholder value for our own caller.
      return GetUndefRValue(RetTy);

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      llvm::Type *RetIRTy = ConvertType(RetTy);
      if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
        switch (getEvaluationKind(RetTy)) {
        case TEK_Complex: {
          llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
          llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
          return RValue::getComplex(std::make_pair(Real, Imag));
        }
        case TEK_Aggregate: {
          Address DestPtr = ReturnValue.getValue();
          bool DestIsVolatile = ReturnValue.isVolatile();

          if (!DestPtr.isValid()) {
            DestPtr = CreateMemTemp(RetTy, "agg.tmp");
            DestIsVolatile = false;
          }
          EmitAggregateStore(CI, DestPtr, DestIsVolatile);
          return RValue::getAggregate(DestPtr);
        }
        case TEK_Scalar: {
          // If the value doesn't match, perform a bitcast to coerce it. This
          // can happen due to trivial type mismatches.
          llvm::Value *V = CI;
          if (V->getType() != RetIRTy)
            V = Builder.CreateBitCast(V, RetIRTy);
          return RValue::get(V);
        }
        }
        llvm_unreachable("bad evaluation kind");
      }

      Address DestPtr = ReturnValue.getValue();
      bool DestIsVolatile = ReturnValue.isVolatile();

      if (!DestPtr.isValid()) {
        DestPtr = CreateMemTemp(RetTy, "coerce");
        DestIsVolatile = false;
      }

      // If the value is offset in memory, apply the offset now.
      Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
      CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);

      return convertTempToRValue(DestPtr, RetTy, SourceLocation());
    }

    case ABIArgInfo::Expand:
    case ABIArgInfo::IndirectAliased:
      llvm_unreachable("Invalid ABI kind for return argument");
    }

    llvm_unreachable("Unhandled ABIArgInfo::Kind");
  }();

  // Emit the assume_aligned check on the return value.
  if (Ret.isScalar() && TargetDecl) {
    AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
    AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
  }

  // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
  // we can't use the full cleanup mechanism.
  for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
    LifetimeEnd.Emit(*this, /*Flags=*/{});

  if (!ReturnValue.isExternallyDestructed() &&
      RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
    pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
                RetTy);

  return Ret;
}

CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
  if (isVirtual()) {
    const CallExpr *CE = getVirtualCallExpr();
    return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
        CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
        CE ? CE->getBeginLoc() : SourceLocation());
  }

  return *this;
}

/* VarArg handling */

Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
  VAListAddr = VE->isMicrosoftABI()
                   ? EmitMSVAListRef(VE->getSubExpr())
                   : EmitVAListRef(VE->getSubExpr());
  QualType Ty = VE->getType();
  if (VE->isMicrosoftABI())
    return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
  return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
}