//===--- CGCall.cpp - Encapsulate calling convention details --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// These classes wrap the information about a call or function
// definition used to handle ABI compliance.
//
//===----------------------------------------------------------------------===//

#include "CGCall.h"
#include "ABIInfo.h"
#include "ABIInfoImpl.h"
#include "CGBlocks.h"
#include "CGCXXABI.h"
#include "CGCleanup.h"
#include "CGRecordLayout.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/DeclObjC.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/CodeGen/SwiftCallingConv.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/AttributeMask.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/Type.h"
#include "llvm/Transforms/Utils/Local.h"
#include <optional>
using namespace clang;
using namespace CodeGen;

/***/
unsigned CodeGenTypes::ClangCallConvToLLVMCallConv(CallingConv CC) {
  switch (CC) {
  default: return llvm::CallingConv::C;
  case CC_X86StdCall: return llvm::CallingConv::X86_StdCall;
  case CC_X86FastCall: return llvm::CallingConv::X86_FastCall;
  case CC_X86RegCall: return llvm::CallingConv::X86_RegCall;
  case CC_X86ThisCall: return llvm::CallingConv::X86_ThisCall;
  case CC_Win64: return llvm::CallingConv::Win64;
  case CC_X86_64SysV: return llvm::CallingConv::X86_64_SysV;
  case CC_AAPCS: return llvm::CallingConv::ARM_AAPCS;
  case CC_AAPCS_VFP: return llvm::CallingConv::ARM_AAPCS_VFP;
  case CC_IntelOclBicc: return llvm::CallingConv::Intel_OCL_BI;
  // TODO: Add support for __pascal to LLVM.
  case CC_X86Pascal: return llvm::CallingConv::C;
  // TODO: Add support for __vectorcall to LLVM.
  case CC_X86VectorCall: return llvm::CallingConv::X86_VectorCall;
  case CC_AArch64VectorCall: return llvm::CallingConv::AArch64_VectorCall;
  case CC_AArch64SVEPCS: return llvm::CallingConv::AArch64_SVE_VectorCall;
  case CC_AMDGPUKernelCall: return llvm::CallingConv::AMDGPU_KERNEL;
  case CC_SpirFunction: return llvm::CallingConv::SPIR_FUNC;
  case CC_OpenCLKernel:
    return CGM.getTargetCodeGenInfo().getOpenCLKernelCallingConv();
  case CC_PreserveMost: return llvm::CallingConv::PreserveMost;
  case CC_PreserveAll: return llvm::CallingConv::PreserveAll;
  case CC_Swift: return llvm::CallingConv::Swift;
  case CC_SwiftAsync: return llvm::CallingConv::SwiftTail;
  case CC_M68kRTD: return llvm::CallingConv::M68k_RTD;
  }
}

/// Derives the 'this' type for codegen purposes, i.e. ignoring method CVR
/// qualification. Either or both of RD and MD may be null. A null RD indicates
/// that there is no meaningful 'this' type, and a null MD can occur when
/// calling a method pointer.
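///
/// For example (an illustrative sketch, not code from this file): given
///   struct S { void f() const; };
/// the type derived for 'this' is 'S *' rather than 'const S *', since the
/// const qualifier on the method is ignored for codegen purposes.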
CanQualType CodeGenTypes::DeriveThisType(const CXXRecordDecl *RD,
                                         const CXXMethodDecl *MD) {
  QualType RecTy;
  if (RD)
    RecTy = Context.getTagDeclType(RD)->getCanonicalTypeInternal();
  else
    RecTy = Context.VoidTy;

  if (MD)
    RecTy = Context.getAddrSpaceQualType(
        RecTy, MD->getMethodQualifiers().getAddressSpace());
  return Context.getPointerType(CanQualType::CreateUnsafe(RecTy));
}

/// Returns the canonical formal type of the given C++ method.
static CanQual<FunctionProtoType> GetFormalType(const CXXMethodDecl *MD) {
  return MD->getType()->getCanonicalTypeUnqualified()
           .getAs<FunctionProtoType>();
}

/// Returns the "extra-canonicalized" return type, which discards
/// qualifiers on the return type. Codegen doesn't care about them,
/// and it makes ABI code a little easier to be able to assume that
/// all parameter and return types are top-level unqualified.
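///
/// For example, a function declared as returning 'const int' is arranged here
/// as if it returned plain 'int'.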
static CanQualType GetReturnType(QualType RetTy) {
  return RetTy->getCanonicalTypeUnqualified().getUnqualifiedType();
}

/// Arrange the argument and result information for a value of the given
/// unprototyped freestanding function type.
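///
/// For instance (an illustrative sketch): a C declaration 'int f();' carries
/// no prototype, so it is lowered as if it were variadic (an IR type along
/// the lines of 'i32 (...)') with zero required arguments.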
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionNoProtoType> FTNP) {
  // When translating an unprototyped function type, always use a
  // variadic type.
  return arrangeLLVMFunctionInfo(FTNP->getReturnType().getUnqualifiedType(),
                                 FnInfoOpts::None, std::nullopt,
                                 FTNP->getExtInfo(), {}, RequiredArgs(0));
}

static void addExtParameterInfosForCall(
    llvm::SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    const FunctionProtoType *proto,
    unsigned prefixArgs,
    unsigned totalArgs) {
  assert(proto->hasExtParameterInfos());
  assert(paramInfos.size() <= prefixArgs);
  assert(proto->getNumParams() + prefixArgs <= totalArgs);

  paramInfos.reserve(totalArgs);

  // Add default infos for any prefix args that don't already have infos.
  paramInfos.resize(prefixArgs);

  // Add infos for the prototype.
  for (const auto &ParamInfo : proto->getExtParameterInfos()) {
    paramInfos.push_back(ParamInfo);
    // pass_object_size params have no parameter info.
    if (ParamInfo.hasPassObjectSize())
      paramInfos.emplace_back();
  }

  assert(paramInfos.size() <= totalArgs &&
         "Did we forget to insert pass_object_size args?");
  // Add default infos for the variadic and/or suffix arguments.
  paramInfos.resize(totalArgs);
}

/// Adds the formal parameters in FPT to the given prefix. If any parameter in
/// FPT has pass_object_size attrs, then we'll add parameters for those, too.
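///
/// For example, a parameter declared as
///   void *p __attribute__((pass_object_size(0)))
/// is lowered as two arguments: the pointer itself plus an implicit size_t
/// carrying the object size.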
static void appendParameterTypes(
    const CodeGenTypes &CGT, SmallVectorImpl<CanQualType> &prefix,
    SmallVectorImpl<FunctionProtoType::ExtParameterInfo> &paramInfos,
    CanQual<FunctionProtoType> FPT) {
  // Fast path: don't touch param info if we don't need to.
  if (!FPT->hasExtParameterInfos()) {
    assert(paramInfos.empty() &&
           "We have paramInfos, but the prototype doesn't?");
    prefix.append(FPT->param_type_begin(), FPT->param_type_end());
    return;
  }

  unsigned PrefixSize = prefix.size();
  // In the vast majority of cases, we'll have precisely FPT->getNumParams()
  // parameters; the only thing that can change this is the presence of
  // pass_object_size. So, we preallocate for the common case.
  prefix.reserve(prefix.size() + FPT->getNumParams());

  auto ExtInfos = FPT->getExtParameterInfos();
  assert(ExtInfos.size() == FPT->getNumParams());
  for (unsigned I = 0, E = FPT->getNumParams(); I != E; ++I) {
    prefix.push_back(FPT->getParamType(I));
    if (ExtInfos[I].hasPassObjectSize())
      prefix.push_back(CGT.getContext().getSizeType());
  }

  addExtParameterInfosForCall(paramInfos, FPT.getTypePtr(), PrefixSize,
                              prefix.size());
}

/// Arrange the LLVM function layout for a value of the given function
/// type, on top of any implicit parameters already stored.
static const CGFunctionInfo &
arrangeLLVMFunctionInfo(CodeGenTypes &CGT, bool instanceMethod,
                        SmallVectorImpl<CanQualType> &prefix,
                        CanQual<FunctionProtoType> FTP) {
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  RequiredArgs Required = RequiredArgs::forPrototypePlus(FTP, prefix.size());
  // FIXME: Kill copy.
  appendParameterTypes(CGT, prefix, paramInfos, FTP);
  CanQualType resultType = FTP->getReturnType().getUnqualifiedType();

  FnInfoOpts opts =
      instanceMethod ? FnInfoOpts::IsInstanceMethod : FnInfoOpts::None;
  return CGT.arrangeLLVMFunctionInfo(resultType, opts, prefix,
                                     FTP->getExtInfo(), paramInfos, Required);
}

/// Arrange the argument and result information for a value of the
/// given freestanding function type.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionType(CanQual<FunctionProtoType> FTP) {
  SmallVector<CanQualType, 16> argTypes;
  return ::arrangeLLVMFunctionInfo(*this, /*instanceMethod=*/false, argTypes,
                                   FTP);
}

static CallingConv getCallingConventionForDecl(const ObjCMethodDecl *D,
                                               bool IsWindows) {
  // Set the appropriate calling convention for the Function.
  if (D->hasAttr<StdCallAttr>())
    return CC_X86StdCall;

  if (D->hasAttr<FastCallAttr>())
    return CC_X86FastCall;

  if (D->hasAttr<RegCallAttr>())
    return CC_X86RegCall;

  if (D->hasAttr<ThisCallAttr>())
    return CC_X86ThisCall;

  if (D->hasAttr<VectorCallAttr>())
    return CC_X86VectorCall;

  if (D->hasAttr<PascalAttr>())
    return CC_X86Pascal;

  if (PcsAttr *PCS = D->getAttr<PcsAttr>())
    return (PCS->getPCS() == PcsAttr::AAPCS ? CC_AAPCS : CC_AAPCS_VFP);

  if (D->hasAttr<AArch64VectorPcsAttr>())
    return CC_AArch64VectorCall;

  if (D->hasAttr<AArch64SVEPcsAttr>())
    return CC_AArch64SVEPCS;

  if (D->hasAttr<AMDGPUKernelCallAttr>())
    return CC_AMDGPUKernelCall;

  if (D->hasAttr<IntelOclBiccAttr>())
    return CC_IntelOclBicc;

  if (D->hasAttr<MSABIAttr>())
    return IsWindows ? CC_C : CC_Win64;

  if (D->hasAttr<SysVABIAttr>())
    return IsWindows ? CC_X86_64SysV : CC_C;

  if (D->hasAttr<PreserveMostAttr>())
    return CC_PreserveMost;

  if (D->hasAttr<PreserveAllAttr>())
    return CC_PreserveAll;

  if (D->hasAttr<M68kRTDAttr>())
    return CC_M68kRTD;

  return CC_C;
}

/// Arrange the argument and result information for a call to an
/// unknown C++ non-static member function of the given abstract type.
/// (A null RD means we don't have any meaningful "this" argument type,
/// so fall back to a generic pointer type).
/// The member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodType(const CXXRecordDecl *RD,
                                   const FunctionProtoType *FTP,
                                   const CXXMethodDecl *MD) {
  SmallVector<CanQualType, 16> argTypes;

  // Add the 'this' pointer.
  argTypes.push_back(DeriveThisType(RD, MD));

  return ::arrangeLLVMFunctionInfo(
      *this, /*instanceMethod=*/true, argTypes,
      FTP->getCanonicalTypeUnqualified().getAs<FunctionProtoType>());
}

/// Set calling convention for CUDA/HIP kernel.
static void setCUDAKernelCallingConvention(CanQualType &FTy, CodeGenModule &CGM,
                                           const FunctionDecl *FD) {
  if (FD->hasAttr<CUDAGlobalAttr>()) {
    const FunctionType *FT = FTy->getAs<FunctionType>();
    CGM.getTargetCodeGenInfo().setCUDAKernelCallingConvention(FT);
    FTy = FT->getCanonicalTypeUnqualified();
  }
}

/// Arrange the argument and result information for a declaration or
/// definition of the given C++ non-static member function. The
/// member function must be an ordinary function, i.e. not a
/// constructor or destructor.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodDeclaration(const CXXMethodDecl *MD) {
  assert(!isa<CXXConstructorDecl>(MD) && "wrong method for constructors!");
  assert(!isa<CXXDestructorDecl>(MD) && "wrong method for destructors!");

  CanQualType FT = GetFormalType(MD).getAs<Type>();
  setCUDAKernelCallingConvention(FT, CGM, MD);
  auto prototype = FT.getAs<FunctionProtoType>();

  if (MD->isImplicitObjectMemberFunction()) {
    // The abstract case is perfectly fine.
    const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(MD);
    return arrangeCXXMethodType(ThisType, prototype.getTypePtr(), MD);
  }

  return arrangeFreeFunctionType(prototype);
}

bool CodeGenTypes::inheritingCtorHasParams(
    const InheritedConstructor &Inherited, CXXCtorType Type) {
  // Parameters are unnecessary if we're constructing a base class subobject
  // and the inherited constructor lives in a virtual base.
  return Type == Ctor_Complete ||
         !Inherited.getShadowDecl()->constructsVirtualBase() ||
         !Target.getCXXABI().hasConstructorVariants();
}

const CGFunctionInfo &
CodeGenTypes::arrangeCXXStructorDeclaration(GlobalDecl GD) {
  auto *MD = cast<CXXMethodDecl>(GD.getDecl());

  SmallVector<CanQualType, 16> argTypes;
  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  const CXXRecordDecl *ThisType = TheCXXABI.getThisArgumentTypeForMethod(GD);
  argTypes.push_back(DeriveThisType(ThisType, MD));

  bool PassParams = true;

  if (auto *CD = dyn_cast<CXXConstructorDecl>(MD)) {
    // A base class inheriting constructor doesn't get forwarded arguments
    // needed to construct a virtual base (or base class thereof).
    if (auto Inherited = CD->getInheritedConstructor())
      PassParams = inheritingCtorHasParams(Inherited, GD.getCtorType());
  }

  CanQual<FunctionProtoType> FTP = GetFormalType(MD);

  // Add the formal parameters.
  if (PassParams)
    appendParameterTypes(*this, argTypes, paramInfos, FTP);

  CGCXXABI::AddedStructorArgCounts AddedArgs =
      TheCXXABI.buildStructorSignature(GD, argTypes);
  if (!paramInfos.empty()) {
    // Note: prefix implies after the first param.
    if (AddedArgs.Prefix)
      paramInfos.insert(paramInfos.begin() + 1, AddedArgs.Prefix,
                        FunctionProtoType::ExtParameterInfo{});
    if (AddedArgs.Suffix)
      paramInfos.append(AddedArgs.Suffix,
                        FunctionProtoType::ExtParameterInfo{});
  }

  RequiredArgs required =
      (PassParams && MD->isVariadic() ? RequiredArgs(argTypes.size())
                                      : RequiredArgs::All);

  FunctionType::ExtInfo extInfo = FTP->getExtInfo();
  CanQualType resultType = TheCXXABI.HasThisReturn(GD) ? argTypes.front()
                           : TheCXXABI.hasMostDerivedReturn(GD)
                               ? CGM.getContext().VoidPtrTy
                               : Context.VoidTy;
  return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::IsInstanceMethod,
                                 argTypes, extInfo, paramInfos, required);
}

static SmallVector<CanQualType, 16>
getArgTypesForCall(ASTContext &ctx, const CallArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg.Ty));
  return argTypes;
}

static SmallVector<CanQualType, 16>
getArgTypesForDeclaration(ASTContext &ctx, const FunctionArgList &args) {
  SmallVector<CanQualType, 16> argTypes;
  for (auto &arg : args)
    argTypes.push_back(ctx.getCanonicalParamType(arg->getType()));
  return argTypes;
}

static llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16>
getExtParameterInfosForCall(const FunctionProtoType *proto,
                            unsigned prefixArgs, unsigned totalArgs) {
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> result;
  if (proto->hasExtParameterInfos()) {
    addExtParameterInfosForCall(result, proto, prefixArgs, totalArgs);
  }
  return result;
}

/// Arrange a call to a C++ constructor, passing the given arguments.
///
/// ExtraPrefixArgs is the number of ABI-specific args passed after the `this`
/// parameter.
/// ExtraSuffixArgs is the number of ABI-specific args passed at the end of
/// args.
/// PassProtoArgs indicates whether `args` has args for the parameters in the
/// given CXXConstructorDecl.
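///
/// As an illustration (ABI-dependent, not specific to this function): under
/// the Itanium C++ ABI, a base-subobject constructor of a class with virtual
/// bases receives a VTT pointer immediately after `this`; such an implicit
/// argument would be counted by ExtraPrefixArgs.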
const CGFunctionInfo &
CodeGenTypes::arrangeCXXConstructorCall(const CallArgList &args,
                                        const CXXConstructorDecl *D,
                                        CXXCtorType CtorKind,
                                        unsigned ExtraPrefixArgs,
                                        unsigned ExtraSuffixArgs,
                                        bool PassProtoArgs) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> ArgTypes;
  for (const auto &Arg : args)
    ArgTypes.push_back(Context.getCanonicalParamType(Arg.Ty));

  // +1 for implicit this, which should always be args[0].
  unsigned TotalPrefixArgs = 1 + ExtraPrefixArgs;

  CanQual<FunctionProtoType> FPT = GetFormalType(D);
  RequiredArgs Required = PassProtoArgs
                              ? RequiredArgs::forPrototypePlus(
                                    FPT, TotalPrefixArgs + ExtraSuffixArgs)
                              : RequiredArgs::All;

  GlobalDecl GD(D, CtorKind);
  CanQualType ResultType = TheCXXABI.HasThisReturn(GD) ? ArgTypes.front()
                           : TheCXXABI.hasMostDerivedReturn(GD)
                               ? CGM.getContext().VoidPtrTy
                               : Context.VoidTy;

  FunctionType::ExtInfo Info = FPT->getExtInfo();
  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> ParamInfos;
  // If the prototype args are elided, we should only have ABI-specific args,
  // which never have param info.
  if (PassProtoArgs && FPT->hasExtParameterInfos()) {
    // ABI-specific suffix arguments are treated the same as variadic arguments.
    addExtParameterInfosForCall(ParamInfos, FPT.getTypePtr(), TotalPrefixArgs,
                                ArgTypes.size());
  }

  return arrangeLLVMFunctionInfo(ResultType, FnInfoOpts::IsInstanceMethod,
                                 ArgTypes, Info, ParamInfos, Required);
}

/// Arrange the argument and result information for the declaration or
/// definition of the given function.
const CGFunctionInfo &
CodeGenTypes::arrangeFunctionDeclaration(const FunctionDecl *FD) {
  if (const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD))
    if (MD->isImplicitObjectMemberFunction())
      return arrangeCXXMethodDeclaration(MD);

  CanQualType FTy = FD->getType()->getCanonicalTypeUnqualified();

  assert(isa<FunctionType>(FTy));
  setCUDAKernelCallingConvention(FTy, CGM, FD);

  // When declaring a function without a prototype, always use a
  // non-variadic type.
  if (CanQual<FunctionNoProtoType> noProto = FTy.getAs<FunctionNoProtoType>()) {
    return arrangeLLVMFunctionInfo(noProto->getReturnType(), FnInfoOpts::None,
                                   std::nullopt, noProto->getExtInfo(), {},
                                   RequiredArgs::All);
  }

  return arrangeFreeFunctionType(FTy.castAs<FunctionProtoType>());
}

/// Arrange the argument and result information for the declaration or
/// definition of an Objective-C method.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMethodDeclaration(const ObjCMethodDecl *MD) {
  // It happens that this is the same as a call with no optional
  // arguments, except also using the formal 'self' type.
  return arrangeObjCMessageSendSignature(MD, MD->getSelfDecl()->getType());
}

/// Arrange the argument and result information for the function type
/// through which to perform a send to the given Objective-C method,
/// using the given receiver type. The receiver type is not always
/// the 'self' type of the method or even an Objective-C pointer type.
/// This is *not* the right method for actually performing such a
/// message send, due to the possibility of optional arguments.
const CGFunctionInfo &
CodeGenTypes::arrangeObjCMessageSendSignature(const ObjCMethodDecl *MD,
                                              QualType receiverType) {
  SmallVector<CanQualType, 16> argTys;
  SmallVector<FunctionProtoType::ExtParameterInfo, 4> extParamInfos(
      MD->isDirectMethod() ? 1 : 2);
  argTys.push_back(Context.getCanonicalParamType(receiverType));
  if (!MD->isDirectMethod())
    argTys.push_back(Context.getCanonicalParamType(Context.getObjCSelType()));
  // FIXME: Kill copy?
  for (const auto *I : MD->parameters()) {
    argTys.push_back(Context.getCanonicalParamType(I->getType()));
    auto extParamInfo = FunctionProtoType::ExtParameterInfo().withIsNoEscape(
        I->hasAttr<NoEscapeAttr>());
    extParamInfos.push_back(extParamInfo);
  }

  FunctionType::ExtInfo einfo;
  bool IsWindows = getContext().getTargetInfo().getTriple().isOSWindows();
  einfo = einfo.withCallingConv(getCallingConventionForDecl(MD, IsWindows));

  if (getContext().getLangOpts().ObjCAutoRefCount &&
      MD->hasAttr<NSReturnsRetainedAttr>())
    einfo = einfo.withProducesResult(true);

  RequiredArgs required =
      (MD->isVariadic() ? RequiredArgs(argTys.size()) : RequiredArgs::All);

  return arrangeLLVMFunctionInfo(GetReturnType(MD->getReturnType()),
                                 FnInfoOpts::None, argTys, einfo, extParamInfos,
                                 required);
}

const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedObjCMessageSend(QualType returnType,
                                                 const CallArgList &args) {
  auto argTypes = getArgTypesForCall(Context, args);
  FunctionType::ExtInfo einfo;

  return arrangeLLVMFunctionInfo(GetReturnType(returnType), FnInfoOpts::None,
                                 argTypes, einfo, {}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeGlobalDeclaration(GlobalDecl GD) {
  // FIXME: Do we need to handle ObjCMethodDecl?
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  if (isa<CXXConstructorDecl>(GD.getDecl()) ||
      isa<CXXDestructorDecl>(GD.getDecl()))
    return arrangeCXXStructorDeclaration(GD);

  return arrangeFunctionDeclaration(FD);
}

/// Arrange a thunk that takes 'this' as the first parameter followed by
/// varargs. Return a void pointer, regardless of the actual return type.
/// The body of the thunk will end in a musttail call to a function of the
/// correct type, and the caller will bitcast the function to the correct
/// prototype.
const CGFunctionInfo &
CodeGenTypes::arrangeUnprototypedMustTailThunk(const CXXMethodDecl *MD) {
  assert(MD->isVirtual() && "only methods have thunks");
  CanQual<FunctionProtoType> FTP = GetFormalType(MD);
  CanQualType ArgTys[] = {DeriveThisType(MD->getParent(), MD)};
  return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::None, ArgTys,
                                 FTP->getExtInfo(), {}, RequiredArgs(1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeMSCtorClosure(const CXXConstructorDecl *CD,
                                   CXXCtorType CT) {
  assert(CT == Ctor_CopyingClosure || CT == Ctor_DefaultClosure);

  CanQual<FunctionProtoType> FTP = GetFormalType(CD);
  SmallVector<CanQualType, 2> ArgTys;
  const CXXRecordDecl *RD = CD->getParent();
  ArgTys.push_back(DeriveThisType(RD, CD));
  if (CT == Ctor_CopyingClosure)
    ArgTys.push_back(*FTP->param_type_begin());
  if (RD->getNumVBases() > 0)
    ArgTys.push_back(Context.IntTy);
  CallingConv CC = Context.getDefaultCallingConvention(
      /*IsVariadic=*/false, /*IsCXXMethod=*/true);
  return arrangeLLVMFunctionInfo(Context.VoidTy, FnInfoOpts::IsInstanceMethod,
                                 ArgTys, FunctionType::ExtInfo(CC), {},
                                 RequiredArgs::All);
}

/// Arrange a call as unto a free function, except possibly with an
/// additional number of formal parameters considered required.
static const CGFunctionInfo &
arrangeFreeFunctionLikeCall(CodeGenTypes &CGT,
                            CodeGenModule &CGM,
                            const CallArgList &args,
                            const FunctionType *fnType,
                            unsigned numExtraRequiredArgs,
                            bool chainCall) {
  assert(args.size() >= numExtraRequiredArgs);

  llvm::SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;

  // In most cases, there are no optional arguments.
  RequiredArgs required = RequiredArgs::All;

  // If we have a variadic prototype, the required arguments are the
  // extra prefix plus the arguments in the prototype.
  if (const FunctionProtoType *proto = dyn_cast<FunctionProtoType>(fnType)) {
    if (proto->isVariadic())
      required = RequiredArgs::forPrototypePlus(proto, numExtraRequiredArgs);

    if (proto->hasExtParameterInfos())
      addExtParameterInfosForCall(paramInfos, proto, numExtraRequiredArgs,
                                  args.size());

    // If we don't have a prototype at all, but we're supposed to
    // explicitly use the variadic convention for unprototyped calls,
    // treat all of the arguments as required but preserve the nominal
    // possibility of variadics.
  } else if (CGM.getTargetCodeGenInfo().isNoProtoCallVariadic(
                 args, cast<FunctionNoProtoType>(fnType))) {
    required = RequiredArgs(args.size());
  }

  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &arg : args)
    argTypes.push_back(CGT.getContext().getCanonicalParamType(arg.Ty));
  FnInfoOpts opts = chainCall ? FnInfoOpts::IsChainCall : FnInfoOpts::None;
  return CGT.arrangeLLVMFunctionInfo(GetReturnType(fnType->getReturnType()),
                                     opts, argTypes, fnType->getExtInfo(),
                                     paramInfos, required);
}

/// Figure out the rules for calling a function with the given formal
/// type using the given arguments. The arguments are necessary
/// because the function might be unprototyped, in which case it's
/// target-dependent in crazy ways.
const CGFunctionInfo &
CodeGenTypes::arrangeFreeFunctionCall(const CallArgList &args,
                                      const FunctionType *fnType,
                                      bool chainCall) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType,
                                     chainCall ? 1 : 0, chainCall);
}

/// A block function is essentially a free function with an
/// extra implicit argument.
const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionCall(const CallArgList &args,
                                       const FunctionType *fnType) {
  return arrangeFreeFunctionLikeCall(*this, CGM, args, fnType, 1,
                                     /*chainCall=*/false);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBlockFunctionDeclaration(const FunctionProtoType *proto,
                                              const FunctionArgList &params) {
  auto paramInfos = getExtParameterInfosForCall(proto, 1, params.size());
  auto argTypes = getArgTypesForDeclaration(Context, params);

  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 FnInfoOpts::None, argTypes,
                                 proto->getExtInfo(), paramInfos,
                                 RequiredArgs::forPrototypePlus(proto, 1));
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionCall(QualType resultType,
                                         const CallArgList &args) {
  // FIXME: Kill copy.
  SmallVector<CanQualType, 16> argTypes;
  for (const auto &Arg : args)
    argTypes.push_back(Context.getCanonicalParamType(Arg.Ty));
  return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
                                 argTypes, FunctionType::ExtInfo(),
                                 /*paramInfos=*/{}, RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(QualType resultType,
                                                const FunctionArgList &args) {
  auto argTypes = getArgTypesForDeclaration(Context, args);

  return arrangeLLVMFunctionInfo(GetReturnType(resultType), FnInfoOpts::None,
                                 argTypes, FunctionType::ExtInfo(), {},
                                 RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeBuiltinFunctionDeclaration(CanQualType resultType,
                                                ArrayRef<CanQualType> argTypes) {
  return arrangeLLVMFunctionInfo(resultType, FnInfoOpts::None, argTypes,
                                 FunctionType::ExtInfo(), {},
                                 RequiredArgs::All);
}

/// Arrange a call to a C++ method, passing the given arguments.
///
/// numPrefixArgs is the number of ABI-specific prefix arguments we have. It
/// does not count `this`.
const CGFunctionInfo &
CodeGenTypes::arrangeCXXMethodCall(const CallArgList &args,
                                   const FunctionProtoType *proto,
                                   RequiredArgs required,
                                   unsigned numPrefixArgs) {
  assert(numPrefixArgs + 1 <= args.size() &&
         "Emitting a call with less args than the required prefix?");
  // Add one to account for `this`. It's a bit awkward here, but we don't count
  // `this` in similar places elsewhere.
  auto paramInfos =
      getExtParameterInfosForCall(proto, numPrefixArgs + 1, args.size());

  // FIXME: Kill copy.
  auto argTypes = getArgTypesForCall(Context, args);

  FunctionType::ExtInfo info = proto->getExtInfo();
  return arrangeLLVMFunctionInfo(GetReturnType(proto->getReturnType()),
                                 FnInfoOpts::IsInstanceMethod, argTypes, info,
                                 paramInfos, required);
}

const CGFunctionInfo &CodeGenTypes::arrangeNullaryFunction() {
  return arrangeLLVMFunctionInfo(getContext().VoidTy, FnInfoOpts::None,
                                 std::nullopt, FunctionType::ExtInfo(), {},
                                 RequiredArgs::All);
}

const CGFunctionInfo &
CodeGenTypes::arrangeCall(const CGFunctionInfo &signature,
                          const CallArgList &args) {
  assert(signature.arg_size() <= args.size());
  if (signature.arg_size() == args.size())
    return signature;

  SmallVector<FunctionProtoType::ExtParameterInfo, 16> paramInfos;
  auto sigParamInfos = signature.getExtParameterInfos();
  if (!sigParamInfos.empty()) {
    paramInfos.append(sigParamInfos.begin(), sigParamInfos.end());
    paramInfos.resize(args.size());
  }

  auto argTypes = getArgTypesForCall(Context, args);

  assert(signature.getRequiredArgs().allowsOptionalArgs());
  FnInfoOpts opts = FnInfoOpts::None;
  if (signature.isInstanceMethod())
    opts |= FnInfoOpts::IsInstanceMethod;
  if (signature.isChainCall())
    opts |= FnInfoOpts::IsChainCall;
  if (signature.isDelegateCall())
    opts |= FnInfoOpts::IsDelegateCall;
  return arrangeLLVMFunctionInfo(signature.getReturnType(), opts, argTypes,
                                 signature.getExtInfo(), paramInfos,
                                 signature.getRequiredArgs());
}

namespace clang {
namespace CodeGen {
void computeSPIRKernelABIInfo(CodeGenModule &CGM, CGFunctionInfo &FI);
}
}

/// Arrange the argument and result information for an abstract value
/// of a given function type. This is the method which all of the
/// above functions ultimately defer to.
const CGFunctionInfo &CodeGenTypes::arrangeLLVMFunctionInfo(
    CanQualType resultType, FnInfoOpts opts, ArrayRef<CanQualType> argTypes,
    FunctionType::ExtInfo info,
    ArrayRef<FunctionProtoType::ExtParameterInfo> paramInfos,
    RequiredArgs required) {
  assert(llvm::all_of(argTypes,
                      [](CanQualType T) { return T.isCanonicalAsParam(); }));

  // Lookup or create unique function info.
  llvm::FoldingSetNodeID ID;
  bool isInstanceMethod =
      (opts & FnInfoOpts::IsInstanceMethod) == FnInfoOpts::IsInstanceMethod;
  bool isChainCall =
      (opts & FnInfoOpts::IsChainCall) == FnInfoOpts::IsChainCall;
  bool isDelegateCall =
      (opts & FnInfoOpts::IsDelegateCall) == FnInfoOpts::IsDelegateCall;
  CGFunctionInfo::Profile(ID, isInstanceMethod, isChainCall, isDelegateCall,
                          info, paramInfos, required, resultType, argTypes);

  void *insertPos = nullptr;
  CGFunctionInfo *FI = FunctionInfos.FindNodeOrInsertPos(ID, insertPos);
  if (FI)
    return *FI;

  unsigned CC = ClangCallConvToLLVMCallConv(info.getCC());

  // Construct the function info. We co-allocate the ArgInfos.
  FI = CGFunctionInfo::create(CC, isInstanceMethod, isChainCall, isDelegateCall,
                              info, paramInfos, resultType, argTypes, required);
  FunctionInfos.InsertNode(FI, insertPos);

  bool inserted = FunctionsBeingProcessed.insert(FI).second;
  (void)inserted;
  assert(inserted && "Recursively being processed?");

  // Compute ABI information.
  if (CC == llvm::CallingConv::SPIR_KERNEL) {
    // Force target independent argument handling for the host visible
    // kernel functions.
    computeSPIRKernelABIInfo(CGM, *FI);
  } else if (info.getCC() == CC_Swift || info.getCC() == CC_SwiftAsync) {
    swiftcall::computeABIInfo(CGM, *FI);
  } else {
    getABIInfo().computeInfo(*FI);
  }

  // Loop over all of the computed argument and return value info. If any of
  // them are direct or extend without a specified coerce type, specify the
  // default now.
  ABIArgInfo &retInfo = FI->getReturnInfo();
  if (retInfo.canHaveCoerceToType() && retInfo.getCoerceToType() == nullptr)
    retInfo.setCoerceToType(ConvertType(FI->getReturnType()));

  for (auto &I : FI->arguments())
    if (I.info.canHaveCoerceToType() && I.info.getCoerceToType() == nullptr)
      I.info.setCoerceToType(ConvertType(I.type));

  bool erased = FunctionsBeingProcessed.erase(FI); (void)erased;
  assert(erased && "Not in set?");

  return *FI;
}

CGFunctionInfo *CGFunctionInfo::create(unsigned llvmCC, bool instanceMethod,
                                       bool chainCall, bool delegateCall,
                                       const FunctionType::ExtInfo &info,
                                       ArrayRef<ExtParameterInfo> paramInfos,
                                       CanQualType resultType,
                                       ArrayRef<CanQualType> argTypes,
                                       RequiredArgs required) {
  assert(paramInfos.empty() || paramInfos.size() == argTypes.size());
  assert(!required.allowsOptionalArgs() ||
         required.getNumRequiredArgs() <= argTypes.size());

  void *buffer =
      operator new(totalSizeToAlloc<ArgInfo, ExtParameterInfo>(
          argTypes.size() + 1, paramInfos.size()));

  CGFunctionInfo *FI = new (buffer) CGFunctionInfo();
  FI->CallingConvention = llvmCC;
  FI->EffectiveCallingConvention = llvmCC;
  FI->ASTCallingConvention = info.getCC();
  FI->InstanceMethod = instanceMethod;
  FI->ChainCall = chainCall;
  FI->DelegateCall = delegateCall;
  FI->CmseNSCall = info.getCmseNSCall();
  FI->NoReturn = info.getNoReturn();
  FI->ReturnsRetained = info.getProducesResult();
  FI->NoCallerSavedRegs = info.getNoCallerSavedRegs();
  FI->NoCfCheck = info.getNoCfCheck();
  FI->Required = required;
  FI->HasRegParm = info.getHasRegParm();
  FI->RegParm = info.getRegParm();
  FI->ArgStruct = nullptr;
  FI->ArgStructAlign = 0;
  FI->NumArgs = argTypes.size();
  FI->HasExtParameterInfos = !paramInfos.empty();
  FI->getArgsBuffer()[0].type = resultType;
  FI->MaxVectorWidth = 0;
  for (unsigned i = 0, e = argTypes.size(); i != e; ++i)
    FI->getArgsBuffer()[i + 1].type = argTypes[i];
  for (unsigned i = 0, e = paramInfos.size(); i != e; ++i)
    FI->getExtParameterInfosBuffer()[i] = paramInfos[i];
  return FI;
}

/***/

namespace {
// ABIArgInfo::Expand implementation.

// Specifies the way QualType passed as ABIArgInfo::Expand is expanded.
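// For example (illustrative only): 'int[2]' expands to two ints,
// 'struct { int a; float b; }' expands to an int and a float, and
// '_Complex double' expands to two doubles.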
struct TypeExpansion {
  enum TypeExpansionKind {
    // Elements of constant arrays are expanded recursively.
    TEK_ConstantArray,
    // Record fields are expanded recursively (but if record is a union, only
    // the field with the largest size is expanded).
    TEK_Record,
    // For complex types, real and imaginary parts are expanded recursively.
    TEK_Complex,
    // All other types are not expandable.
    TEK_None
  };

  const TypeExpansionKind Kind;

  TypeExpansion(TypeExpansionKind K) : Kind(K) {}
  virtual ~TypeExpansion() {}
};

struct ConstantArrayExpansion : TypeExpansion {
  QualType EltTy;
  uint64_t NumElts;

  ConstantArrayExpansion(QualType EltTy, uint64_t NumElts)
      : TypeExpansion(TEK_ConstantArray), EltTy(EltTy), NumElts(NumElts) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_ConstantArray;
  }
};

struct RecordExpansion : TypeExpansion {
  SmallVector<const CXXBaseSpecifier *, 1> Bases;

  SmallVector<const FieldDecl *, 1> Fields;

  RecordExpansion(SmallVector<const CXXBaseSpecifier *, 1> &&Bases,
                  SmallVector<const FieldDecl *, 1> &&Fields)
      : TypeExpansion(TEK_Record), Bases(std::move(Bases)),
        Fields(std::move(Fields)) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Record;
  }
};

struct ComplexExpansion : TypeExpansion {
  QualType EltTy;

  ComplexExpansion(QualType EltTy) : TypeExpansion(TEK_Complex), EltTy(EltTy) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_Complex;
  }
};

struct NoExpansion : TypeExpansion {
  NoExpansion() : TypeExpansion(TEK_None) {}
  static bool classof(const TypeExpansion *TE) {
    return TE->Kind == TEK_None;
  }
};
} // namespace

static std::unique_ptr<TypeExpansion>
getTypeExpansion(QualType Ty, const ASTContext &Context) {
  if (const ConstantArrayType *AT = Context.getAsConstantArrayType(Ty)) {
    return std::make_unique<ConstantArrayExpansion>(
        AT->getElementType(), AT->getSize().getZExtValue());
  }
  if (const RecordType *RT = Ty->getAs<RecordType>()) {
    SmallVector<const CXXBaseSpecifier *, 1> Bases;
    SmallVector<const FieldDecl *, 1> Fields;
    const RecordDecl *RD = RT->getDecl();
    assert(!RD->hasFlexibleArrayMember() &&
           "Cannot expand structure with flexible array.");
    if (RD->isUnion()) {
      // Unions can be here only in degenerate cases: all the fields are the
      // same after flattening. Thus we have to use the "largest" field.
      const FieldDecl *LargestFD = nullptr;
      CharUnits UnionSize = CharUnits::Zero();

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        CharUnits FieldSize = Context.getTypeSizeInChars(FD->getType());
        if (UnionSize < FieldSize) {
          UnionSize = FieldSize;
          LargestFD = FD;
        }
      }
      if (LargestFD)
        Fields.push_back(LargestFD);
    } else {
      if (const auto *CXXRD = dyn_cast<CXXRecordDecl>(RD)) {
        assert(!CXXRD->isDynamicClass() &&
               "cannot expand vtable pointers in dynamic classes");
        llvm::append_range(Bases, llvm::make_pointer_range(CXXRD->bases()));
      }

      for (const auto *FD : RD->fields()) {
        if (FD->isZeroLengthBitField(Context))
          continue;
        assert(!FD->isBitField() &&
               "Cannot expand structure with bit-field members.");
        Fields.push_back(FD);
      }
    }
    return std::make_unique<RecordExpansion>(std::move(Bases),
                                             std::move(Fields));
  }
  if (const ComplexType *CT = Ty->getAs<ComplexType>()) {
    return std::make_unique<ComplexExpansion>(CT->getElementType());
  }
  return std::make_unique<NoExpansion>();
}

static int getExpansionSize(QualType Ty, const ASTContext &Context) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    return CAExp->NumElts * getExpansionSize(CAExp->EltTy, Context);
  }
  if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    int Res = 0;
    for (auto BS : RExp->Bases)
      Res += getExpansionSize(BS->getType(), Context);
    for (auto FD : RExp->Fields)
      Res += getExpansionSize(FD->getType(), Context);
    return Res;
  }
  if (isa<ComplexExpansion>(Exp.get()))
    return 2;
  assert(isa<NoExpansion>(Exp.get()));
  return 1;
}

void
CodeGenTypes::getExpandedTypes(QualType Ty,
                               SmallVectorImpl<llvm::Type *>::iterator &TI) {
  auto Exp = getTypeExpansion(Ty, Context);
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    for (int i = 0, n = CAExp->NumElts; i < n; i++) {
      getExpandedTypes(CAExp->EltTy, TI);
    }
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    for (auto BS : RExp->Bases)
      getExpandedTypes(BS->getType(), TI);
    for (auto FD : RExp->Fields)
      getExpandedTypes(FD->getType(), TI);
  } else if (auto CExp = dyn_cast<ComplexExpansion>(Exp.get())) {
    llvm::Type *EltTy = ConvertType(CExp->EltTy);
    *TI++ = EltTy;
    *TI++ = EltTy;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    *TI++ = ConvertType(Ty);
  }
}

static void forConstantArrayExpansion(CodeGenFunction &CGF,
                                      ConstantArrayExpansion *CAE,
                                      Address BaseAddr,
                                      llvm::function_ref<void(Address)> Fn) {
  CharUnits EltSize = CGF.getContext().getTypeSizeInChars(CAE->EltTy);
  CharUnits EltAlign =
      BaseAddr.getAlignment().alignmentOfArrayElement(EltSize);
  llvm::Type *EltTy = CGF.ConvertTypeForMem(CAE->EltTy);

  for (int i = 0, n = CAE->NumElts; i < n; i++) {
    llvm::Value *EltAddr = CGF.Builder.CreateConstGEP2_32(
        BaseAddr.getElementType(), BaseAddr.getPointer(), 0, i);
    Fn(Address(EltAddr, EltTy, EltAlign));
  }
}

void CodeGenFunction::ExpandTypeFromArgs(QualType Ty, LValue LV,
                                         llvm::Function::arg_iterator &AI) {
  assert(LV.isSimple() &&
         "Unexpected non-simple lvalue during struct expansion.");

  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    forConstantArrayExpansion(
        *this, CAExp, LV.getAddress(*this), [&](Address EltAddr) {
          LValue LV = MakeAddrLValue(EltAddr, CAExp->EltTy);
          ExpandTypeFromArgs(CAExp->EltTy, LV, AI);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = LV.getAddress(*this);
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      LValue SubLV = MakeAddrLValue(Base, BS->getType());

      // Recurse onto bases.
      ExpandTypeFromArgs(BS->getType(), SubLV, AI);
    }
    for (auto FD : RExp->Fields) {
      // FIXME: What are the right qualifiers here?
      LValue SubLV = EmitLValueForFieldInitialization(LV, FD);
      ExpandTypeFromArgs(FD->getType(), SubLV, AI);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    auto realValue = &*AI++;
    auto imagValue = &*AI++;
    EmitStoreOfComplex(ComplexPairTy(realValue, imagValue), LV, /*init*/ true);
  } else {
    // Call EmitStoreOfScalar except when the lvalue is a bitfield to emit a
    // primitive store.
    assert(isa<NoExpansion>(Exp.get()));
    llvm::Value *Arg = &*AI++;
    if (LV.isBitField()) {
      EmitStoreThroughLValue(RValue::get(Arg), LV);
    } else {
      // TODO: currently some places are inconsistent in what LLVM
      // pointer type they use (see D118744). Once clang uses opaque pointers
      // all LLVM pointer types will be the same and we can remove this check.
      if (Arg->getType()->isPointerTy()) {
        Address Addr = LV.getAddress(*this);
        Arg = Builder.CreateBitCast(Arg, Addr.getElementType());
      }
      EmitStoreOfScalar(Arg, LV);
    }
  }
}

void CodeGenFunction::ExpandTypeToArgs(
    QualType Ty, CallArg Arg, llvm::FunctionType *IRFuncTy,
    SmallVectorImpl<llvm::Value *> &IRCallArgs, unsigned &IRCallArgPos) {
  auto Exp = getTypeExpansion(Ty, getContext());
  if (auto CAExp = dyn_cast<ConstantArrayExpansion>(Exp.get())) {
    Address Addr = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    forConstantArrayExpansion(
        *this, CAExp, Addr, [&](Address EltAddr) {
          CallArg EltArg = CallArg(
              convertTempToRValue(EltAddr, CAExp->EltTy, SourceLocation()),
              CAExp->EltTy);
          ExpandTypeToArgs(CAExp->EltTy, EltArg, IRFuncTy, IRCallArgs,
                           IRCallArgPos);
        });
  } else if (auto RExp = dyn_cast<RecordExpansion>(Exp.get())) {
    Address This = Arg.hasLValue() ? Arg.getKnownLValue().getAddress(*this)
                                   : Arg.getKnownRValue().getAggregateAddress();
    for (const CXXBaseSpecifier *BS : RExp->Bases) {
      // Perform a single step derived-to-base conversion.
      Address Base =
          GetAddressOfBaseClass(This, Ty->getAsCXXRecordDecl(), &BS, &BS + 1,
                                /*NullCheckValue=*/false, SourceLocation());
      CallArg BaseArg = CallArg(RValue::getAggregate(Base), BS->getType());

      // Recurse onto bases.
      ExpandTypeToArgs(BS->getType(), BaseArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }

    LValue LV = MakeAddrLValue(This, Ty);
    for (auto FD : RExp->Fields) {
      CallArg FldArg =
          CallArg(EmitRValueForField(LV, FD, SourceLocation()), FD->getType());
      ExpandTypeToArgs(FD->getType(), FldArg, IRFuncTy, IRCallArgs,
                       IRCallArgPos);
    }
  } else if (isa<ComplexExpansion>(Exp.get())) {
    ComplexPairTy CV = Arg.getKnownRValue().getComplexVal();
    IRCallArgs[IRCallArgPos++] = CV.first;
    IRCallArgs[IRCallArgPos++] = CV.second;
  } else {
    assert(isa<NoExpansion>(Exp.get()));
    auto RV = Arg.getKnownRValue();
    assert(RV.isScalar() &&
           "Unexpected non-scalar rvalue during struct expansion.");

    // Insert a bitcast as needed.
    llvm::Value *V = RV.getScalarVal();
    if (IRCallArgPos < IRFuncTy->getNumParams() &&
        V->getType() != IRFuncTy->getParamType(IRCallArgPos))
      V = Builder.CreateBitCast(V, IRFuncTy->getParamType(IRCallArgPos));

    IRCallArgs[IRCallArgPos++] = V;
  }
}

/// Create a temporary allocation for the purposes of coercion.
static Address CreateTempAllocaForCoercion(CodeGenFunction &CGF, llvm::Type *Ty,
                                           CharUnits MinAlign,
                                           const Twine &Name = "tmp") {
  // Don't use an alignment that's worse than what LLVM would prefer.
  auto PrefAlign = CGF.CGM.getDataLayout().getPrefTypeAlign(Ty);
  CharUnits Align = std::max(MinAlign, CharUnits::fromQuantity(PrefAlign));

  return CGF.CreateTempAlloca(Ty, Align, Name + ".coerce");
}

/// EnterStructPointerForCoercedAccess - Given a struct pointer that we are
/// accessing some number of bytes out of it, try to gep into the struct to get
/// at its inner goodness. Dive as deep as possible without entering an element
/// with an in-memory size smaller than DstSize.
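///
/// For example (an illustrative sketch): given
///   struct Outer { struct Inner { int x; } a; };
/// and a 4-byte access, this dives through Outer and Inner and returns the
/// address of the 'int' member itself.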
static Address
EnterStructPointerForCoercedAccess(Address SrcPtr,
                                   llvm::StructType *SrcSTy,
                                   uint64_t DstSize, CodeGenFunction &CGF) {
  // We can't dive into a zero-element struct.
  if (SrcSTy->getNumElements() == 0) return SrcPtr;

  llvm::Type *FirstElt = SrcSTy->getElementType(0);

  // If the first elt is at least as large as what we're looking for, or if the
  // first element is the same size as the whole struct, we can enter it. The
  // comparison must be made on the store size and not the alloca size. Using
  // the alloca size may overstate the size of the load.
  uint64_t FirstEltSize =
      CGF.CGM.getDataLayout().getTypeStoreSize(FirstElt);
  if (FirstEltSize < DstSize &&
      FirstEltSize < CGF.CGM.getDataLayout().getTypeStoreSize(SrcSTy))
    return SrcPtr;

  // GEP into the first element.
  SrcPtr = CGF.Builder.CreateStructGEP(SrcPtr, 0, "coerce.dive");

  // If the first element is a struct, recurse.
  llvm::Type *SrcTy = SrcPtr.getElementType();
  if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy))
    return EnterStructPointerForCoercedAccess(SrcPtr, SrcSTy, DstSize, CGF);

  return SrcPtr;
}

/// CoerceIntOrPtrToIntOrPtr - Convert a value Val to the specific Ty where both
/// are either integers or pointers. This does a truncation of the value if it
/// is too large or a zero extension if it is too small.
///
/// This behaves as if the value were coerced through memory, so on big-endian
/// targets the high bits are preserved in a truncation, while little-endian
/// targets preserve the low bits.
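///
/// For example, coercing an i64 value to i32 on a big-endian target shifts
/// the value right by 32 before truncating, keeping the high 32 bits; on a
/// little-endian target a plain truncation keeps the low 32 bits instead.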
static llvm::Value *CoerceIntOrPtrToIntOrPtr(llvm::Value *Val,
                                             llvm::Type *Ty,
                                             CodeGenFunction &CGF) {
  if (Val->getType() == Ty)
    return Val;

  if (isa<llvm::PointerType>(Val->getType())) {
    // If this is Pointer->Pointer avoid conversion to and from int.
    if (isa<llvm::PointerType>(Ty))
      return CGF.Builder.CreateBitCast(Val, Ty, "coerce.val");

    // Convert the pointer to an integer so we can play with its width.
    Val = CGF.Builder.CreatePtrToInt(Val, CGF.IntPtrTy, "coerce.val.pi");
  }

  llvm::Type *DestIntTy = Ty;
  if (isa<llvm::PointerType>(DestIntTy))
    DestIntTy = CGF.IntPtrTy;

  if (Val->getType() != DestIntTy) {
    const llvm::DataLayout &DL = CGF.CGM.getDataLayout();
    if (DL.isBigEndian()) {
      // Preserve the high bits on big-endian targets.
      // That is what memory coercion does.
      uint64_t SrcSize = DL.getTypeSizeInBits(Val->getType());
      uint64_t DstSize = DL.getTypeSizeInBits(DestIntTy);

      if (SrcSize > DstSize) {
        Val = CGF.Builder.CreateLShr(Val, SrcSize - DstSize, "coerce.highbits");
        Val = CGF.Builder.CreateTrunc(Val, DestIntTy, "coerce.val.ii");
      } else {
        Val = CGF.Builder.CreateZExt(Val, DestIntTy, "coerce.val.ii");
        Val = CGF.Builder.CreateShl(Val, DstSize - SrcSize, "coerce.highbits");
      }
    } else {
      // Little-endian targets preserve the low bits. No shifts required.
      Val = CGF.Builder.CreateIntCast(Val, DestIntTy, false, "coerce.val.ii");
    }
  }

  if (isa<llvm::PointerType>(Ty))
    Val = CGF.Builder.CreateIntToPtr(Val, Ty, "coerce.val.ip");
  return Val;
}

/// CreateCoercedLoad - Create a load from \arg SrcPtr interpreted as
/// a pointer to an object of type \arg Ty, known to be aligned to
/// \arg SrcAlign bytes.
///
/// This safely handles the case when the src type is smaller than the
/// destination type; in this situation the values of bits which are not
/// present in the src are undefined.
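///
/// For example (an illustrative sketch): loading a 12-byte struct as a
/// 16-byte [2 x i64] goes through a temporary; the copied 12 bytes are
/// well-defined, while the remaining 4 bytes of the loaded value are
/// undefined.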
CreateCoercedLoad(Address Src,llvm::Type * Ty,CodeGenFunction & CGF)1258 static llvm::Value *CreateCoercedLoad(Address Src, llvm::Type *Ty,
1259 CodeGenFunction &CGF) {
1260 llvm::Type *SrcTy = Src.getElementType();
1261
1262 // If SrcTy and Ty are the same, just do a load.
1263 if (SrcTy == Ty)
1264 return CGF.Builder.CreateLoad(Src);
1265
1266 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(Ty);
1267
1268 if (llvm::StructType *SrcSTy = dyn_cast<llvm::StructType>(SrcTy)) {
1269 Src = EnterStructPointerForCoercedAccess(Src, SrcSTy,
1270 DstSize.getFixedValue(), CGF);
1271 SrcTy = Src.getElementType();
1272 }
1273
1274 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1275
1276 // If the source and destination are integer or pointer types, just do an
1277 // extension or truncation to the desired type.
1278 if ((isa<llvm::IntegerType>(Ty) || isa<llvm::PointerType>(Ty)) &&
1279 (isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy))) {
1280 llvm::Value *Load = CGF.Builder.CreateLoad(Src);
1281 return CoerceIntOrPtrToIntOrPtr(Load, Ty, CGF);
1282 }
1283
1284 // If load is legal, just bitcast the src pointer.
1285 if (!SrcSize.isScalable() && !DstSize.isScalable() &&
1286 SrcSize.getFixedValue() >= DstSize.getFixedValue()) {
1287 // Generally SrcSize is never greater than DstSize, since this means we are
1288 // losing bits. However, this can happen in cases where the structure has
1289 // additional padding, for example due to a user specified alignment.
1290 //
1291 // FIXME: Assert that we aren't truncating non-padding bits when have access
1292 // to that information.
1293 Src = Src.withElementType(Ty);
1294 return CGF.Builder.CreateLoad(Src);
1295 }
1296
1297 // If coercing a fixed vector to a scalable vector for ABI compatibility, and
1298 // the types match, use the llvm.vector.insert intrinsic to perform the
1299 // conversion.
1300 if (auto *ScalableDst = dyn_cast<llvm::ScalableVectorType>(Ty)) {
1301 if (auto *FixedSrc = dyn_cast<llvm::FixedVectorType>(SrcTy)) {
1302 // If we are casting a fixed i8 vector to a scalable 16 x i1 predicate
1303 // vector, use a vector insert and bitcast the result.
1304 bool NeedsBitcast = false;
1305 auto PredType =
1306 llvm::ScalableVectorType::get(CGF.Builder.getInt1Ty(), 16);
1307 llvm::Type *OrigType = Ty;
1308 if (ScalableDst == PredType &&
1309 FixedSrc->getElementType() == CGF.Builder.getInt8Ty()) {
1310 ScalableDst = llvm::ScalableVectorType::get(CGF.Builder.getInt8Ty(), 2);
1311 NeedsBitcast = true;
1312 }
1313 if (ScalableDst->getElementType() == FixedSrc->getElementType()) {
1314 auto *Load = CGF.Builder.CreateLoad(Src);
1315 auto *UndefVec = llvm::UndefValue::get(ScalableDst);
1316 auto *Zero = llvm::Constant::getNullValue(CGF.CGM.Int64Ty);
1317 llvm::Value *Result = CGF.Builder.CreateInsertVector(
1318 ScalableDst, UndefVec, Load, Zero, "cast.scalable");
1319 if (NeedsBitcast)
1320 Result = CGF.Builder.CreateBitCast(Result, OrigType);
1321 return Result;
1322 }
1323 }
1324 }
1325
1326 // Otherwise do coercion through memory. This is stupid, but simple.
1327 Address Tmp =
1328 CreateTempAllocaForCoercion(CGF, Ty, Src.getAlignment(), Src.getName());
1329 CGF.Builder.CreateMemCpy(
1330 Tmp.getPointer(), Tmp.getAlignment().getAsAlign(), Src.getPointer(),
1331 Src.getAlignment().getAsAlign(),
1332 llvm::ConstantInt::get(CGF.IntPtrTy, SrcSize.getKnownMinValue()));
1333 return CGF.Builder.CreateLoad(Tmp);
1334 }
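// Illustrative sketch of the paths above (IR shown for exposition only, not
// verified on any particular target): loading a { i32, i32 } source as a
// coerced i64 takes the "bitcast the src pointer" path, since SrcSize (8)
// >= DstSize (8):
//   %coerce = load i64, ptr %src, align 4
// whereas loading a 4-byte { float } as i64 (SrcSize < DstSize) falls
// through to the memcpy-based coercion via a temporary alloca.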
1335
1336 // Function to store a first-class aggregate into memory. We prefer to
1337 // store the elements rather than the aggregate to be more friendly to
1338 // fast-isel.
1339 // FIXME: Do we need to recurse here?
1340 void CodeGenFunction::EmitAggregateStore(llvm::Value *Val, Address Dest,
1341 bool DestIsVolatile) {
1342 // Prefer scalar stores to first-class aggregate stores.
1343 if (llvm::StructType *STy = dyn_cast<llvm::StructType>(Val->getType())) {
1344 for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
1345 Address EltPtr = Builder.CreateStructGEP(Dest, i);
1346 llvm::Value *Elt = Builder.CreateExtractValue(Val, i);
1347 Builder.CreateStore(Elt, EltPtr, DestIsVolatile);
1348 }
1349 } else {
1350 Builder.CreateStore(Val, Dest, DestIsVolatile);
1351 }
1352 }
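// For example (illustrative IR), storing a coerced value of type
// { i32, float } emits per-element scalar stores instead of a single
// first-class-aggregate store:
//   %e0 = extractvalue { i32, float } %val, 0
//   store i32 %e0, ptr %dest
//   %e1 = extractvalue { i32, float } %val, 1
//   %p1 = getelementptr inbounds { i32, float }, ptr %dest, i32 0, i32 1
//   store float %e1, ptr %p1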
1353
1354 /// CreateCoercedStore - Create a store to \arg DstPtr from \arg Src,
1355 /// where the source and destination may have different types. The
1356 /// destination is known to be aligned to \arg DstAlign bytes.
1357 ///
1358 /// This safely handles the case when the src type is larger than the
1359 /// destination type; the upper bits of the src will be lost.
1360 static void CreateCoercedStore(llvm::Value *Src,
1361 Address Dst,
1362 bool DstIsVolatile,
1363 CodeGenFunction &CGF) {
1364 llvm::Type *SrcTy = Src->getType();
1365 llvm::Type *DstTy = Dst.getElementType();
1366 if (SrcTy == DstTy) {
1367 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1368 return;
1369 }
1370
1371 llvm::TypeSize SrcSize = CGF.CGM.getDataLayout().getTypeAllocSize(SrcTy);
1372
1373 if (llvm::StructType *DstSTy = dyn_cast<llvm::StructType>(DstTy)) {
1374 Dst = EnterStructPointerForCoercedAccess(Dst, DstSTy,
1375 SrcSize.getFixedValue(), CGF);
1376 DstTy = Dst.getElementType();
1377 }
1378
1379 llvm::PointerType *SrcPtrTy = llvm::dyn_cast<llvm::PointerType>(SrcTy);
1380 llvm::PointerType *DstPtrTy = llvm::dyn_cast<llvm::PointerType>(DstTy);
1381 if (SrcPtrTy && DstPtrTy &&
1382 SrcPtrTy->getAddressSpace() != DstPtrTy->getAddressSpace()) {
1383 Src = CGF.Builder.CreateAddrSpaceCast(Src, DstTy);
1384 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1385 return;
1386 }
1387
1388 // If the source and destination are integer or pointer types, just do an
1389 // extension or truncation to the desired type.
1390 if ((isa<llvm::IntegerType>(SrcTy) || isa<llvm::PointerType>(SrcTy)) &&
1391 (isa<llvm::IntegerType>(DstTy) || isa<llvm::PointerType>(DstTy))) {
1392 Src = CoerceIntOrPtrToIntOrPtr(Src, DstTy, CGF);
1393 CGF.Builder.CreateStore(Src, Dst, DstIsVolatile);
1394 return;
1395 }
1396
1397 llvm::TypeSize DstSize = CGF.CGM.getDataLayout().getTypeAllocSize(DstTy);
1398
1399 // If store is legal, just bitcast the src pointer.
1400 if (isa<llvm::ScalableVectorType>(SrcTy) ||
1401 isa<llvm::ScalableVectorType>(DstTy) ||
1402 SrcSize.getFixedValue() <= DstSize.getFixedValue()) {
1403 Dst = Dst.withElementType(SrcTy);
1404 CGF.EmitAggregateStore(Src, Dst, DstIsVolatile);
1405 } else {
1406 // Otherwise do coercion through memory. This is stupid, but
1407 // simple.
1408
1409 // Generally SrcSize is never greater than DstSize, since this means we are
1410 // losing bits. However, this can happen in cases where the structure has
1411 // additional padding, for example due to a user specified alignment.
1412 //
1413     // FIXME: Assert that we aren't truncating non-padding bits when we have
1414     // access to that information.
1415 Address Tmp = CreateTempAllocaForCoercion(CGF, SrcTy, Dst.getAlignment());
1416 CGF.Builder.CreateStore(Src, Tmp);
1417 CGF.Builder.CreateMemCpy(
1418 Dst.getPointer(), Dst.getAlignment().getAsAlign(), Tmp.getPointer(),
1419 Tmp.getAlignment().getAsAlign(),
1420 llvm::ConstantInt::get(CGF.IntPtrTy, DstSize.getFixedValue()));
1421 }
1422 }
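// Illustrative sketch of the memcpy fallback above: storing a coerced i64
// into a 4-byte { i32 } destination spills the value and copies only
// DstSize bytes, dropping the upper bits:
//   %tmp = alloca i64
//   store i64 %src, ptr %tmp
//   call void @llvm.memcpy.p0.p0.i64(ptr %dst, ptr %tmp, i64 4, i1 false)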
1423
1424 static Address emitAddressAtOffset(CodeGenFunction &CGF, Address addr,
1425 const ABIArgInfo &info) {
1426 if (unsigned offset = info.getDirectOffset()) {
1427 addr = addr.withElementType(CGF.Int8Ty);
1428 addr = CGF.Builder.CreateConstInBoundsByteGEP(addr,
1429 CharUnits::fromQuantity(offset));
1430 addr = addr.withElementType(info.getCoerceToType());
1431 }
1432 return addr;
1433 }
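// For example, an ABIArgInfo with getDirectOffset() == 8 rewrites the
// address to point 8 bytes into the object before the coerced access
// (illustrative IR):
//   %p = getelementptr inbounds i8, ptr %addr, i64 8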
1434
1435 namespace {
1436
1437 /// Encapsulates information about the way function arguments from
1438 /// CGFunctionInfo should be passed to the actual LLVM IR function.
1439 class ClangToLLVMArgMapping {
1440 static const unsigned InvalidIndex = ~0U;
1441 unsigned InallocaArgNo;
1442 unsigned SRetArgNo;
1443 unsigned TotalIRArgs;
1444
1445 /// Arguments of the LLVM IR function corresponding to a single Clang argument.
1446 struct IRArgs {
1447 unsigned PaddingArgIndex;
1448 // Argument is expanded to IR arguments at positions
1449 // [FirstArgIndex, FirstArgIndex + NumberOfArgs).
1450 unsigned FirstArgIndex;
1451 unsigned NumberOfArgs;
1452
1453 IRArgs()
1454 : PaddingArgIndex(InvalidIndex), FirstArgIndex(InvalidIndex),
1455 NumberOfArgs(0) {}
1456 };
1457
1458 SmallVector<IRArgs, 8> ArgInfo;
1459
1460 public:
1461 ClangToLLVMArgMapping(const ASTContext &Context, const CGFunctionInfo &FI,
1462 bool OnlyRequiredArgs = false)
1463 : InallocaArgNo(InvalidIndex), SRetArgNo(InvalidIndex), TotalIRArgs(0),
1464 ArgInfo(OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size()) {
1465 construct(Context, FI, OnlyRequiredArgs);
1466 }
1467
1468 bool hasInallocaArg() const { return InallocaArgNo != InvalidIndex; }
1469 unsigned getInallocaArgNo() const {
1470 assert(hasInallocaArg());
1471 return InallocaArgNo;
1472 }
1473
1474 bool hasSRetArg() const { return SRetArgNo != InvalidIndex; }
1475 unsigned getSRetArgNo() const {
1476 assert(hasSRetArg());
1477 return SRetArgNo;
1478 }
1479
1480 unsigned totalIRArgs() const { return TotalIRArgs; }
1481
1482 bool hasPaddingArg(unsigned ArgNo) const {
1483 assert(ArgNo < ArgInfo.size());
1484 return ArgInfo[ArgNo].PaddingArgIndex != InvalidIndex;
1485 }
1486 unsigned getPaddingArgNo(unsigned ArgNo) const {
1487 assert(hasPaddingArg(ArgNo));
1488 return ArgInfo[ArgNo].PaddingArgIndex;
1489 }
1490
1491 /// Returns the index of the first IR argument corresponding to ArgNo, and
1492 /// the number of such IR arguments.
1493 std::pair<unsigned, unsigned> getIRArgs(unsigned ArgNo) const {
1494 assert(ArgNo < ArgInfo.size());
1495 return std::make_pair(ArgInfo[ArgNo].FirstArgIndex,
1496 ArgInfo[ArgNo].NumberOfArgs);
1497 }
1498
1499 private:
1500 void construct(const ASTContext &Context, const CGFunctionInfo &FI,
1501 bool OnlyRequiredArgs);
1502 };
1503
1504 void ClangToLLVMArgMapping::construct(const ASTContext &Context,
1505 const CGFunctionInfo &FI,
1506 bool OnlyRequiredArgs) {
1507 unsigned IRArgNo = 0;
1508 bool SwapThisWithSRet = false;
1509 const ABIArgInfo &RetAI = FI.getReturnInfo();
1510
1511 if (RetAI.getKind() == ABIArgInfo::Indirect) {
1512 SwapThisWithSRet = RetAI.isSRetAfterThis();
1513 SRetArgNo = SwapThisWithSRet ? 1 : IRArgNo++;
1514 }
1515
1516 unsigned ArgNo = 0;
1517 unsigned NumArgs = OnlyRequiredArgs ? FI.getNumRequiredArgs() : FI.arg_size();
1518 for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(); ArgNo < NumArgs;
1519 ++I, ++ArgNo) {
1520 assert(I != FI.arg_end());
1521 QualType ArgType = I->type;
1522 const ABIArgInfo &AI = I->info;
1523 // Collect data about IR arguments corresponding to Clang argument ArgNo.
1524 auto &IRArgs = ArgInfo[ArgNo];
1525
1526 if (AI.getPaddingType())
1527 IRArgs.PaddingArgIndex = IRArgNo++;
1528
1529 switch (AI.getKind()) {
1530 case ABIArgInfo::Extend:
1531 case ABIArgInfo::Direct: {
1532 // FIXME: handle sseregparm someday...
1533 llvm::StructType *STy = dyn_cast<llvm::StructType>(AI.getCoerceToType());
1534 if (AI.isDirect() && AI.getCanBeFlattened() && STy) {
1535 IRArgs.NumberOfArgs = STy->getNumElements();
1536 } else {
1537 IRArgs.NumberOfArgs = 1;
1538 }
1539 break;
1540 }
1541 case ABIArgInfo::Indirect:
1542 case ABIArgInfo::IndirectAliased:
1543 IRArgs.NumberOfArgs = 1;
1544 break;
1545 case ABIArgInfo::Ignore:
1546 case ABIArgInfo::InAlloca:
1547 // Ignore and InAlloca don't have matching LLVM parameters.
1548 IRArgs.NumberOfArgs = 0;
1549 break;
1550 case ABIArgInfo::CoerceAndExpand:
1551 IRArgs.NumberOfArgs = AI.getCoerceAndExpandTypeSequence().size();
1552 break;
1553 case ABIArgInfo::Expand:
1554 IRArgs.NumberOfArgs = getExpansionSize(ArgType, Context);
1555 break;
1556 }
1557
1558 if (IRArgs.NumberOfArgs > 0) {
1559 IRArgs.FirstArgIndex = IRArgNo;
1560 IRArgNo += IRArgs.NumberOfArgs;
1561 }
1562
1563 // Skip over the sret parameter when it comes second. We already handled it
1564 // above.
1565 if (IRArgNo == 1 && SwapThisWithSRet)
1566 IRArgNo++;
1567 }
1568 assert(ArgNo == ArgInfo.size());
1569
1570 if (FI.usesInAlloca())
1571 InallocaArgNo = IRArgNo++;
1572
1573 TotalIRArgs = IRArgNo;
1574 }
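// Illustrative mapping (assuming a target where the sret parameter comes
// second): for an instance method whose return uses isSRetAfterThis(), the
// IR argument order built above is
//   ir[0] = 'this'        (Clang argument 0)
//   ir[1] = sret pointer  (skipped by the loop)
//   ir[2]... = the remaining Clang arguments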
1575 } // namespace
1576
1577 /***/
1578
1579 bool CodeGenModule::ReturnTypeUsesSRet(const CGFunctionInfo &FI) {
1580 const auto &RI = FI.getReturnInfo();
1581 return RI.isIndirect() || (RI.isInAlloca() && RI.getInAllocaSRet());
1582 }
1583
1584 bool CodeGenModule::ReturnTypeHasInReg(const CGFunctionInfo &FI) {
1585 const auto &RI = FI.getReturnInfo();
1586 return RI.getInReg();
1587 }
1588
1589 bool CodeGenModule::ReturnSlotInterferesWithArgs(const CGFunctionInfo &FI) {
1590 return ReturnTypeUsesSRet(FI) &&
1591 getTargetCodeGenInfo().doesReturnSlotInterfereWithArgs();
1592 }
1593
1594 bool CodeGenModule::ReturnTypeUsesFPRet(QualType ResultType) {
1595 if (const BuiltinType *BT = ResultType->getAs<BuiltinType>()) {
1596 switch (BT->getKind()) {
1597 default:
1598 return false;
1599 case BuiltinType::Float:
1600 return getTarget().useObjCFPRetForRealType(FloatModeKind::Float);
1601 case BuiltinType::Double:
1602 return getTarget().useObjCFPRetForRealType(FloatModeKind::Double);
1603 case BuiltinType::LongDouble:
1604 return getTarget().useObjCFPRetForRealType(FloatModeKind::LongDouble);
1605 }
1606 }
1607
1608 return false;
1609 }
1610
1611 bool CodeGenModule::ReturnTypeUsesFP2Ret(QualType ResultType) {
1612 if (const ComplexType *CT = ResultType->getAs<ComplexType>()) {
1613 if (const BuiltinType *BT = CT->getElementType()->getAs<BuiltinType>()) {
1614 if (BT->getKind() == BuiltinType::LongDouble)
1615 return getTarget().useObjCFP2RetForComplexLongDouble();
1616 }
1617 }
1618
1619 return false;
1620 }
1621
1622 llvm::FunctionType *CodeGenTypes::GetFunctionType(GlobalDecl GD) {
1623 const CGFunctionInfo &FI = arrangeGlobalDeclaration(GD);
1624 return GetFunctionType(FI);
1625 }
1626
1627 llvm::FunctionType *
1628 CodeGenTypes::GetFunctionType(const CGFunctionInfo &FI) {
1629
1630 bool Inserted = FunctionsBeingProcessed.insert(&FI).second;
1631 (void)Inserted;
1632 assert(Inserted && "Recursively being processed?");
1633
1634 llvm::Type *resultType = nullptr;
1635 const ABIArgInfo &retAI = FI.getReturnInfo();
1636 switch (retAI.getKind()) {
1637 case ABIArgInfo::Expand:
1638 case ABIArgInfo::IndirectAliased:
1639 llvm_unreachable("Invalid ABI kind for return argument");
1640
1641 case ABIArgInfo::Extend:
1642 case ABIArgInfo::Direct:
1643 resultType = retAI.getCoerceToType();
1644 break;
1645
1646 case ABIArgInfo::InAlloca:
1647 if (retAI.getInAllocaSRet()) {
1648 // sret things on win32 aren't void, they return the sret pointer.
1649 QualType ret = FI.getReturnType();
1650 unsigned addressSpace = CGM.getTypes().getTargetAddressSpace(ret);
1651 resultType = llvm::PointerType::get(getLLVMContext(), addressSpace);
1652 } else {
1653 resultType = llvm::Type::getVoidTy(getLLVMContext());
1654 }
1655 break;
1656
1657 case ABIArgInfo::Indirect:
1658 case ABIArgInfo::Ignore:
1659 resultType = llvm::Type::getVoidTy(getLLVMContext());
1660 break;
1661
1662 case ABIArgInfo::CoerceAndExpand:
1663 resultType = retAI.getUnpaddedCoerceAndExpandType();
1664 break;
1665 }
1666
1667 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI, true);
1668 SmallVector<llvm::Type*, 8> ArgTypes(IRFunctionArgs.totalIRArgs());
1669
1670 // Add type for sret argument.
1671 if (IRFunctionArgs.hasSRetArg()) {
1672 QualType Ret = FI.getReturnType();
1673 unsigned AddressSpace = CGM.getTypes().getTargetAddressSpace(Ret);
1674 ArgTypes[IRFunctionArgs.getSRetArgNo()] =
1675 llvm::PointerType::get(getLLVMContext(), AddressSpace);
1676 }
1677
1678 // Add type for inalloca argument.
1679 if (IRFunctionArgs.hasInallocaArg())
1680 ArgTypes[IRFunctionArgs.getInallocaArgNo()] =
1681 llvm::PointerType::getUnqual(getLLVMContext());
1682
1683 // Add in all of the required arguments.
1684 unsigned ArgNo = 0;
1685 CGFunctionInfo::const_arg_iterator it = FI.arg_begin(),
1686 ie = it + FI.getNumRequiredArgs();
1687 for (; it != ie; ++it, ++ArgNo) {
1688 const ABIArgInfo &ArgInfo = it->info;
1689
1690 // Insert a padding type to ensure proper alignment.
1691 if (IRFunctionArgs.hasPaddingArg(ArgNo))
1692 ArgTypes[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
1693 ArgInfo.getPaddingType();
1694
1695 unsigned FirstIRArg, NumIRArgs;
1696 std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
1697
1698 switch (ArgInfo.getKind()) {
1699 case ABIArgInfo::Ignore:
1700 case ABIArgInfo::InAlloca:
1701 assert(NumIRArgs == 0);
1702 break;
1703
1704 case ABIArgInfo::Indirect:
1705 assert(NumIRArgs == 1);
1706 // indirect arguments are always on the stack, which is alloca addr space.
1707 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1708 getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());
1709 break;
1710 case ABIArgInfo::IndirectAliased:
1711 assert(NumIRArgs == 1);
1712 ArgTypes[FirstIRArg] = llvm::PointerType::get(
1713 getLLVMContext(), ArgInfo.getIndirectAddrSpace());
1714 break;
1715 case ABIArgInfo::Extend:
1716 case ABIArgInfo::Direct: {
1717 // Fast-isel and the optimizer generally like scalar values better than
1718 // FCAs, so we flatten them if this is safe to do for this argument.
1719 llvm::Type *argType = ArgInfo.getCoerceToType();
1720 llvm::StructType *st = dyn_cast<llvm::StructType>(argType);
1721 if (st && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
1722 assert(NumIRArgs == st->getNumElements());
1723 for (unsigned i = 0, e = st->getNumElements(); i != e; ++i)
1724 ArgTypes[FirstIRArg + i] = st->getElementType(i);
1725 } else {
1726 assert(NumIRArgs == 1);
1727 ArgTypes[FirstIRArg] = argType;
1728 }
1729 break;
1730 }
1731
1732 case ABIArgInfo::CoerceAndExpand: {
1733 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1734 for (auto *EltTy : ArgInfo.getCoerceAndExpandTypeSequence()) {
1735 *ArgTypesIter++ = EltTy;
1736 }
1737 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1738 break;
1739 }
1740
1741 case ABIArgInfo::Expand:
1742 auto ArgTypesIter = ArgTypes.begin() + FirstIRArg;
1743 getExpandedTypes(it->type, ArgTypesIter);
1744 assert(ArgTypesIter == ArgTypes.begin() + FirstIRArg + NumIRArgs);
1745 break;
1746 }
1747 }
1748
1749 bool Erased = FunctionsBeingProcessed.erase(&FI); (void)Erased;
1750 assert(Erased && "Not in set?");
1751
1752 return llvm::FunctionType::get(resultType, ArgTypes, FI.isVariadic());
1753 }
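// Illustrative result (x86-64 SysV; other targets will differ): for
//   struct P { float x, y; };
//   void f(struct P p);
// P is classified Direct with coerce-to type <2 x float>, so the function
// type built above is: void (<2 x float>)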
1754
1755 llvm::Type *CodeGenTypes::GetFunctionTypeForVTable(GlobalDecl GD) {
1756 const CXXMethodDecl *MD = cast<CXXMethodDecl>(GD.getDecl());
1757 const FunctionProtoType *FPT = MD->getType()->castAs<FunctionProtoType>();
1758
1759 if (!isFuncTypeConvertible(FPT))
1760 return llvm::StructType::get(getLLVMContext());
1761
1762 return GetFunctionType(GD);
1763 }
1764
1765 static void AddAttributesFromFunctionProtoType(ASTContext &Ctx,
1766 llvm::AttrBuilder &FuncAttrs,
1767 const FunctionProtoType *FPT) {
1768 if (!FPT)
1769 return;
1770
1771 if (!isUnresolvedExceptionSpec(FPT->getExceptionSpecType()) &&
1772 FPT->isNothrow())
1773 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
1774
1775 unsigned SMEBits = FPT->getAArch64SMEAttributes();
1776 if (SMEBits & FunctionType::SME_PStateSMEnabledMask)
1777 FuncAttrs.addAttribute("aarch64_pstate_sm_enabled");
1778 if (SMEBits & FunctionType::SME_PStateSMCompatibleMask)
1779 FuncAttrs.addAttribute("aarch64_pstate_sm_compatible");
1780
1781 // ZA
1782 if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Out ||
1783 FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_InOut)
1784 FuncAttrs.addAttribute("aarch64_pstate_za_shared");
1785 if (FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_Preserves ||
1786 FunctionType::getArmZAState(SMEBits) == FunctionType::ARM_In) {
1787 FuncAttrs.addAttribute("aarch64_pstate_za_shared");
1788 FuncAttrs.addAttribute("aarch64_pstate_za_preserved");
1789 }
1790
1791 // ZT0
1792 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Preserves)
1793 FuncAttrs.addAttribute("aarch64_preserves_zt0");
1794 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_In)
1795 FuncAttrs.addAttribute("aarch64_in_zt0");
1796 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_Out)
1797 FuncAttrs.addAttribute("aarch64_out_zt0");
1798 if (FunctionType::getArmZT0State(SMEBits) == FunctionType::ARM_InOut)
1799 FuncAttrs.addAttribute("aarch64_inout_zt0");
1800 }
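// For example, a prototype such as
//   void g() noexcept __arm_streaming;
// (assuming SME is available for the target) yields NoUnwind plus the
// string attribute "aarch64_pstate_sm_enabled" through the checks above.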
1801
1802 static void AddAttributesFromAssumes(llvm::AttrBuilder &FuncAttrs,
1803 const Decl *Callee) {
1804 if (!Callee)
1805 return;
1806
1807 SmallVector<StringRef, 4> Attrs;
1808
1809 for (const AssumptionAttr *AA : Callee->specific_attrs<AssumptionAttr>())
1810 AA->getAssumption().split(Attrs, ",");
1811
1812 if (!Attrs.empty())
1813 FuncAttrs.addAttribute(llvm::AssumptionAttrKey,
1814 llvm::join(Attrs.begin(), Attrs.end(), ","));
1815 }
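// For example, a declaration carrying
//   __attribute__((assume("omp_no_openmp")))
// becomes the string attribute "llvm.assume"="omp_no_openmp"; multiple
// assumption attributes are joined with commas.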
1816
1817 bool CodeGenModule::MayDropFunctionReturn(const ASTContext &Context,
1818 QualType ReturnType) const {
1819 // We can't just discard the return value for a record type with a
1820 // complex destructor or a non-trivially copyable type.
1821 if (const RecordType *RT =
1822 ReturnType.getCanonicalType()->getAs<RecordType>()) {
1823 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1824 return ClassDecl->hasTrivialDestructor();
1825 }
1826 return ReturnType.isTriviallyCopyableType(Context);
1827 }
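// For example, a return of type std::string (non-trivial destructor)
// cannot be dropped, while a trivially copyable struct { int x; } can.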
1828
1829 static bool HasStrictReturn(const CodeGenModule &Module, QualType RetTy,
1830 const Decl *TargetDecl) {
1831   // As-is, msan cannot tolerate a noundef mismatch between caller and
1832   // implementation. A mismatch is possible for e.g. indirect calls from a
1833   // C caller into C++. Such mismatches lead to confusing false reports. To
1834   // avoid an expensive workaround under msan, we enforce initialization
1835   // even in uncommon cases where it's allowed.
1836 if (Module.getLangOpts().Sanitize.has(SanitizerKind::Memory))
1837 return true;
1838 // C++ explicitly makes returning undefined values UB. C's rule only applies
1839 // to used values, so we never mark them noundef for now.
1840 if (!Module.getLangOpts().CPlusPlus)
1841 return false;
1842 if (TargetDecl) {
1843 if (const FunctionDecl *FDecl = dyn_cast<FunctionDecl>(TargetDecl)) {
1844 if (FDecl->isExternC())
1845 return false;
1846 } else if (const VarDecl *VDecl = dyn_cast<VarDecl>(TargetDecl)) {
1847 // Function pointer.
1848 if (VDecl->isExternC())
1849 return false;
1850 }
1851 }
1852
1853 // We don't want to be too aggressive with the return checking, unless
1854 // it's explicit in the code opts or we're using an appropriate sanitizer.
1855 // Try to respect what the programmer intended.
1856 return Module.getCodeGenOpts().StrictReturn ||
1857 !Module.MayDropFunctionReturn(Module.getContext(), RetTy) ||
1858 Module.getLangOpts().Sanitize.has(SanitizerKind::Return);
1859 }
1860
1861 /// Add denormal-fp-math and denormal-fp-math-f32 as appropriate for the
1862 /// requested denormal behavior, accounting for the overriding behavior of the
1863 /// -f32 case.
1864 static void addDenormalModeAttrs(llvm::DenormalMode FPDenormalMode,
1865 llvm::DenormalMode FP32DenormalMode,
1866 llvm::AttrBuilder &FuncAttrs) {
1867 if (FPDenormalMode != llvm::DenormalMode::getDefault())
1868 FuncAttrs.addAttribute("denormal-fp-math", FPDenormalMode.str());
1869
1870 if (FP32DenormalMode != FPDenormalMode && FP32DenormalMode.isValid())
1871 FuncAttrs.addAttribute("denormal-fp-math-f32", FP32DenormalMode.str());
1872 }
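// Illustrative output: with -fdenormal-fp-math=preserve-sign and
// -fdenormal-fp-math-f32=ieee, this adds
//   "denormal-fp-math"="preserve-sign,preserve-sign"
//   "denormal-fp-math-f32"="ieee,ieee"
// the f32 attribute is omitted whenever it matches the general mode.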
1873
1874 /// Add default attributes to a function; these attributes have merge
1875 /// semantics under -mlink-builtin-bitcode and should not simply overwrite
1876 /// existing attributes in the linked library.
1877 static void
1878 addMergableDefaultFunctionAttributes(const CodeGenOptions &CodeGenOpts,
1879 llvm::AttrBuilder &FuncAttrs) {
1880 addDenormalModeAttrs(CodeGenOpts.FPDenormalMode, CodeGenOpts.FP32DenormalMode,
1881 FuncAttrs);
1882 }
1883
1884 static void getTrivialDefaultFunctionAttributes(
1885 StringRef Name, bool HasOptnone, const CodeGenOptions &CodeGenOpts,
1886 const LangOptions &LangOpts, bool AttrOnCallSite,
1887 llvm::AttrBuilder &FuncAttrs) {
1888 // OptimizeNoneAttr takes precedence over -Os or -Oz. No warning needed.
1889 if (!HasOptnone) {
1890 if (CodeGenOpts.OptimizeSize)
1891 FuncAttrs.addAttribute(llvm::Attribute::OptimizeForSize);
1892 if (CodeGenOpts.OptimizeSize == 2)
1893 FuncAttrs.addAttribute(llvm::Attribute::MinSize);
1894 }
1895
1896 if (CodeGenOpts.DisableRedZone)
1897 FuncAttrs.addAttribute(llvm::Attribute::NoRedZone);
1898 if (CodeGenOpts.IndirectTlsSegRefs)
1899 FuncAttrs.addAttribute("indirect-tls-seg-refs");
1900 if (CodeGenOpts.NoImplicitFloat)
1901 FuncAttrs.addAttribute(llvm::Attribute::NoImplicitFloat);
1902
1903 if (AttrOnCallSite) {
1904 // Attributes that should go on the call site only.
1905 // FIXME: Look for 'BuiltinAttr' on the function rather than re-checking
1906 // the -fno-builtin-foo list.
1907 if (!CodeGenOpts.SimplifyLibCalls || LangOpts.isNoBuiltinFunc(Name))
1908 FuncAttrs.addAttribute(llvm::Attribute::NoBuiltin);
1909 if (!CodeGenOpts.TrapFuncName.empty())
1910 FuncAttrs.addAttribute("trap-func-name", CodeGenOpts.TrapFuncName);
1911 } else {
1912 switch (CodeGenOpts.getFramePointer()) {
1913 case CodeGenOptions::FramePointerKind::None:
1914 // This is the default behavior.
1915 break;
1916 case CodeGenOptions::FramePointerKind::NonLeaf:
1917 case CodeGenOptions::FramePointerKind::All:
1918 FuncAttrs.addAttribute("frame-pointer",
1919 CodeGenOptions::getFramePointerKindName(
1920 CodeGenOpts.getFramePointer()));
1921 }
1922
1923 if (CodeGenOpts.LessPreciseFPMAD)
1924 FuncAttrs.addAttribute("less-precise-fpmad", "true");
1925
1926 if (CodeGenOpts.NullPointerIsValid)
1927 FuncAttrs.addAttribute(llvm::Attribute::NullPointerIsValid);
1928
1929 if (LangOpts.getDefaultExceptionMode() == LangOptions::FPE_Ignore)
1930 FuncAttrs.addAttribute("no-trapping-math", "true");
1931
1932 // TODO: Are these all needed?
1933 // unsafe/inf/nan/nsz are handled by instruction-level FastMathFlags.
1934 if (LangOpts.NoHonorInfs)
1935 FuncAttrs.addAttribute("no-infs-fp-math", "true");
1936 if (LangOpts.NoHonorNaNs)
1937 FuncAttrs.addAttribute("no-nans-fp-math", "true");
1938 if (LangOpts.ApproxFunc)
1939 FuncAttrs.addAttribute("approx-func-fp-math", "true");
1940 if (LangOpts.AllowFPReassoc && LangOpts.AllowRecip &&
1941 LangOpts.NoSignedZero && LangOpts.ApproxFunc &&
1942 (LangOpts.getDefaultFPContractMode() ==
1943 LangOptions::FPModeKind::FPM_Fast ||
1944 LangOpts.getDefaultFPContractMode() ==
1945 LangOptions::FPModeKind::FPM_FastHonorPragmas))
1946 FuncAttrs.addAttribute("unsafe-fp-math", "true");
1947 if (CodeGenOpts.SoftFloat)
1948 FuncAttrs.addAttribute("use-soft-float", "true");
1949 FuncAttrs.addAttribute("stack-protector-buffer-size",
1950 llvm::utostr(CodeGenOpts.SSPBufferSize));
1951 if (LangOpts.NoSignedZero)
1952 FuncAttrs.addAttribute("no-signed-zeros-fp-math", "true");
1953
1954 // TODO: Reciprocal estimate codegen options should apply to instructions?
1955 const std::vector<std::string> &Recips = CodeGenOpts.Reciprocals;
1956 if (!Recips.empty())
1957 FuncAttrs.addAttribute("reciprocal-estimates",
1958 llvm::join(Recips, ","));
1959
1960 if (!CodeGenOpts.PreferVectorWidth.empty() &&
1961 CodeGenOpts.PreferVectorWidth != "none")
1962 FuncAttrs.addAttribute("prefer-vector-width",
1963 CodeGenOpts.PreferVectorWidth);
1964
1965 if (CodeGenOpts.StackRealignment)
1966 FuncAttrs.addAttribute("stackrealign");
1967 if (CodeGenOpts.Backchain)
1968 FuncAttrs.addAttribute("backchain");
1969 if (CodeGenOpts.EnableSegmentedStacks)
1970 FuncAttrs.addAttribute("split-stack");
1971
1972 if (CodeGenOpts.SpeculativeLoadHardening)
1973 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
1974
1975 // Add zero-call-used-regs attribute.
1976 switch (CodeGenOpts.getZeroCallUsedRegs()) {
1977 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Skip:
1978 FuncAttrs.removeAttribute("zero-call-used-regs");
1979 break;
1980 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPRArg:
1981 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr-arg");
1982 break;
1983 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedGPR:
1984 FuncAttrs.addAttribute("zero-call-used-regs", "used-gpr");
1985 break;
1986 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::UsedArg:
1987 FuncAttrs.addAttribute("zero-call-used-regs", "used-arg");
1988 break;
1989 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::Used:
1990 FuncAttrs.addAttribute("zero-call-used-regs", "used");
1991 break;
1992 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPRArg:
1993 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr-arg");
1994 break;
1995 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllGPR:
1996 FuncAttrs.addAttribute("zero-call-used-regs", "all-gpr");
1997 break;
1998 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::AllArg:
1999 FuncAttrs.addAttribute("zero-call-used-regs", "all-arg");
2000 break;
2001 case llvm::ZeroCallUsedRegs::ZeroCallUsedRegsKind::All:
2002 FuncAttrs.addAttribute("zero-call-used-regs", "all");
2003 break;
2004 }
2005 }
2006
2007 if (LangOpts.assumeFunctionsAreConvergent()) {
2008 // Conservatively, mark all functions and calls in CUDA and OpenCL as
2009 // convergent (meaning, they may call an intrinsically convergent op, such
2010 // as __syncthreads() / barrier(), and so can't have certain optimizations
2011 // applied around them). LLVM will remove this attribute where it safely
2012 // can.
2013 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2014 }
2015
2016   // TODO: The NoUnwind attribute should also be added for other GPU modes
2017   // (HIP, OpenMP offload); AFAIK, neither supports exceptions in device code.
2018 if ((LangOpts.CUDA && LangOpts.CUDAIsDevice) || LangOpts.OpenCL ||
2019 LangOpts.SYCLIsDevice) {
2020 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2021 }
2022
2023 for (StringRef Attr : CodeGenOpts.DefaultFunctionAttrs) {
2024 StringRef Var, Value;
2025 std::tie(Var, Value) = Attr.split('=');
2026 FuncAttrs.addAttribute(Var, Value);
2027 }
2028 }
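// Illustrative outcome: at -Oz without optnone, the code above adds
// OptimizeForSize and MinSize; for non-call-site attribute sets it also
// records the configured buffer size, by default
//   "stack-protector-buffer-size"="8"
// while call sites only pick up nobuiltin / "trap-func-name" here.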
2029
2030 /// Merges `target-features` from \p TargetOpts and \p F, and sets the result
2031 /// in \p FuncAttr.
2032 /// * features from \p F are always kept
2033 /// * a feature from \p TargetOpts is kept if both it and its opposite are
2034 /// absent from \p F
2035 static void
2036 overrideFunctionFeaturesWithTargetFeatures(llvm::AttrBuilder &FuncAttr,
2037 const llvm::Function &F,
2038 const TargetOptions &TargetOpts) {
2039 auto FFeatures = F.getFnAttribute("target-features");
2040
2041 llvm::StringSet<> MergedNames;
2042 SmallVector<StringRef> MergedFeatures;
2043 MergedFeatures.reserve(TargetOpts.Features.size());
2044
2045 auto AddUnmergedFeatures = [&](auto &&FeatureRange) {
2046 for (StringRef Feature : FeatureRange) {
2047 if (Feature.empty())
2048 continue;
2049 assert(Feature[0] == '+' || Feature[0] == '-');
2050 StringRef Name = Feature.drop_front(1);
2051 bool Merged = !MergedNames.insert(Name).second;
2052 if (!Merged)
2053 MergedFeatures.push_back(Feature);
2054 }
2055 };
2056
2057 if (FFeatures.isValid())
2058 AddUnmergedFeatures(llvm::split(FFeatures.getValueAsString(), ','));
2059 AddUnmergedFeatures(TargetOpts.Features);
2060
2061 if (!MergedFeatures.empty()) {
2062 llvm::sort(MergedFeatures);
2063 FuncAttr.addAttribute("target-features", llvm::join(MergedFeatures, ","));
2064 }
2065 }
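// Worked example: if F already carries "target-features"="+sse4.2,-avx"
// and TargetOpts.Features is {"+avx2", "+sse4.2"}, the function's own
// entries win, "+avx2" is kept (F mentions neither +avx2 nor -avx2), and
// the sorted result is "target-features"="+avx2,+sse4.2,-avx".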
2066
2067 void CodeGen::mergeDefaultFunctionDefinitionAttributes(
2068 llvm::Function &F, const CodeGenOptions &CodeGenOpts,
2069 const LangOptions &LangOpts, const TargetOptions &TargetOpts,
2070 bool WillInternalize) {
2071
2072 llvm::AttrBuilder FuncAttrs(F.getContext());
2073 // Here we only extract the options that are relevant compared to the version
2074 // from GetCPUAndFeaturesAttributes.
2075 if (!TargetOpts.CPU.empty())
2076 FuncAttrs.addAttribute("target-cpu", TargetOpts.CPU);
2077 if (!TargetOpts.TuneCPU.empty())
2078 FuncAttrs.addAttribute("tune-cpu", TargetOpts.TuneCPU);
2079
2080 ::getTrivialDefaultFunctionAttributes(F.getName(), F.hasOptNone(),
2081 CodeGenOpts, LangOpts,
2082 /*AttrOnCallSite=*/false, FuncAttrs);
2083
2084 if (!WillInternalize && F.isInterposable()) {
2085 // Do not promote "dynamic" denormal-fp-math to this translation unit's
2086 // setting for weak functions that won't be internalized. The user has no
2087 // real control for how builtin bitcode is linked, so we shouldn't assume
2088 // later copies will use a consistent mode.
2089 F.addFnAttrs(FuncAttrs);
2090 return;
2091 }
2092
2093 llvm::AttributeMask AttrsToRemove;
2094
2095 llvm::DenormalMode DenormModeToMerge = F.getDenormalModeRaw();
2096 llvm::DenormalMode DenormModeToMergeF32 = F.getDenormalModeF32Raw();
2097 llvm::DenormalMode Merged =
2098 CodeGenOpts.FPDenormalMode.mergeCalleeMode(DenormModeToMerge);
2099 llvm::DenormalMode MergedF32 = CodeGenOpts.FP32DenormalMode;
2100
2101 if (DenormModeToMergeF32.isValid()) {
2102 MergedF32 =
2103 CodeGenOpts.FP32DenormalMode.mergeCalleeMode(DenormModeToMergeF32);
2104 }
2105
2106 if (Merged == llvm::DenormalMode::getDefault()) {
2107 AttrsToRemove.addAttribute("denormal-fp-math");
2108 } else if (Merged != DenormModeToMerge) {
2109 // Overwrite existing attribute
2110 FuncAttrs.addAttribute("denormal-fp-math",
2111 CodeGenOpts.FPDenormalMode.str());
2112 }
2113
2114 if (MergedF32 == llvm::DenormalMode::getDefault()) {
2115 AttrsToRemove.addAttribute("denormal-fp-math-f32");
2116 } else if (MergedF32 != DenormModeToMergeF32) {
2117 // Overwrite existing attribute
2118 FuncAttrs.addAttribute("denormal-fp-math-f32",
2119 CodeGenOpts.FP32DenormalMode.str());
2120 }
2121
2122 F.removeFnAttrs(AttrsToRemove);
2123 addDenormalModeAttrs(Merged, MergedF32, FuncAttrs);
2124
2125 overrideFunctionFeaturesWithTargetFeatures(FuncAttrs, F, TargetOpts);
2126
2127 F.addFnAttrs(FuncAttrs);
2128 }
2129
2130 void CodeGenModule::getTrivialDefaultFunctionAttributes(
2131 StringRef Name, bool HasOptnone, bool AttrOnCallSite,
2132 llvm::AttrBuilder &FuncAttrs) {
2133 ::getTrivialDefaultFunctionAttributes(Name, HasOptnone, getCodeGenOpts(),
2134 getLangOpts(), AttrOnCallSite,
2135 FuncAttrs);
2136 }
2137
2138 void CodeGenModule::getDefaultFunctionAttributes(StringRef Name,
2139 bool HasOptnone,
2140 bool AttrOnCallSite,
2141 llvm::AttrBuilder &FuncAttrs) {
2142 getTrivialDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite,
2143 FuncAttrs);
2144 // If we're just getting the default, get the default values for mergeable
2145 // attributes.
2146 if (!AttrOnCallSite)
2147 addMergableDefaultFunctionAttributes(CodeGenOpts, FuncAttrs);
2148 }
2149
2150 void CodeGenModule::addDefaultFunctionDefinitionAttributes(
2151 llvm::AttrBuilder &attrs) {
2152 getDefaultFunctionAttributes(/*function name*/ "", /*optnone*/ false,
2153 /*for call*/ false, attrs);
2154 GetCPUAndFeaturesAttributes(GlobalDecl(), attrs);
2155 }
2156
2157 static void addNoBuiltinAttributes(llvm::AttrBuilder &FuncAttrs,
2158 const LangOptions &LangOpts,
2159 const NoBuiltinAttr *NBA = nullptr) {
2160 auto AddNoBuiltinAttr = [&FuncAttrs](StringRef BuiltinName) {
2161 SmallString<32> AttributeName;
2162 AttributeName += "no-builtin-";
2163 AttributeName += BuiltinName;
2164 FuncAttrs.addAttribute(AttributeName);
2165 };
2166
2167 // First, handle the language options passed through -fno-builtin.
2168 if (LangOpts.NoBuiltin) {
2169 // -fno-builtin disables them all.
2170 FuncAttrs.addAttribute("no-builtins");
2171 return;
2172 }
2173
2174 // Then, add attributes for builtins specified through -fno-builtin-<name>.
2175 llvm::for_each(LangOpts.NoBuiltinFuncs, AddNoBuiltinAttr);
2176
2177   // Now, let's check the __attribute__((no_builtin("..."))) attribute added
2178   // to the source.
2179 if (!NBA)
2180 return;
2181
2182 // If there is a wildcard in the builtin names specified through the
2183 // attribute, disable them all.
2184 if (llvm::is_contained(NBA->builtinNames(), "*")) {
2185 FuncAttrs.addAttribute("no-builtins");
2186 return;
2187 }
2188
2189 // And last, add the rest of the builtin names.
2190 llvm::for_each(NBA->builtinNames(), AddNoBuiltinAttr);
2191 }
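// For example, -fno-builtin-memcpy produces the string attribute
// "no-builtin-memcpy", while either -fno-builtin or
// __attribute__((no_builtin("*"))) collapses everything to "no-builtins".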
2192
2193 static bool DetermineNoUndef(QualType QTy, CodeGenTypes &Types,
2194 const llvm::DataLayout &DL, const ABIArgInfo &AI,
2195 bool CheckCoerce = true) {
2196 llvm::Type *Ty = Types.ConvertTypeForMem(QTy);
2197 if (AI.getKind() == ABIArgInfo::Indirect ||
2198 AI.getKind() == ABIArgInfo::IndirectAliased)
2199 return true;
2200 if (AI.getKind() == ABIArgInfo::Extend)
2201 return true;
2202 if (!DL.typeSizeEqualsStoreSize(Ty))
2203     // TODO: This will result in a modest number of values not marked noundef
2204 // when they could be. We care about values that *invisibly* contain undef
2205 // bits from the perspective of LLVM IR.
2206 return false;
2207 if (CheckCoerce && AI.canHaveCoerceToType()) {
2208 llvm::Type *CoerceTy = AI.getCoerceToType();
2209 if (llvm::TypeSize::isKnownGT(DL.getTypeSizeInBits(CoerceTy),
2210 DL.getTypeSizeInBits(Ty)))
2211 // If we're coercing to a type with a greater size than the canonical one,
2212 // we're introducing new undef bits.
2213 // Coercing to a type of smaller or equal size is ok, as we know that
2214 // there's no internal padding (typeSizeEqualsStoreSize).
2215 return false;
2216 }
2217 if (QTy->isBitIntType())
2218 return true;
2219 if (QTy->isReferenceType())
2220 return true;
2221 if (QTy->isNullPtrType())
2222 return false;
2223 if (QTy->isMemberPointerType())
2224 // TODO: Some member pointers are `noundef`, but it depends on the ABI. For
2225 // now, never mark them.
2226 return false;
2227 if (QTy->isScalarType()) {
2228 if (const ComplexType *Complex = dyn_cast<ComplexType>(QTy))
2229 return DetermineNoUndef(Complex->getElementType(), Types, DL, AI, false);
2230 return true;
2231 }
2232 if (const VectorType *Vector = dyn_cast<VectorType>(QTy))
2233 return DetermineNoUndef(Vector->getElementType(), Types, DL, AI, false);
2234 if (const MatrixType *Matrix = dyn_cast<MatrixType>(QTy))
2235 return DetermineNoUndef(Matrix->getElementType(), Types, DL, AI, false);
2236 if (const ArrayType *Array = dyn_cast<ArrayType>(QTy))
2237 return DetermineNoUndef(Array->getElementType(), Types, DL, AI, false);
2238
2239 // TODO: Some structs may be `noundef`, in specific situations.
2240 return false;
2241 }
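// For example, 'int' and 'int *' parameters are noundef-eligible here,
// while an aggregate such as 'struct { int a; }' is not (for now), and
// 'nullptr_t' and member pointers are explicitly excluded.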
2242
2243 /// Check if the argument of a function has the maybe_undef attribute.
2244 static bool IsArgumentMaybeUndef(const Decl *TargetDecl,
2245 unsigned NumRequiredArgs, unsigned ArgNo) {
2246 const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl);
2247 if (!FD)
2248 return false;
2249
2250 // Assume variadic arguments do not have maybe_undef attribute.
2251 if (ArgNo >= NumRequiredArgs)
2252 return false;
2253
2254 // Check if argument has maybe_undef attribute.
2255 if (ArgNo < FD->getNumParams()) {
2256 const ParmVarDecl *Param = FD->getParamDecl(ArgNo);
2257 if (Param && Param->hasAttr<MaybeUndefAttr>())
2258 return true;
2259 }
2260
2261 return false;
2262 }
2263
2264 /// Test if it's legal to apply nofpclass for the given parameter type and
2265 /// its lowered IR type.
2266 static bool canApplyNoFPClass(const ABIArgInfo &AI, QualType ParamType,
2267 bool IsReturn) {
2268 // Should only apply to FP types in the source, not ABI promoted.
2269 if (!ParamType->hasFloatingRepresentation())
2270 return false;
2271
2272 // The promoted-to IR type also needs to support nofpclass.
2273 llvm::Type *IRTy = AI.getCoerceToType();
2274 if (llvm::AttributeFuncs::isNoFPClassCompatibleType(IRTy))
2275 return true;
2276
2277 if (llvm::StructType *ST = dyn_cast<llvm::StructType>(IRTy)) {
2278 return !IsReturn && AI.getCanBeFlattened() &&
2279 llvm::all_of(ST->elements(), [](llvm::Type *Ty) {
2280 return llvm::AttributeFuncs::isNoFPClassCompatibleType(Ty);
2281 });
2282 }
2283
2284 return false;
2285 }
2286
2287 /// Return the nofpclass mask that can be applied to floating-point parameters.
2288 static llvm::FPClassTest getNoFPClassTestMask(const LangOptions &LangOpts) {
2289 llvm::FPClassTest Mask = llvm::fcNone;
2290 if (LangOpts.NoHonorInfs)
2291 Mask |= llvm::fcInf;
2292 if (LangOpts.NoHonorNaNs)
2293 Mask |= llvm::fcNan;
2294 return Mask;
2295 }
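// For example, under -ffinite-math-only (NoHonorInfs and NoHonorNaNs both
// set) the mask is fcInf | fcNan, which prints as nofpclass(nan inf) on
// eligible floating-point parameters and returns.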
2296
2297 void CodeGenModule::AdjustMemoryAttribute(StringRef Name,
2298 CGCalleeInfo CalleeInfo,
2299 llvm::AttributeList &Attrs) {
2300 if (Attrs.getMemoryEffects().getModRef() == llvm::ModRefInfo::NoModRef) {
2301 Attrs = Attrs.removeFnAttribute(getLLVMContext(), llvm::Attribute::Memory);
2302 llvm::Attribute MemoryAttr = llvm::Attribute::getWithMemoryEffects(
2303 getLLVMContext(), llvm::MemoryEffects::writeOnly());
2304 Attrs = Attrs.addFnAttribute(getLLVMContext(), MemoryAttr);
2305 }
2306 }
2307
2308 /// Construct the IR attribute list of a function or call.
2309 ///
2310 /// When adding an attribute, please consider where it should be handled:
2311 ///
2312 /// - getDefaultFunctionAttributes is for attributes that are essentially
2313 /// part of the global target configuration (but perhaps can be
2314 /// overridden on a per-function basis). Adding attributes there
2315 /// will cause them to also be set in frontends that build on Clang's
2316 /// target-configuration logic, as well as for code defined in library
2317 /// modules such as CUDA's libdevice.
2318 ///
2319 /// - ConstructAttributeList builds on top of getDefaultFunctionAttributes
2320 /// and adds declaration-specific, convention-specific, and
2321 /// frontend-specific logic. The last is of particular importance:
2322 /// attributes that restrict how the frontend generates code must be
2323 /// added here rather than getDefaultFunctionAttributes.
2324 ///
2325 void CodeGenModule::ConstructAttributeList(StringRef Name,
2326 const CGFunctionInfo &FI,
2327 CGCalleeInfo CalleeInfo,
2328 llvm::AttributeList &AttrList,
2329 unsigned &CallingConv,
2330 bool AttrOnCallSite, bool IsThunk) {
2331 llvm::AttrBuilder FuncAttrs(getLLVMContext());
2332 llvm::AttrBuilder RetAttrs(getLLVMContext());
2333
2334 // Collect function IR attributes from the CC lowering.
2335   // We'll collect the parameter and result attributes later.
2336 CallingConv = FI.getEffectiveCallingConvention();
2337 if (FI.isNoReturn())
2338 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2339 if (FI.isCmseNSCall())
2340 FuncAttrs.addAttribute("cmse_nonsecure_call");
2341
2342 // Collect function IR attributes from the callee prototype if we have one.
2343 AddAttributesFromFunctionProtoType(getContext(), FuncAttrs,
2344 CalleeInfo.getCalleeFunctionProtoType());
2345
2346 const Decl *TargetDecl = CalleeInfo.getCalleeDecl().getDecl();
2347
2348 // Attach assumption attributes to the declaration. If this is a call
2349 // site, attach assumptions from the caller to the call as well.
2350 AddAttributesFromAssumes(FuncAttrs, TargetDecl);
2351
2352 bool HasOptnone = false;
2353 // The NoBuiltinAttr attached to the target FunctionDecl.
2354 const NoBuiltinAttr *NBA = nullptr;
2355
2356 // Some ABIs may result in additional accesses to arguments that may
2357 // otherwise not be present.
2358 auto AddPotentialArgAccess = [&]() {
2359 llvm::Attribute A = FuncAttrs.getAttribute(llvm::Attribute::Memory);
2360 if (A.isValid())
2361 FuncAttrs.addMemoryAttr(A.getMemoryEffects() |
2362 llvm::MemoryEffects::argMemOnly());
2363 };
2364
2365 // Collect function IR attributes based on declaration-specific
2366 // information.
2367 // FIXME: handle sseregparm someday...
2368 if (TargetDecl) {
2369 if (TargetDecl->hasAttr<ReturnsTwiceAttr>())
2370 FuncAttrs.addAttribute(llvm::Attribute::ReturnsTwice);
2371 if (TargetDecl->hasAttr<NoThrowAttr>())
2372 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2373 if (TargetDecl->hasAttr<NoReturnAttr>())
2374 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2375 if (TargetDecl->hasAttr<ColdAttr>())
2376 FuncAttrs.addAttribute(llvm::Attribute::Cold);
2377 if (TargetDecl->hasAttr<HotAttr>())
2378 FuncAttrs.addAttribute(llvm::Attribute::Hot);
2379 if (TargetDecl->hasAttr<NoDuplicateAttr>())
2380 FuncAttrs.addAttribute(llvm::Attribute::NoDuplicate);
2381 if (TargetDecl->hasAttr<ConvergentAttr>())
2382 FuncAttrs.addAttribute(llvm::Attribute::Convergent);
2383
2384 if (const FunctionDecl *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2385 AddAttributesFromFunctionProtoType(
2386 getContext(), FuncAttrs, Fn->getType()->getAs<FunctionProtoType>());
2387 if (AttrOnCallSite && Fn->isReplaceableGlobalAllocationFunction()) {
2388 // A sane operator new returns a non-aliasing pointer.
2389 auto Kind = Fn->getDeclName().getCXXOverloadedOperator();
2390 if (getCodeGenOpts().AssumeSaneOperatorNew &&
2391 (Kind == OO_New || Kind == OO_Array_New))
2392 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2393 }
2394 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(Fn);
2395 const bool IsVirtualCall = MD && MD->isVirtual();
2396 // Don't use [[noreturn]], _Noreturn or [[no_builtin]] for a call to a
2397 // virtual function. These attributes are not inherited by overloads.
2398 if (!(AttrOnCallSite && IsVirtualCall)) {
2399 if (Fn->isNoReturn())
2400 FuncAttrs.addAttribute(llvm::Attribute::NoReturn);
2401 NBA = Fn->getAttr<NoBuiltinAttr>();
2402 }
2403 }
2404
2405 if (isa<FunctionDecl>(TargetDecl) || isa<VarDecl>(TargetDecl)) {
2406 // Only place nomerge attribute on call sites, never functions. This
2407 // allows it to work on indirect virtual function calls.
2408 if (AttrOnCallSite && TargetDecl->hasAttr<NoMergeAttr>())
2409 FuncAttrs.addAttribute(llvm::Attribute::NoMerge);
2410 }
2411
2412 // 'const', 'pure' and 'noalias' attributed functions are also nounwind.
2413 if (TargetDecl->hasAttr<ConstAttr>()) {
2414 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::none());
2415 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2416 // gcc specifies that 'const' functions have greater restrictions than
2417 // 'pure' functions, so they also cannot have infinite loops.
2418 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2419 } else if (TargetDecl->hasAttr<PureAttr>()) {
2420 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::readOnly());
2421 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2422 // gcc specifies that 'pure' functions cannot have infinite loops.
2423 FuncAttrs.addAttribute(llvm::Attribute::WillReturn);
2424 } else if (TargetDecl->hasAttr<NoAliasAttr>()) {
2425 FuncAttrs.addMemoryAttr(llvm::MemoryEffects::inaccessibleOrArgMemOnly());
2426 FuncAttrs.addAttribute(llvm::Attribute::NoUnwind);
2427 }
2428 if (TargetDecl->hasAttr<RestrictAttr>())
2429 RetAttrs.addAttribute(llvm::Attribute::NoAlias);
2430 if (TargetDecl->hasAttr<ReturnsNonNullAttr>() &&
2431 !CodeGenOpts.NullPointerIsValid)
2432 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2433 if (TargetDecl->hasAttr<AnyX86NoCallerSavedRegistersAttr>())
2434 FuncAttrs.addAttribute("no_caller_saved_registers");
2435 if (TargetDecl->hasAttr<AnyX86NoCfCheckAttr>())
2436 FuncAttrs.addAttribute(llvm::Attribute::NoCfCheck);
2437 if (TargetDecl->hasAttr<LeafAttr>())
2438 FuncAttrs.addAttribute(llvm::Attribute::NoCallback);
2439
2440 HasOptnone = TargetDecl->hasAttr<OptimizeNoneAttr>();
2441 if (auto *AllocSize = TargetDecl->getAttr<AllocSizeAttr>()) {
2442 std::optional<unsigned> NumElemsParam;
2443 if (AllocSize->getNumElemsParam().isValid())
2444 NumElemsParam = AllocSize->getNumElemsParam().getLLVMIndex();
2445 FuncAttrs.addAllocSizeAttr(AllocSize->getElemSizeParam().getLLVMIndex(),
2446 NumElemsParam);
2447 }
2448
2449 if (TargetDecl->hasAttr<OpenCLKernelAttr>()) {
2450 if (getLangOpts().OpenCLVersion <= 120) {
2451         // In OpenCL v1.2, work-groups are always uniform.
2452 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2453 } else {
2454         // In OpenCL v2.0, work-groups may or may not be uniform. The
2455         // '-cl-uniform-work-group-size' compile option hints to the compiler
2456         // that the global work-size is a multiple of the work-group size
2457         // specified to clEnqueueNDRangeKernel (i.e. work-groups are uniform).
2459 FuncAttrs.addAttribute(
2460 "uniform-work-group-size",
2461 llvm::toStringRef(getLangOpts().OffloadUniformBlock));
2462 }
2463 }
2464
2465 if (TargetDecl->hasAttr<CUDAGlobalAttr>() &&
2466 getLangOpts().OffloadUniformBlock)
2467 FuncAttrs.addAttribute("uniform-work-group-size", "true");
2468
2469 if (TargetDecl->hasAttr<ArmLocallyStreamingAttr>())
2470 FuncAttrs.addAttribute("aarch64_pstate_sm_body");
2471 }
2472
2473 // Attach "no-builtins" attributes to:
2474 // * call sites: both `nobuiltin` and "no-builtins" or "no-builtin-<name>".
2475 // * definitions: "no-builtins" or "no-builtin-<name>" only.
2476 // The attributes can come from:
2477 // * LangOpts: -ffreestanding, -fno-builtin, -fno-builtin-<name>
2478 // * FunctionDecl attributes: __attribute__((no_builtin(...)))
2479 addNoBuiltinAttributes(FuncAttrs, getLangOpts(), NBA);
2480
2481   // Collect function IR attributes based on global settings.
2482 getDefaultFunctionAttributes(Name, HasOptnone, AttrOnCallSite, FuncAttrs);
2483
2484 // Override some default IR attributes based on declaration-specific
2485 // information.
2486 if (TargetDecl) {
2487 if (TargetDecl->hasAttr<NoSpeculativeLoadHardeningAttr>())
2488 FuncAttrs.removeAttribute(llvm::Attribute::SpeculativeLoadHardening);
2489 if (TargetDecl->hasAttr<SpeculativeLoadHardeningAttr>())
2490 FuncAttrs.addAttribute(llvm::Attribute::SpeculativeLoadHardening);
2491 if (TargetDecl->hasAttr<NoSplitStackAttr>())
2492 FuncAttrs.removeAttribute("split-stack");
2493 if (TargetDecl->hasAttr<ZeroCallUsedRegsAttr>()) {
2494 // A function "__attribute__((...))" overrides the command-line flag.
2495 auto Kind =
2496 TargetDecl->getAttr<ZeroCallUsedRegsAttr>()->getZeroCallUsedRegs();
2497 FuncAttrs.removeAttribute("zero-call-used-regs");
2498 FuncAttrs.addAttribute(
2499 "zero-call-used-regs",
2500 ZeroCallUsedRegsAttr::ConvertZeroCallUsedRegsKindToStr(Kind));
2501 }
2502
2503 // Add NonLazyBind attribute to function declarations when -fno-plt
2504 // is used.
2505 // FIXME: what if we just haven't processed the function definition
2506 // yet, or if it's an external definition like C99 inline?
2507 if (CodeGenOpts.NoPLT) {
2508 if (auto *Fn = dyn_cast<FunctionDecl>(TargetDecl)) {
2509 if (!Fn->isDefined() && !AttrOnCallSite) {
2510 FuncAttrs.addAttribute(llvm::Attribute::NonLazyBind);
2511 }
2512 }
2513 }
2514 }
2515
2516 // Add "sample-profile-suffix-elision-policy" attribute for internal linkage
2517 // functions with -funique-internal-linkage-names.
2518 if (TargetDecl && CodeGenOpts.UniqueInternalLinkageNames) {
2519 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
2520 if (!FD->isExternallyVisible())
2521 FuncAttrs.addAttribute("sample-profile-suffix-elision-policy",
2522 "selected");
2523 }
2524 }
2525
2526 // Collect non-call-site function IR attributes from declaration-specific
2527 // information.
2528 if (!AttrOnCallSite) {
2529 if (TargetDecl && TargetDecl->hasAttr<CmseNSEntryAttr>())
2530 FuncAttrs.addAttribute("cmse_nonsecure_entry");
2531
2532 // Whether tail calls are enabled.
2533 auto shouldDisableTailCalls = [&] {
2534 // Should this be honored in getDefaultFunctionAttributes?
2535 if (CodeGenOpts.DisableTailCalls)
2536 return true;
2537
2538 if (!TargetDecl)
2539 return false;
2540
2541 if (TargetDecl->hasAttr<DisableTailCallsAttr>() ||
2542 TargetDecl->hasAttr<AnyX86InterruptAttr>())
2543 return true;
2544
2545 if (CodeGenOpts.NoEscapingBlockTailCalls) {
2546 if (const auto *BD = dyn_cast<BlockDecl>(TargetDecl))
2547 if (!BD->doesNotEscape())
2548 return true;
2549 }
2550
2551 return false;
2552 };
2553 if (shouldDisableTailCalls())
2554 FuncAttrs.addAttribute("disable-tail-calls", "true");
2555
2556 // CPU/feature overrides. addDefaultFunctionDefinitionAttributes
2557 // handles these separately to set them based on the global defaults.
2558 GetCPUAndFeaturesAttributes(CalleeInfo.getCalleeDecl(), FuncAttrs);
2559 }
2560
2561 // Collect attributes from arguments and return values.
2562 ClangToLLVMArgMapping IRFunctionArgs(getContext(), FI);
2563
2564 QualType RetTy = FI.getReturnType();
2565 const ABIArgInfo &RetAI = FI.getReturnInfo();
2566 const llvm::DataLayout &DL = getDataLayout();
2567
2568 // Determine if the return type could be partially undef
2569 if (CodeGenOpts.EnableNoundefAttrs &&
2570 HasStrictReturn(*this, RetTy, TargetDecl)) {
2571 if (!RetTy->isVoidType() && RetAI.getKind() != ABIArgInfo::Indirect &&
2572 DetermineNoUndef(RetTy, getTypes(), DL, RetAI))
2573 RetAttrs.addAttribute(llvm::Attribute::NoUndef);
2574 }
2575
2576 switch (RetAI.getKind()) {
2577 case ABIArgInfo::Extend:
2578 if (RetAI.isSignExt())
2579 RetAttrs.addAttribute(llvm::Attribute::SExt);
2580 else
2581 RetAttrs.addAttribute(llvm::Attribute::ZExt);
2582 [[fallthrough]];
2583 case ABIArgInfo::Direct:
2584 if (RetAI.getInReg())
2585 RetAttrs.addAttribute(llvm::Attribute::InReg);
2586
2587 if (canApplyNoFPClass(RetAI, RetTy, true))
2588 RetAttrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
2589
2590 break;
2591 case ABIArgInfo::Ignore:
2592 break;
2593
2594 case ABIArgInfo::InAlloca:
2595 case ABIArgInfo::Indirect: {
2596 // inalloca and sret disable readnone and readonly
2597 AddPotentialArgAccess();
2598 break;
2599 }
2600
2601 case ABIArgInfo::CoerceAndExpand:
2602 break;
2603
2604 case ABIArgInfo::Expand:
2605 case ABIArgInfo::IndirectAliased:
2606 llvm_unreachable("Invalid ABI kind for return argument");
2607 }
2608
2609 if (!IsThunk) {
2610 // FIXME: fix this properly, https://reviews.llvm.org/D100388
2611 if (const auto *RefTy = RetTy->getAs<ReferenceType>()) {
2612 QualType PTy = RefTy->getPointeeType();
2613 if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
2614 RetAttrs.addDereferenceableAttr(
2615 getMinimumObjectSize(PTy).getQuantity());
2616 if (getTypes().getTargetAddressSpace(PTy) == 0 &&
2617 !CodeGenOpts.NullPointerIsValid)
2618 RetAttrs.addAttribute(llvm::Attribute::NonNull);
2619 if (PTy->isObjectType()) {
2620 llvm::Align Alignment =
2621 getNaturalPointeeTypeAlignment(RetTy).getAsAlign();
2622 RetAttrs.addAlignmentAttr(Alignment);
2623 }
2624 }
2625 }
2626
2627 bool hasUsedSRet = false;
2628 SmallVector<llvm::AttributeSet, 4> ArgAttrs(IRFunctionArgs.totalIRArgs());
2629
2630 // Attach attributes to sret.
2631 if (IRFunctionArgs.hasSRetArg()) {
2632 llvm::AttrBuilder SRETAttrs(getLLVMContext());
2633 SRETAttrs.addStructRetAttr(getTypes().ConvertTypeForMem(RetTy));
2634 SRETAttrs.addAttribute(llvm::Attribute::Writable);
2635 SRETAttrs.addAttribute(llvm::Attribute::DeadOnUnwind);
2636 hasUsedSRet = true;
2637 if (RetAI.getInReg())
2638 SRETAttrs.addAttribute(llvm::Attribute::InReg);
2639 SRETAttrs.addAlignmentAttr(RetAI.getIndirectAlign().getQuantity());
2640 ArgAttrs[IRFunctionArgs.getSRetArgNo()] =
2641 llvm::AttributeSet::get(getLLVMContext(), SRETAttrs);
2642 }
2643
2644 // Attach attributes to inalloca argument.
2645 if (IRFunctionArgs.hasInallocaArg()) {
2646 llvm::AttrBuilder Attrs(getLLVMContext());
2647 Attrs.addInAllocaAttr(FI.getArgStruct());
2648 ArgAttrs[IRFunctionArgs.getInallocaArgNo()] =
2649 llvm::AttributeSet::get(getLLVMContext(), Attrs);
2650 }
2651
2652   // Apply `nonnull`, `dereferenceable(N)` and `align N` to the `this` argument,
2653 // unless this is a thunk function.
2654 // FIXME: fix this properly, https://reviews.llvm.org/D100388
  if (FI.isInstanceMethod() && !IRFunctionArgs.hasInallocaArg() &&
      !FI.arg_begin()->type->isVoidPointerType() && !IsThunk) {
    auto IRArgs = IRFunctionArgs.getIRArgs(0);

    assert(IRArgs.second == 1 && "Expected only a single `this` pointer.");

    llvm::AttrBuilder Attrs(getLLVMContext());

    QualType ThisTy =
        FI.arg_begin()->type.getTypePtr()->getPointeeType();

    if (!CodeGenOpts.NullPointerIsValid &&
        getTypes().getTargetAddressSpace(FI.arg_begin()->type) == 0) {
      Attrs.addAttribute(llvm::Attribute::NonNull);
      Attrs.addDereferenceableAttr(getMinimumObjectSize(ThisTy).getQuantity());
    } else {
      // FIXME: dereferenceable should be correct here, regardless of
      // NullPointerIsValid. However, dereferenceable currently does not always
      // respect NullPointerIsValid and may imply nonnull and break the program.
      // See https://reviews.llvm.org/D66618 for discussions.
      Attrs.addDereferenceableOrNullAttr(
          getMinimumObjectSize(
              FI.arg_begin()->type.castAs<PointerType>()->getPointeeType())
              .getQuantity());
    }

    llvm::Align Alignment =
        getNaturalTypeAlignment(ThisTy, /*BaseInfo=*/nullptr,
                                /*TBAAInfo=*/nullptr, /*forPointeeType=*/true)
            .getAsAlign();
    Attrs.addAlignmentAttr(Alignment);

    ArgAttrs[IRArgs.first] = llvm::AttributeSet::get(getLLVMContext(), Attrs);
  }

  unsigned ArgNo = 0;
  for (CGFunctionInfo::const_arg_iterator I = FI.arg_begin(),
                                          E = FI.arg_end();
       I != E; ++I, ++ArgNo) {
    QualType ParamType = I->type;
    const ABIArgInfo &AI = I->info;
    llvm::AttrBuilder Attrs(getLLVMContext());

    // Add attribute for padding argument, if necessary.
    if (IRFunctionArgs.hasPaddingArg(ArgNo)) {
      if (AI.getPaddingInReg()) {
        ArgAttrs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
            llvm::AttributeSet::get(
                getLLVMContext(),
                llvm::AttrBuilder(getLLVMContext())
                    .addAttribute(llvm::Attribute::InReg));
      }
    }

    // Decide whether the argument we're handling could be partially undef.
    if (CodeGenOpts.EnableNoundefAttrs &&
        DetermineNoUndef(ParamType, getTypes(), DL, AI)) {
      Attrs.addAttribute(llvm::Attribute::NoUndef);
    }

    // 'restrict' -> 'noalias' is done in EmitFunctionProlog when we
    // have the corresponding parameter variable.  It doesn't make
    // sense to do it here because parameters are so messed up.
    switch (AI.getKind()) {
    case ABIArgInfo::Extend:
      if (AI.isSignExt())
        Attrs.addAttribute(llvm::Attribute::SExt);
      else
        Attrs.addAttribute(llvm::Attribute::ZExt);
      [[fallthrough]];
    case ABIArgInfo::Direct:
      if (ArgNo == 0 && FI.isChainCall())
        Attrs.addAttribute(llvm::Attribute::Nest);
      else if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);
      Attrs.addStackAlignmentAttr(llvm::MaybeAlign(AI.getDirectAlign()));

      if (canApplyNoFPClass(AI, ParamType, false))
        Attrs.addNoFPClassAttr(getNoFPClassTestMask(getLangOpts()));
      break;
    case ABIArgInfo::Indirect: {
      if (AI.getInReg())
        Attrs.addAttribute(llvm::Attribute::InReg);

      if (AI.getIndirectByVal())
        Attrs.addByValAttr(getTypes().ConvertTypeForMem(ParamType));

      auto *Decl = ParamType->getAsRecordDecl();
      if (CodeGenOpts.PassByValueIsNoAlias && Decl &&
          Decl->getArgPassingRestrictions() ==
              RecordArgPassingKind::CanPassInRegs)
        // When calling the function, the pointer passed in will be the only
        // reference to the underlying object. Mark it accordingly.
        Attrs.addAttribute(llvm::Attribute::NoAlias);

      // TODO: We could add the byref attribute if not byval, but it would
      // require updating many testcases.

      CharUnits Align = AI.getIndirectAlign();

      // In a byval argument, it is important that the required
      // alignment of the type is honored, as LLVM might be creating a
      // *new* stack object, and needs to know what alignment to give
      // it. (Sometimes it can deduce a sensible alignment on its own,
      // but not if clang decides it must emit a packed struct, or the
      // user specifies increased alignment requirements.)
      //
      // This is different from indirect *not* byval, where the object
      // exists already, and the align attribute is purely
      // informative.
      assert(!Align.isZero());

      // For now, only add this when we have a byval argument.
      // TODO: be less lazy about updating test cases.
      if (AI.getIndirectByVal())
        Attrs.addAlignmentAttr(Align.getQuantity());

      // byval disables readnone and readonly.
      AddPotentialArgAccess();
      break;
    }
    case ABIArgInfo::IndirectAliased: {
      CharUnits Align = AI.getIndirectAlign();
      Attrs.addByRefAttr(getTypes().ConvertTypeForMem(ParamType));
      Attrs.addAlignmentAttr(Align.getQuantity());
      break;
    }
    case ABIArgInfo::Ignore:
    case ABIArgInfo::Expand:
    case ABIArgInfo::CoerceAndExpand:
      break;

    case ABIArgInfo::InAlloca:
      // inalloca disables readnone and readonly.
      AddPotentialArgAccess();
      continue;
    }

    if (const auto *RefTy = ParamType->getAs<ReferenceType>()) {
      QualType PTy = RefTy->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType())
        Attrs.addDereferenceableAttr(
            getMinimumObjectSize(PTy).getQuantity());
      if (getTypes().getTargetAddressSpace(PTy) == 0 &&
          !CodeGenOpts.NullPointerIsValid)
        Attrs.addAttribute(llvm::Attribute::NonNull);
      if (PTy->isObjectType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
        Attrs.addAlignmentAttr(Alignment);
      }
    }

    // From OpenCL spec v3.0.10 section 6.3.5 Alignment of Types:
    // > For arguments to a __kernel function declared to be a pointer to a
    // > data type, the OpenCL compiler can assume that the pointee is always
    // > appropriately aligned as required by the data type.
    if (TargetDecl && TargetDecl->hasAttr<OpenCLKernelAttr>() &&
        ParamType->isPointerType()) {
      QualType PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        llvm::Align Alignment =
            getNaturalPointeeTypeAlignment(ParamType).getAsAlign();
        Attrs.addAlignmentAttr(Alignment);
      }
    }

    switch (FI.getExtParameterInfo(ArgNo).getABI()) {
    case ParameterABI::Ordinary:
      break;

    case ParameterABI::SwiftIndirectResult: {
      // Add 'sret' if we haven't already used it for something, but
      // only if the result is void.
      if (!hasUsedSRet && RetTy->isVoidType()) {
        Attrs.addStructRetAttr(getTypes().ConvertTypeForMem(ParamType));
        hasUsedSRet = true;
      }

      // Add 'noalias' in either case.
      Attrs.addAttribute(llvm::Attribute::NoAlias);

      // Add 'dereferenceable' and 'alignment'.
      auto PTy = ParamType->getPointeeType();
      if (!PTy->isIncompleteType() && PTy->isConstantSizeType()) {
        auto info = getContext().getTypeInfoInChars(PTy);
        Attrs.addDereferenceableAttr(info.Width.getQuantity());
        Attrs.addAlignmentAttr(info.Align.getAsAlign());
      }
      break;
    }

    case ParameterABI::SwiftErrorResult:
      Attrs.addAttribute(llvm::Attribute::SwiftError);
      break;

    case ParameterABI::SwiftContext:
      Attrs.addAttribute(llvm::Attribute::SwiftSelf);
      break;

    case ParameterABI::SwiftAsyncContext:
      Attrs.addAttribute(llvm::Attribute::SwiftAsync);
      break;
    }

    if (FI.getExtParameterInfo(ArgNo).isNoEscape())
      Attrs.addAttribute(llvm::Attribute::NoCapture);

    if (Attrs.hasAttributes()) {
      unsigned FirstIRArg, NumIRArgs;
      std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);
      for (unsigned i = 0; i < NumIRArgs; i++)
        ArgAttrs[FirstIRArg + i] = ArgAttrs[FirstIRArg + i].addAttributes(
            getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), Attrs));
    }
  }
  assert(ArgNo == FI.arg_size());

  AttrList = llvm::AttributeList::get(
      getLLVMContext(), llvm::AttributeSet::get(getLLVMContext(), FuncAttrs),
      llvm::AttributeSet::get(getLLVMContext(), RetAttrs), ArgAttrs);
}

/// An argument came in as a promoted argument; demote it back to its
/// declared type.
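/// For example, a K&R-style `float` parameter is widened to `double` at the
/// ABI boundary and truncated back to `float` here.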
static llvm::Value *emitArgumentDemotion(CodeGenFunction &CGF,
                                         const VarDecl *var,
                                         llvm::Value *value) {
  llvm::Type *varType = CGF.ConvertType(var->getType());

  // This can happen with promotions that actually don't change the
  // underlying type, like the enum promotions.
  if (value->getType() == varType) return value;

  assert((varType->isIntegerTy() || varType->isFloatingPointTy())
         && "unexpected promotion type");

  if (isa<llvm::IntegerType>(varType))
    return CGF.Builder.CreateTrunc(value, varType, "arg.unpromote");

  return CGF.Builder.CreateFPCast(value, varType, "arg.unpromote");
}

/// Returns the attribute (either parameter attribute or function
/// attribute) that declares argument ArgNo to be non-null.
static const NonNullAttr *getNonNullAttr(const Decl *FD, const ParmVarDecl *PVD,
                                         QualType ArgType, unsigned ArgNo) {
  // FIXME: __attribute__((nonnull)) can also be applied to:
  //   - references to pointers, where the pointee is known to be
  //     nonnull (apparently a Clang extension)
  //   - transparent unions containing pointers
  // In the former case, LLVM IR cannot represent the constraint. In
  // the latter case, we have no guarantee that the transparent union
  // is in fact passed as a pointer.
  if (!ArgType->isAnyPointerType() && !ArgType->isBlockPointerType())
    return nullptr;
  // First, check attribute on parameter itself.
  if (PVD) {
    if (auto ParmNNAttr = PVD->getAttr<NonNullAttr>())
      return ParmNNAttr;
  }
  // Check function attributes.
  if (!FD)
    return nullptr;
  for (const auto *NNAttr : FD->specific_attrs<NonNullAttr>()) {
    if (NNAttr->isNonNull(ArgNo))
      return NNAttr;
  }
  return nullptr;
}

namespace {
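// Cleanup that copies a swifterror value out of the function-local temporary
// and back into the caller-provided slot on normal function exit; see the
// swifterror handling in EmitFunctionProlog below.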
struct CopyBackSwiftError final : EHScopeStack::Cleanup {
  Address Temp;
  Address Arg;
  CopyBackSwiftError(Address temp, Address arg) : Temp(temp), Arg(arg) {}
  void Emit(CodeGenFunction &CGF, Flags flags) override {
    llvm::Value *errorValue = CGF.Builder.CreateLoad(Temp);
    CGF.Builder.CreateStore(errorValue, Arg);
  }
};
}

void CodeGenFunction::EmitFunctionProlog(const CGFunctionInfo &FI,
                                         llvm::Function *Fn,
                                         const FunctionArgList &Args) {
  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>())
    // Naked functions don't have prologues.
    return;

  // If this is an implicit-return-zero function, go ahead and
  // initialize the return value. TODO: it might be nice to have
  // a more general mechanism for this that didn't require synthesized
  // return statements.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl)) {
    if (FD->hasImplicitReturnZero()) {
      QualType RetTy = FD->getReturnType().getUnqualifiedType();
      llvm::Type* LLVMTy = CGM.getTypes().ConvertType(RetTy);
      llvm::Constant* Zero = llvm::Constant::getNullValue(LLVMTy);
      Builder.CreateStore(Zero, ReturnValue);
    }
  }

  // FIXME: We no longer need the types from FunctionArgList; lift up and
  // simplify.

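  // ClangToLLVMArgMapping records, for each Clang-level argument, the range of
  // LLVM IR arguments it was lowered to, accounting for sret, inalloca,
  // padding, and expanded arguments.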
  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), FI);
  assert(Fn->arg_size() == IRFunctionArgs.totalIRArgs());

  // If we're using inalloca, all the memory arguments are GEPs off of the last
  // parameter, which is a pointer to the complete memory area.
  Address ArgStruct = Address::invalid();
  if (IRFunctionArgs.hasInallocaArg())
    ArgStruct = Address(Fn->getArg(IRFunctionArgs.getInallocaArgNo()),
                        FI.getArgStruct(), FI.getArgStructAlignment());

  // Name the struct return parameter.
  if (IRFunctionArgs.hasSRetArg()) {
    auto AI = Fn->getArg(IRFunctionArgs.getSRetArgNo());
    AI->setName("agg.result");
    AI->addAttr(llvm::Attribute::NoAlias);
  }

  // Track whether we received the parameter as a pointer (indirect, byval, or
  // inalloca). If we already have a pointer, EmitParmDecl doesn't need to copy
  // it into a local alloca for us.
  SmallVector<ParamValue, 16> ArgVals;
  ArgVals.reserve(Args.size());

  // Create a pointer value for every parameter declaration. This usually
  // entails copying one or more LLVM IR arguments into an alloca. Don't push
  // any cleanups or do anything that might unwind. We do that separately, so
  // we can push the cleanups in the correct order for the ABI.
  assert(FI.arg_size() == Args.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = FI.arg_begin();
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i, ++info_it, ++ArgNo) {
    const VarDecl *Arg = *i;
    const ABIArgInfo &ArgI = info_it->info;

    bool isPromoted =
        isa<ParmVarDecl>(Arg) && cast<ParmVarDecl>(Arg)->isKNRPromoted();
    // We are converting from ABIArgInfo type to VarDecl type directly, unless
    // the parameter is promoted. In this case we convert to
    // CGFunctionInfo::ArgInfo type with subsequent argument demotion.
    QualType Ty = isPromoted ? info_it->type : Arg->getType();
    assert(hasScalarEvaluationKind(Ty) ==
           hasScalarEvaluationKind(Arg->getType()));

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    switch (ArgI.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      auto FieldIndex = ArgI.getInAllocaFieldIndex();
      Address V =
          Builder.CreateStructGEP(ArgStruct, FieldIndex, Arg->getName());
      if (ArgI.getInAllocaIndirect())
        V = Address(Builder.CreateLoad(V), ConvertTypeForMem(Ty),
                    getContext().getTypeAlignInChars(Ty));
      ArgVals.push_back(ParamValue::forIndirect(V));
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      Address ParamAddr = Address(Fn->getArg(FirstIRArg), ConvertTypeForMem(Ty),
                                  ArgI.getIndirectAlign(), KnownNonNull);

      if (!hasScalarEvaluationKind(Ty)) {
        // Aggregates and complex variables are accessed by reference. All we
        // need to do is realign the value, if requested. Also, if the address
        // may be aliased, copy it to ensure that the parameter variable is
        // mutable and has a unique address, as C requires.
        Address V = ParamAddr;
        if (ArgI.getIndirectRealign() || ArgI.isIndirectAliased()) {
          Address AlignedTemp = CreateMemTemp(Ty, "coerce");

          // Copy from the incoming argument pointer to the temporary with the
          // appropriate alignment.
          //
          // FIXME: We should have a common utility for generating an aggregate
          // copy.
          CharUnits Size = getContext().getTypeSizeInChars(Ty);
          Builder.CreateMemCpy(
              AlignedTemp.getPointer(), AlignedTemp.getAlignment().getAsAlign(),
              ParamAddr.getPointer(), ParamAddr.getAlignment().getAsAlign(),
              llvm::ConstantInt::get(IntPtrTy, Size.getQuantity()));
          V = AlignedTemp;
        }
        ArgVals.push_back(ParamValue::forIndirect(V));
      } else {
        // Load scalar value from indirect argument.
        llvm::Value *V =
            EmitLoadOfScalar(ParamAddr, false, Ty, Arg->getBeginLoc());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      }
      break;
    }

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      auto AI = Fn->getArg(FirstIRArg);
      llvm::Type *LTy = ConvertType(Arg->getType());

      // Prepare parameter attributes. So far, only attributes for pointer
      // parameters are prepared. See
      // http://llvm.org/docs/LangRef.html#paramattrs.
      if (ArgI.getDirectOffset() == 0 && LTy->isPointerTy() &&
          ArgI.getCoerceToType()->isPointerTy()) {
        assert(NumIRArgs == 1);

        if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(Arg)) {
          // Set `nonnull` attribute if any.
          if (getNonNullAttr(CurCodeDecl, PVD, PVD->getType(),
                             PVD->getFunctionScopeIndex()) &&
              !CGM.getCodeGenOpts().NullPointerIsValid)
            AI->addAttr(llvm::Attribute::NonNull);

          QualType OTy = PVD->getOriginalType();
          if (const auto *ArrTy =
                  getContext().getAsConstantArrayType(OTy)) {
            // A C99 array parameter declaration with the static keyword also
            // indicates dereferenceability, and if the size is constant we can
            // use the dereferenceable attribute (which requires the size in
            // bytes).
            if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
                  CGM.getNaturalTypeAlignment(ETy).getAsAlign();
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
                               .addAlignmentAttr(Alignment));
              uint64_t ArrSize = ArrTy->getSize().getZExtValue();
              if (!ETy->isIncompleteType() && ETy->isConstantSizeType() &&
                  ArrSize) {
                llvm::AttrBuilder Attrs(getLLVMContext());
                Attrs.addDereferenceableAttr(
                    getContext().getTypeSizeInChars(ETy).getQuantity() *
                    ArrSize);
                AI->addAttrs(Attrs);
              } else if (getContext().getTargetInfo().getNullPointerValue(
                             ETy.getAddressSpace()) == 0 &&
                         !CGM.getCodeGenOpts().NullPointerIsValid) {
                AI->addAttr(llvm::Attribute::NonNull);
              }
            }
          } else if (const auto *ArrTy =
                         getContext().getAsVariableArrayType(OTy)) {
            // For C99 VLAs with the static keyword, we don't know the size so
            // we can't use the dereferenceable attribute, but in addrspace(0)
            // we know that it must be nonnull.
            if (ArrTy->getSizeModifier() == ArraySizeModifier::Static) {
              QualType ETy = ArrTy->getElementType();
              llvm::Align Alignment =
                  CGM.getNaturalTypeAlignment(ETy).getAsAlign();
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
                               .addAlignmentAttr(Alignment));
              if (!getTypes().getTargetAddressSpace(ETy) &&
                  !CGM.getCodeGenOpts().NullPointerIsValid)
                AI->addAttr(llvm::Attribute::NonNull);
            }
          }

          // Set `align` attribute if any.
          const auto *AVAttr = PVD->getAttr<AlignValueAttr>();
          if (!AVAttr)
            if (const auto *TOTy = OTy->getAs<TypedefType>())
              AVAttr = TOTy->getDecl()->getAttr<AlignValueAttr>();
          if (AVAttr && !SanOpts.has(SanitizerKind::Alignment)) {
            // If alignment-assumption sanitizer is enabled, we do *not* add
            // alignment attribute here, but emit normal alignment assumption,
            // so the UBSAN check could function.
            llvm::ConstantInt *AlignmentCI =
                cast<llvm::ConstantInt>(EmitScalarExpr(AVAttr->getAlignment()));
            uint64_t AlignmentInt =
                AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment);
            if (AI->getParamAlign().valueOrOne() < AlignmentInt) {
              AI->removeAttr(llvm::Attribute::AttrKind::Alignment);
              AI->addAttrs(llvm::AttrBuilder(getLLVMContext())
                               .addAlignmentAttr(llvm::Align(AlignmentInt)));
            }
          }
        }

        // Set 'noalias' if an argument type has the `restrict` qualifier.
        if (Arg->getType().isRestrictQualified())
          AI->addAttr(llvm::Attribute::NoAlias);
      }

      // Prepare the argument value. If we have the trivial case, handle it
      // with no muss and fuss.
      if (!isa<llvm::StructType>(ArgI.getCoerceToType()) &&
          ArgI.getCoerceToType() == ConvertType(Ty) &&
          ArgI.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);

        // LLVM expects swifterror parameters to be used in very restricted
        // ways. Copy the value into a less-restricted temporary.
        llvm::Value *V = AI;
        if (FI.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          QualType pointeeTy = Ty->getPointeeType();
          assert(pointeeTy->isPointerType());
          Address temp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          Address arg(V, ConvertTypeForMem(pointeeTy),
                      getContext().getTypeAlignInChars(pointeeTy));
          llvm::Value *incomingErrorValue = Builder.CreateLoad(arg);
          Builder.CreateStore(incomingErrorValue, temp);
          V = temp.getPointer();

          // Push a cleanup to copy the value back at the end of the function.
          // The convention does not guarantee that the value will be written
          // back if the function exits with an unwind exception.
          EHStack.pushCleanup<CopyBackSwiftError>(NormalCleanup, temp, arg);
        }

        // Ensure the argument is the correct type.
        if (V->getType() != ArgI.getCoerceToType())
          V = Builder.CreateBitCast(V, ArgI.getCoerceToType());

        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);

        // Because of merging of function types from multiple decls it is
        // possible for the type of an argument to not match the corresponding
        // type in the function type. Since we are codegening the callee
        // in here, add a cast to the argument type.
        llvm::Type *LTy = ConvertType(Arg->getType());
        if (V->getType() != LTy)
          V = Builder.CreateBitCast(V, LTy);

        ArgVals.push_back(ParamValue::forDirect(V));
        break;
      }

      // VLST (vector-length-specific, i.e. fixed-length vector) arguments are
      // coerced to VLATs (vector-length-agnostic, i.e. scalable vector types)
      // at the function boundary for ABI consistency. If this is a VLST that
      // was coerced to a VLAT at the function boundary and the types match up,
      // use llvm.vector.extract to convert back to the original VLST.
      if (auto *VecTyTo = dyn_cast<llvm::FixedVectorType>(ConvertType(Ty))) {
        llvm::Value *Coerced = Fn->getArg(FirstIRArg);
        if (auto *VecTyFrom =
                dyn_cast<llvm::ScalableVectorType>(Coerced->getType())) {
          // If we are casting a scalable 16 x i1 predicate vector to a fixed
          // i8 vector, bitcast the source and use a vector extract.
          auto PredType =
              llvm::ScalableVectorType::get(Builder.getInt1Ty(), 16);
          if (VecTyFrom == PredType &&
              VecTyTo->getElementType() == Builder.getInt8Ty()) {
            VecTyFrom = llvm::ScalableVectorType::get(Builder.getInt8Ty(), 2);
            Coerced = Builder.CreateBitCast(Coerced, VecTyFrom);
          }
          if (VecTyFrom->getElementType() == VecTyTo->getElementType()) {
            llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);

            assert(NumIRArgs == 1);
            Coerced->setName(Arg->getName() + ".coerce");
            ArgVals.push_back(ParamValue::forDirect(Builder.CreateExtractVector(
                VecTyTo, Coerced, Zero, "cast.fixed")));
            break;
          }
        }
      }

      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg),
                                     Arg->getName());

      // Pointer to store into.
      Address Ptr = emitAddressAtOffset(*this, Alloca, ArgI);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs (first-class aggregates), so we flatten them if this is safe to
      // do for this argument.
      llvm::StructType *STy = dyn_cast<llvm::StructType>(ArgI.getCoerceToType());
      if (ArgI.isDirect() && ArgI.getCanBeFlattened() && STy &&
          STy->getNumElements() > 1) {
        llvm::TypeSize StructSize = CGM.getDataLayout().getTypeAllocSize(STy);
        llvm::TypeSize PtrElementSize =
            CGM.getDataLayout().getTypeAllocSize(Ptr.getElementType());
        if (StructSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "
                 "type");
          assert(StructSize == PtrElementSize &&
                 "Only allow non-fractional movement of structure with "
                 "homogeneous scalable vector type");
          assert(STy->getNumElements() == NumIRArgs);

          llvm::Value *LoadedStructValue = llvm::PoisonValue::get(STy);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto *AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            LoadedStructValue =
                Builder.CreateInsertValue(LoadedStructValue, AI, i);
          }

          Builder.CreateStore(LoadedStructValue, Ptr);
        } else {
          uint64_t SrcSize = StructSize.getFixedValue();
          uint64_t DstSize = PtrElementSize.getFixedValue();

          Address AddrToStoreInto = Address::invalid();
          if (SrcSize <= DstSize) {
            AddrToStoreInto = Ptr.withElementType(STy);
          } else {
            AddrToStoreInto =
                CreateTempAlloca(STy, Alloca.getAlignment(), "coerce");
          }

          assert(STy->getNumElements() == NumIRArgs);
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            auto AI = Fn->getArg(FirstIRArg + i);
            AI->setName(Arg->getName() + ".coerce" + Twine(i));
            Address EltPtr = Builder.CreateStructGEP(AddrToStoreInto, i);
            Builder.CreateStore(AI, EltPtr);
          }

          if (SrcSize > DstSize) {
            Builder.CreateMemCpy(Ptr, AddrToStoreInto, DstSize);
          }
        }
      } else {
        // Simple case, just do a coerced store of the argument into the alloca.
        assert(NumIRArgs == 1);
        auto AI = Fn->getArg(FirstIRArg);
        AI->setName(Arg->getName() + ".coerce");
        CreateCoercedStore(AI, Ptr, /*DstIsVolatile=*/false, *this);
      }

      // Match to what EmitParmDecl is expecting for this type.
      if (CodeGenFunction::hasScalarEvaluationKind(Ty)) {
        llvm::Value *V =
            EmitLoadOfScalar(Alloca, false, Ty, Arg->getBeginLoc());
        if (isPromoted)
          V = emitArgumentDemotion(*this, Arg, V);
        ArgVals.push_back(ParamValue::forDirect(V));
      } else {
        ArgVals.push_back(ParamValue::forIndirect(Alloca));
      }
      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      // Reconstruct into a temporary.
      Address alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      ArgVals.push_back(ParamValue::forIndirect(alloca));

      auto coercionType = ArgI.getCoerceAndExpandType();
      alloca = alloca.withElementType(coercionType);

      unsigned argIndex = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType))
          continue;

        auto eltAddr = Builder.CreateStructGEP(alloca, i);
        auto elt = Fn->getArg(argIndex++);
        Builder.CreateStore(elt, eltAddr);
      }
      assert(argIndex == FirstIRArg + NumIRArgs);
      break;
    }

    case ABIArgInfo::Expand: {
      // If this structure was expanded into multiple arguments then
      // we need to create a temporary and reconstruct it from the
      // arguments.
      Address Alloca = CreateMemTemp(Ty, getContext().getDeclAlign(Arg));
      LValue LV = MakeAddrLValue(Alloca, Ty);
      ArgVals.push_back(ParamValue::forIndirect(Alloca));

      auto FnArgIter = Fn->arg_begin() + FirstIRArg;
      ExpandTypeFromArgs(Ty, LV, FnArgIter);
      assert(FnArgIter == Fn->arg_begin() + FirstIRArg + NumIRArgs);
      for (unsigned i = 0, e = NumIRArgs; i != e; ++i) {
        auto AI = Fn->getArg(FirstIRArg + i);
        AI->setName(Arg->getName() + "." + Twine(i));
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      // Initialize the local variable appropriately.
      if (!hasScalarEvaluationKind(Ty)) {
        ArgVals.push_back(ParamValue::forIndirect(CreateMemTemp(Ty)));
      } else {
        llvm::Value *U = llvm::UndefValue::get(ConvertType(Arg->getType()));
        ArgVals.push_back(ParamValue::forDirect(U));
      }
      break;
    }
  }

  if (getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()) {
    for (int I = Args.size() - 1; I >= 0; --I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  } else {
    for (unsigned I = 0, E = Args.size(); I != E; ++I)
      EmitParmDecl(*Args[I], ArgVals[I], I + 1);
  }
}

static void eraseUnusedBitCasts(llvm::Instruction *insn) {
  while (insn->use_empty()) {
    llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(insn);
    if (!bitcast) return;

    // This is "safe" because we would have used a ConstantExpr otherwise.
    insn = cast<llvm::Instruction>(bitcast->getOperand(0));
    bitcast->eraseFromParent();
  }
}

/// Try to emit a fused autorelease of a return result.
static llvm::Value *tryEmitFusedAutoreleaseOfResult(CodeGenFunction &CGF,
                                                    llvm::Value *result) {
  // The insertion point must immediately follow the result value.
  llvm::BasicBlock *BB = CGF.Builder.GetInsertBlock();
  if (BB->empty()) return nullptr;
  if (&BB->back() != result) return nullptr;

  llvm::Type *resultType = result->getType();

  // result is in a BasicBlock and is therefore an Instruction.
  llvm::Instruction *generator = cast<llvm::Instruction>(result);

  SmallVector<llvm::Instruction *, 4> InstsToKill;

  // Look for:
  //   %generator = bitcast %type1* %generator2 to %type2*
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(generator)) {
    // We would have emitted this as a constant if the operand weren't
    // an Instruction.
    generator = cast<llvm::Instruction>(bitcast->getOperand(0));

    // Require the generator to be immediately followed by the cast.
    if (generator->getNextNode() != bitcast)
      return nullptr;

    InstsToKill.push_back(bitcast);
  }

  // Look for:
  //   %generator = call i8* @objc_retain(i8* %originalResult)
  // or
  //   %generator = call i8* @objc_retainAutoreleasedReturnValue(i8* %originalResult)
  llvm::CallInst *call = dyn_cast<llvm::CallInst>(generator);
  if (!call) return nullptr;

  bool doRetainAutorelease;

  if (call->getCalledOperand() == CGF.CGM.getObjCEntrypoints().objc_retain) {
    doRetainAutorelease = true;
  } else if (call->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().objc_retainAutoreleasedReturnValue) {
    doRetainAutorelease = false;

    // If we emitted an assembly marker for this call (and the
    // ARCEntrypoints field should have been set if so), go looking
    // for that call.  If we can't find it, we can't do this
    // optimization.  But it should always be the immediately previous
    // instruction, unless we needed bitcasts around the call.
    if (CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker) {
      llvm::Instruction *prev = call->getPrevNode();
      assert(prev);
      if (isa<llvm::BitCastInst>(prev)) {
        prev = prev->getPrevNode();
        assert(prev);
      }
      assert(isa<llvm::CallInst>(prev));
      assert(cast<llvm::CallInst>(prev)->getCalledOperand() ==
             CGF.CGM.getObjCEntrypoints().retainAutoreleasedReturnValueMarker);
      InstsToKill.push_back(prev);
    }
  } else {
    return nullptr;
  }

  result = call->getArgOperand(0);
  InstsToKill.push_back(call);

  // Keep killing bitcasts, for sanity.  Note that we no longer care
  // about precise ordering as long as there's exactly one use.
  while (llvm::BitCastInst *bitcast = dyn_cast<llvm::BitCastInst>(result)) {
    if (!bitcast->hasOneUse()) break;
    InstsToKill.push_back(bitcast);
    result = bitcast->getOperand(0);
  }

  // Delete all the unnecessary instructions, from latest to earliest.
  for (auto *I : InstsToKill)
    I->eraseFromParent();

  // Do the fused retain/autorelease if we were asked to.
  if (doRetainAutorelease)
    result = CGF.EmitARCRetainAutoreleaseReturnValue(result);

  // Cast back to the result type.
  return CGF.Builder.CreateBitCast(result, resultType);
}

/// If this is a +1 of the value of an immutable 'self', remove it.
static llvm::Value *tryRemoveRetainOfSelf(CodeGenFunction &CGF,
                                          llvm::Value *result) {
  // This is only applicable to a method with an immutable 'self'.
  const ObjCMethodDecl *method =
      dyn_cast_or_null<ObjCMethodDecl>(CGF.CurCodeDecl);
  if (!method) return nullptr;
  const VarDecl *self = method->getSelfDecl();
  if (!self->getType().isConstQualified()) return nullptr;

  // Look for a retain call. Note: stripPointerCasts looks through returned arg
  // functions, which would cause us to miss the retain.
  llvm::CallInst *retainCall = dyn_cast<llvm::CallInst>(result);
  if (!retainCall || retainCall->getCalledOperand() !=
                         CGF.CGM.getObjCEntrypoints().objc_retain)
    return nullptr;

  // Look for an ordinary load of 'self'.
  llvm::Value *retainedValue = retainCall->getArgOperand(0);
  llvm::LoadInst *load =
      dyn_cast<llvm::LoadInst>(retainedValue->stripPointerCasts());
  if (!load || load->isAtomic() || load->isVolatile() ||
      load->getPointerOperand() != CGF.GetAddrOfLocalVar(self).getPointer())
    return nullptr;

  // Okay!  Burn it all down.  This relies for correctness on the
  // assumption that the retain is emitted as part of the return and
  // that thereafter everything is used "linearly".
  llvm::Type *resultType = result->getType();
  eraseUnusedBitCasts(cast<llvm::Instruction>(result));
  assert(retainCall->use_empty());
  retainCall->eraseFromParent();
  eraseUnusedBitCasts(cast<llvm::Instruction>(retainedValue));

  return CGF.Builder.CreateBitCast(load, resultType);
}

/// Emit an ARC autorelease of the result of a function.
///
/// \return the value to actually return from the function
static llvm::Value *emitAutoreleaseOfResult(CodeGenFunction &CGF,
                                            llvm::Value *result) {
  // If we're returning 'self', kill the initial retain.  This is a
  // heuristic attempt to "encourage correctness" in the really unfortunate
  // case where we have a return of self during a dealloc and we desperately
  // need to avoid the possible autorelease.
  if (llvm::Value *self = tryRemoveRetainOfSelf(CGF, result))
    return self;

  // At -O0, try to emit a fused retain/autorelease.
  if (CGF.shouldUseFusedARCCalls())
    if (llvm::Value *fused = tryEmitFusedAutoreleaseOfResult(CGF, result))
      return fused;

  return CGF.EmitARCAutoreleaseReturnValue(result);
}

/// Heuristically search for a dominating store to the return-value slot.
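/// Returning null is always safe: callers (see EmitFunctionEpilog) simply
/// fall back to loading from the return-value slot instead of eliding the
/// load/store pair.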
static llvm::StoreInst *findDominatingStoreToReturnValue(CodeGenFunction &CGF) {
  // Check whether a User is a store whose pointer operand is the ReturnValue.
  // We are looking for stores to the ReturnValue, not for stores of the
  // ReturnValue to some other location.
  auto GetStoreIfValid = [&CGF](llvm::User *U) -> llvm::StoreInst * {
    auto *SI = dyn_cast<llvm::StoreInst>(U);
    if (!SI || SI->getPointerOperand() != CGF.ReturnValue.getPointer() ||
        SI->getValueOperand()->getType() != CGF.ReturnValue.getElementType())
      return nullptr;
    // These aren't actually possible for non-coerced returns, and we
    // only care about non-coerced returns on this code path.
    // All memory instructions inside __try block are volatile.
    assert(!SI->isAtomic() &&
           (!SI->isVolatile() || CGF.currentFunctionUsesSEHTry()));
    return SI;
  };
  // If there are multiple uses of the return-value slot, just check
  // for something immediately preceding the IP.  Sometimes this can
  // happen with how we generate implicit-returns; it can also happen
  // with noreturn cleanups.
  if (!CGF.ReturnValue.getPointer()->hasOneUse()) {
    llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
    if (IP->empty()) return nullptr;

    // Look at directly preceding instruction, skipping bitcasts and lifetime
    // markers.
    for (llvm::Instruction &I : make_range(IP->rbegin(), IP->rend())) {
      if (isa<llvm::BitCastInst>(&I))
        continue;
      if (auto *II = dyn_cast<llvm::IntrinsicInst>(&I))
        if (II->getIntrinsicID() == llvm::Intrinsic::lifetime_end)
          continue;

      return GetStoreIfValid(&I);
    }
    return nullptr;
  }

  llvm::StoreInst *store =
      GetStoreIfValid(CGF.ReturnValue.getPointer()->user_back());
  if (!store) return nullptr;

  // Now do a first-and-dirty dominance check: just walk up the
  // single-predecessors chain from the current insertion point.
  llvm::BasicBlock *StoreBB = store->getParent();
  llvm::BasicBlock *IP = CGF.Builder.GetInsertBlock();
  llvm::SmallPtrSet<llvm::BasicBlock *, 4> SeenBBs;
  while (IP != StoreBB) {
    if (!SeenBBs.insert(IP).second || !(IP = IP->getSinglePredecessor()))
      return nullptr;
  }

  // Okay, the store's basic block dominates the insertion point; we
  // can do our thing.
  return store;
}

// Helper functions for EmitCMSEClearRecord

// Set the bits corresponding to a field having width `BitWidth` and located at
// offset `BitOffset` (from the least significant bit) within a storage unit of
// `Bits.size()` bytes. Each element of `Bits` corresponds to one target byte.
// Use little-endian layout, i.e. `Bits[0]` is the LSB.
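// For example, with CharWidth = 8, BitOffset = 4 and BitWidth = 6, this sets
// Bits[0] |= 0xF0 and Bits[1] |= 0x03.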
static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int BitOffset,
                        int BitWidth, int CharWidth) {
  assert(CharWidth <= 64);
  assert(static_cast<unsigned>(BitWidth) <= Bits.size() * CharWidth);

  int Pos = 0;
  if (BitOffset >= CharWidth) {
    Pos += BitOffset / CharWidth;
    BitOffset = BitOffset % CharWidth;
  }

  const uint64_t Used = (uint64_t(1) << CharWidth) - 1;
  if (BitOffset + BitWidth >= CharWidth) {
    Bits[Pos++] |= (Used << BitOffset) & Used;
    BitWidth -= CharWidth - BitOffset;
    BitOffset = 0;
  }

  while (BitWidth >= CharWidth) {
    Bits[Pos++] = Used;
    BitWidth -= CharWidth;
  }

  if (BitWidth > 0)
    Bits[Pos++] |= (Used >> (CharWidth - BitWidth)) << BitOffset;
}

// Set the bits corresponding to a field having width `BitWidth` and located at
// offset `BitOffset` (from the least significant bit) within a storage unit of
// `StorageSize` bytes, located at `StorageOffset` in `Bits`. Each element of
// `Bits` corresponds to one target byte. Use target endian layout.
static void setBitRange(SmallVectorImpl<uint64_t> &Bits, int StorageOffset,
                        int StorageSize, int BitOffset, int BitWidth,
                        int CharWidth, bool BigEndian) {

  SmallVector<uint64_t, 8> TmpBits(StorageSize);
  setBitRange(TmpBits, BitOffset, BitWidth, CharWidth);

  if (BigEndian)
    std::reverse(TmpBits.begin(), TmpBits.end());

  for (uint64_t V : TmpBits)
    Bits[StorageOffset++] |= V;
}

static void setUsedBits(CodeGenModule &, QualType, int,
                        SmallVectorImpl<uint64_t> &);

// Set the bits in `Bits`, which correspond to the value representations of
// the actual members of the record type `RTy`. Note that this function does
// not handle base classes, virtual tables, etc., since they cannot happen in
// CMSE function arguments or return values. The bit mask corresponds to the
// target memory layout, i.e. it's endian dependent.
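// For example, on a typical little-endian target, `struct { char c; short s; }`
// (one byte of padding between the members) produces the mask bytes
// {0xFF, 0x00, 0xFF, 0xFF}.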
static void setUsedBits(CodeGenModule &CGM, const RecordType *RTy, int Offset,
                        SmallVectorImpl<uint64_t> &Bits) {
  ASTContext &Context = CGM.getContext();
  int CharWidth = Context.getCharWidth();
  const RecordDecl *RD = RTy->getDecl()->getDefinition();
  const ASTRecordLayout &ASTLayout = Context.getASTRecordLayout(RD);
  const CGRecordLayout &Layout = CGM.getTypes().getCGRecordLayout(RD);

  int Idx = 0;
  for (auto I = RD->field_begin(), E = RD->field_end(); I != E; ++I, ++Idx) {
    const FieldDecl *F = *I;

    if (F->isUnnamedBitfield() || F->isZeroLengthBitField(Context) ||
        F->getType()->isIncompleteArrayType())
      continue;

    if (F->isBitField()) {
      const CGBitFieldInfo &BFI = Layout.getBitFieldInfo(F);
      setBitRange(Bits, Offset + BFI.StorageOffset.getQuantity(),
                  BFI.StorageSize / CharWidth, BFI.Offset,
                  BFI.Size, CharWidth,
                  CGM.getDataLayout().isBigEndian());
      continue;
    }

    setUsedBits(CGM, F->getType(),
                Offset + ASTLayout.getFieldOffset(Idx) / CharWidth, Bits);
  }
}

// Set the bits in `Bits`, which correspond to the value representations of
// the elements of an array type `ATy`.
static void setUsedBits(CodeGenModule &CGM, const ConstantArrayType *ATy,
                        int Offset, SmallVectorImpl<uint64_t> &Bits) {
  const ASTContext &Context = CGM.getContext();

  QualType ETy = Context.getBaseElementType(ATy);
  int Size = Context.getTypeSizeInChars(ETy).getQuantity();
  SmallVector<uint64_t, 4> TmpBits(Size);
  setUsedBits(CGM, ETy, 0, TmpBits);

  for (int I = 0, N = Context.getConstantArrayElementCount(ATy); I < N; ++I) {
    auto Src = TmpBits.begin();
    auto Dst = Bits.begin() + Offset + I * Size;
    for (int J = 0; J < Size; ++J)
      *Dst++ |= *Src++;
  }
}

// Set the bits in `Bits`, which correspond to the value representations of
// the type `QTy`.
static void setUsedBits(CodeGenModule &CGM, QualType QTy, int Offset,
                        SmallVectorImpl<uint64_t> &Bits) {
  if (const auto *RTy = QTy->getAs<RecordType>())
    return setUsedBits(CGM, RTy, Offset, Bits);

  ASTContext &Context = CGM.getContext();
  if (const auto *ATy = Context.getAsConstantArrayType(QTy))
    return setUsedBits(CGM, ATy, Offset, Bits);

  int Size = Context.getTypeSizeInChars(QTy).getQuantity();
  if (Size <= 0)
    return;

  std::fill_n(Bits.begin() + Offset, Size,
              (uint64_t(1) << Context.getCharWidth()) - 1);
}

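// Combine `Size` consecutive bytes of `Bits`, starting at `Pos`, into a single
// mask, respecting the target's byte order. For example, the bytes
// {0xFF, 0x0F} yield 0x0FFF on a little-endian target.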
static uint64_t buildMultiCharMask(const SmallVectorImpl<uint64_t> &Bits,
                                   int Pos, int Size, int CharWidth,
                                   bool BigEndian) {
  assert(Size > 0);
  uint64_t Mask = 0;
  if (BigEndian) {
    for (auto P = Bits.begin() + Pos, E = Bits.begin() + Pos + Size; P != E;
         ++P)
      Mask = (Mask << CharWidth) | *P;
  } else {
    auto P = Bits.begin() + Pos + Size, End = Bits.begin() + Pos;
    do
      Mask = (Mask << CharWidth) | *--P;
    while (P != End);
  }
  return Mask;
}

// Emit code to clear the bits in a record that aren't part of any
// user-declared member, when the record is a function return value.
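// For example, `struct { char c; short s; }` returned as i32 on a
// little-endian target is cleared with `and i32 %ret, 0xFFFF00FF`
// (illustrative).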
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::IntegerType *ITy,
                                                  QualType QTy) {
  assert(Src->getType() == ITy);
  assert(ITy->getScalarSizeInBits() <= 64);

  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ITy);
  SmallVector<uint64_t, 4> Bits(Size);
  setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);

  int CharWidth = CGM.getContext().getCharWidth();
  uint64_t Mask =
      buildMultiCharMask(Bits, 0, Size, CharWidth, DataLayout.isBigEndian());

  return Builder.CreateAnd(Src, Mask, "cmse.clear");
}

// Emit code to clear the bits in a record that aren't part of any
// user-declared member, when the record is a function argument.
llvm::Value *CodeGenFunction::EmitCMSEClearRecord(llvm::Value *Src,
                                                  llvm::ArrayType *ATy,
                                                  QualType QTy) {
  const llvm::DataLayout &DataLayout = CGM.getDataLayout();
  int Size = DataLayout.getTypeStoreSize(ATy);
  SmallVector<uint64_t, 16> Bits(Size);
  setUsedBits(CGM, QTy->castAs<RecordType>(), 0, Bits);

  // Clear each element of the LLVM array.
  int CharWidth = CGM.getContext().getCharWidth();
  int CharsPerElt =
      ATy->getArrayElementType()->getScalarSizeInBits() / CharWidth;
  int MaskIndex = 0;
  llvm::Value *R = llvm::PoisonValue::get(ATy);
  for (int I = 0, N = ATy->getArrayNumElements(); I != N; ++I) {
    uint64_t Mask = buildMultiCharMask(Bits, MaskIndex, CharsPerElt, CharWidth,
                                       DataLayout.isBigEndian());
    MaskIndex += CharsPerElt;
    llvm::Value *T0 = Builder.CreateExtractValue(Src, I);
    llvm::Value *T1 = Builder.CreateAnd(T0, Mask, "cmse.clear");
    R = Builder.CreateInsertValue(R, T1, I);
  }

  return R;
}

void CodeGenFunction::EmitFunctionEpilog(const CGFunctionInfo &FI,
                                         bool EmitRetDbgLoc,
                                         SourceLocation EndLoc) {
  if (FI.isNoReturn()) {
    // Noreturn functions don't return.
    EmitUnreachable(EndLoc);
    return;
  }

  if (CurCodeDecl && CurCodeDecl->hasAttr<NakedAttr>()) {
    // Naked functions don't have epilogues.
    Builder.CreateUnreachable();
    return;
  }

  // Functions with no result always return void.
  if (!ReturnValue.isValid()) {
    Builder.CreateRetVoid();
    return;
  }

  llvm::DebugLoc RetDbgLoc;
  llvm::Value *RV = nullptr;
  QualType RetTy = FI.getReturnType();
  const ABIArgInfo &RetAI = FI.getReturnInfo();

  switch (RetAI.getKind()) {
  case ABIArgInfo::InAlloca:
    // Aggregates get evaluated directly into the destination.  Sometimes we
    // need to return the sret value in a register, though.
    assert(hasAggregateEvaluationKind(RetTy));
    if (RetAI.getInAllocaSRet()) {
      llvm::Function::arg_iterator EI = CurFn->arg_end();
      --EI;
      llvm::Value *ArgStruct = &*EI;
      llvm::Value *SRet = Builder.CreateStructGEP(
          FI.getArgStruct(), ArgStruct, RetAI.getInAllocaFieldIndex());
      llvm::Type *Ty =
          cast<llvm::GetElementPtrInst>(SRet)->getResultElementType();
      RV = Builder.CreateAlignedLoad(Ty, SRet, getPointerAlign(), "sret");
    }
    break;

  case ABIArgInfo::Indirect: {
    auto AI = CurFn->arg_begin();
    if (RetAI.isSRetAfterThis())
      ++AI;
    switch (getEvaluationKind(RetTy)) {
    case TEK_Complex: {
      ComplexPairTy RT =
          EmitLoadOfComplex(MakeAddrLValue(ReturnValue, RetTy), EndLoc);
      EmitStoreOfComplex(RT, MakeNaturalAlignAddrLValue(&*AI, RetTy),
                         /*isInit*/ true);
      break;
    }
    case TEK_Aggregate:
      // Do nothing; aggregates get evaluated directly into the destination.
      break;
    case TEK_Scalar: {
      LValueBaseInfo BaseInfo;
      TBAAAccessInfo TBAAInfo;
      CharUnits Alignment =
          CGM.getNaturalTypeAlignment(RetTy, &BaseInfo, &TBAAInfo);
      Address ArgAddr(&*AI, ConvertType(RetTy), Alignment);
      LValue ArgVal =
          LValue::MakeAddr(ArgAddr, RetTy, getContext(), BaseInfo, TBAAInfo);
      EmitStoreOfScalar(
          Builder.CreateLoad(ReturnValue), ArgVal, /*isInit*/ true);
      break;
    }
    }
    break;
  }

  case ABIArgInfo::Extend:
  case ABIArgInfo::Direct:
    if (RetAI.getCoerceToType() == ConvertType(RetTy) &&
        RetAI.getDirectOffset() == 0) {
      // The internal return value temp will always have
      // pointer-to-return-type type, so just do a load.

      // If there is a dominating store to ReturnValue, we can elide
      // the load, zap the store, and usually zap the alloca.
      if (llvm::StoreInst *SI =
              findDominatingStoreToReturnValue(*this)) {
        // Reuse the debug location from the store unless there is
        // cleanup code to be emitted between the store and return
        // instruction.
        if (EmitRetDbgLoc && !AutoreleaseResult)
          RetDbgLoc = SI->getDebugLoc();
        // Get the stored value and nuke the now-dead store.
        RV = SI->getValueOperand();
        SI->eraseFromParent();

      // Otherwise, we have to do a simple load.
      } else {
        RV = Builder.CreateLoad(ReturnValue);
      }
    } else {
      // If the value is offset in memory, apply the offset now.
      Address V = emitAddressAtOffset(*this, ReturnValue, RetAI);

      RV = CreateCoercedLoad(V, RetAI.getCoerceToType(), *this);
    }

    // In ARC, end functions that return a retainable type with a call
    // to objc_autoreleaseReturnValue.
    if (AutoreleaseResult) {
#ifndef NDEBUG
      // Type::isObjCRetainableType has to be called on a QualType that hasn't
      // been stripped of the typedefs, so we cannot use RetTy here. Get the
      // original return type of the FunctionDecl, ObjCMethodDecl, or BlockDecl
      // from CurCodeDecl or BlockInfo.
      QualType RT;

      if (auto *FD = dyn_cast<FunctionDecl>(CurCodeDecl))
        RT = FD->getReturnType();
      else if (auto *MD = dyn_cast<ObjCMethodDecl>(CurCodeDecl))
        RT = MD->getReturnType();
      else if (isa<BlockDecl>(CurCodeDecl))
        RT = BlockInfo->BlockExpression->getFunctionType()->getReturnType();
      else
        llvm_unreachable("Unexpected function/method type");

      assert(getLangOpts().ObjCAutoRefCount &&
             !FI.isReturnsRetained() &&
             RT->isObjCRetainableType());
#endif
      RV = emitAutoreleaseOfResult(*this, RV);
    }

    break;

  case ABIArgInfo::Ignore:
    break;

  case ABIArgInfo::CoerceAndExpand: {
    auto coercionType = RetAI.getCoerceAndExpandType();

    // Load all of the coerced elements out into results.
    llvm::SmallVector<llvm::Value*, 4> results;
    Address addr = ReturnValue.withElementType(coercionType);
    for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
      auto coercedEltType = coercionType->getElementType(i);
      if (ABIArgInfo::isPaddingForCoerceAndExpand(coercedEltType))
        continue;

      auto eltAddr = Builder.CreateStructGEP(addr, i);
      auto elt = Builder.CreateLoad(eltAddr);
      results.push_back(elt);
    }

    // If we have one result, it's the single direct result type.
    if (results.size() == 1) {
      RV = results[0];

    // Otherwise, we need to make a first-class aggregate.
    } else {
      // Construct a return type that lacks padding elements.
      llvm::Type *returnType = RetAI.getUnpaddedCoerceAndExpandType();

      RV = llvm::PoisonValue::get(returnType);
      for (unsigned i = 0, e = results.size(); i != e; ++i) {
        RV = Builder.CreateInsertValue(RV, results[i], i);
      }
    }
    break;
  }
  case ABIArgInfo::Expand:
  case ABIArgInfo::IndirectAliased:
    llvm_unreachable("Invalid ABI kind for return argument");
  }

  llvm::Instruction *Ret;
  if (RV) {
    if (CurFuncDecl && CurFuncDecl->hasAttr<CmseNSEntryAttr>()) {
      // For certain return types, clear padding bits, as they may reveal
      // sensitive information.
      // Small struct/union types are passed as integers.
      auto *ITy = dyn_cast<llvm::IntegerType>(RV->getType());
      if (ITy != nullptr && isa<RecordType>(RetTy.getCanonicalType()))
        RV = EmitCMSEClearRecord(RV, ITy, RetTy);
    }
    EmitReturnValueCheck(RV);
    Ret = Builder.CreateRet(RV);
  } else {
    Ret = Builder.CreateRetVoid();
  }

  if (RetDbgLoc)
    Ret->setDebugLoc(std::move(RetDbgLoc));
}

void CodeGenFunction::EmitReturnValueCheck(llvm::Value *RV) {
  // A current decl may not be available when emitting vtable thunks.
  if (!CurCodeDecl)
    return;

  // If the return block isn't reachable, neither is this check, so don't emit
  // it.
  if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty())
    return;

  ReturnsNonNullAttr *RetNNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute))
    RetNNAttr = CurCodeDecl->getAttr<ReturnsNonNullAttr>();

  if (!RetNNAttr && !requiresReturnValueNullabilityCheck())
    return;

  // Prefer the returns_nonnull attribute if it's present.
  SourceLocation AttrLoc;
  SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (RetNNAttr) {
    assert(!requiresReturnValueNullabilityCheck() &&
           "Cannot check nullability and the nonnull attribute");
    AttrLoc = RetNNAttr->getLocation();
    CheckKind = SanitizerKind::ReturnsNonnullAttribute;
    Handler = SanitizerHandler::NonnullReturn;
  } else {
    if (auto *DD = dyn_cast<DeclaratorDecl>(CurCodeDecl))
      if (auto *TSI = DD->getTypeSourceInfo())
        if (auto FTL = TSI->getTypeLoc().getAsAdjusted<FunctionTypeLoc>())
          AttrLoc = FTL.getReturnLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityReturn;
    Handler = SanitizerHandler::NullabilityReturn;
  }

  SanitizerScope SanScope(this);

  // Make sure the "return" source location is valid. If we're checking a
  // nullability annotation, make sure the preconditions for the check are met.
  llvm::BasicBlock *Check = createBasicBlock("nullcheck");
  llvm::BasicBlock *NoCheck = createBasicBlock("no.nullcheck");
  llvm::Value *SLocPtr = Builder.CreateLoad(ReturnLocation, "return.sloc.load");
  llvm::Value *CanNullCheck = Builder.CreateIsNotNull(SLocPtr);
  if (requiresReturnValueNullabilityCheck())
    CanNullCheck =
        Builder.CreateAnd(CanNullCheck, RetValNullabilityPrecondition);
  Builder.CreateCondBr(CanNullCheck, Check, NoCheck);
  EmitBlock(Check);

  // Now do the null check.
  llvm::Value *Cond = Builder.CreateIsNotNull(RV);
  llvm::Constant *StaticData[] = {EmitCheckSourceLocation(AttrLoc)};
  llvm::Value *DynamicData[] = {SLocPtr};
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, DynamicData);

  EmitBlock(NoCheck);

#ifndef NDEBUG
  // The return location should not be used after the check has been emitted.
  ReturnLocation = Address::invalid();
#endif
}

static bool isInAllocaArgument(CGCXXABI &ABI, QualType type) {
  const CXXRecordDecl *RD = type->getAsCXXRecordDecl();
  return RD && ABI.getRecordArgABI(RD) == CGCXXABI::RAA_DirectInMemory;
}

static AggValueSlot createPlaceholderSlot(CodeGenFunction &CGF,
                                          QualType Ty) {
  // FIXME: Generate IR in one pass, rather than going back and fixing up these
  // placeholders.
  llvm::Type *IRTy = CGF.ConvertTypeForMem(Ty);
  llvm::Type *IRPtrTy = llvm::PointerType::getUnqual(CGF.getLLVMContext());
  llvm::Value *Placeholder = llvm::PoisonValue::get(IRPtrTy);

  // FIXME: When we generate this IR in one pass, we shouldn't need
  // this win32-specific alignment hack.
  CharUnits Align = CharUnits::fromQuantity(4);
  Placeholder = CGF.Builder.CreateAlignedLoad(IRPtrTy, Placeholder, Align);

  return AggValueSlot::forAddr(Address(Placeholder, IRTy, Align),
                               Ty.getQualifiers(),
                               AggValueSlot::IsNotDestructed,
                               AggValueSlot::DoesNotNeedGCBarriers,
                               AggValueSlot::IsNotAliased,
                               AggValueSlot::DoesNotOverlap);
}

void CodeGenFunction::EmitDelegateCallArg(CallArgList &args,
                                          const VarDecl *param,
                                          SourceLocation loc) {
  // StartFunction converted the ABI-lowered parameter(s) into a
  // local alloca.  We need to turn that into an r-value suitable
  // for EmitCall.
  Address local = GetAddrOfLocalVar(param);

  QualType type = param->getType();

  // GetAddrOfLocalVar returns a pointer-to-pointer for references,
  // but the argument needs to be the original pointer.
  if (type->isReferenceType()) {
    args.add(RValue::get(Builder.CreateLoad(local)), type);

  // In ARC, move out of consumed arguments so that the release cleanup
  // entered by StartFunction doesn't cause an over-release.  This isn't
  // optimal -O0 code generation, but it should get cleaned up when
  // optimization is enabled.  This also assumes that delegate calls are
  // performed exactly once for a set of arguments, but that should be safe.
  } else if (getLangOpts().ObjCAutoRefCount &&
             param->hasAttr<NSConsumedAttr>() &&
             type->isObjCRetainableType()) {
    llvm::Value *ptr = Builder.CreateLoad(local);
    auto null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(ptr->getType()));
    Builder.CreateStore(null, local);
    args.add(RValue::get(ptr), type);

  // For the most part, we just need to load the alloca, except that
  // aggregate r-values are actually pointers to temporaries.
  } else {
    args.add(convertTempToRValue(local, type, loc), type);
  }

  // Deactivate the cleanup for the callee-destructed param that was pushed.
  if (type->isRecordType() && !CurFuncIsThunk &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee() &&
      param->needsDestruction(getContext())) {
    EHScopeStack::stable_iterator cleanup =
        CalleeDestructedParamCleanups.lookup(cast<ParmVarDecl>(param));
    assert(cleanup.isValid() &&
           "cleanup for callee-destructed param not recorded");
    // This unreachable is a temporary marker which will be removed later.
    llvm::Instruction *isActive = Builder.CreateUnreachable();
    args.addArgCleanupDeactivation(cleanup, isActive);
  }
}
4101
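/// Returns true if the given address is known to be null at compile time,
/// i.e. it is a constant null pointer value.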
static bool isProvablyNull(llvm::Value *addr) {
  return isa<llvm::ConstantPointerNull>(addr);
}

/// Emit the actual writing-back of a writeback.
static void emitWriteback(CodeGenFunction &CGF,
                          const CallArgList::Writeback &writeback) {
  const LValue &srcLV = writeback.Source;
  Address srcAddr = srcLV.getAddress(CGF);
  assert(!isProvablyNull(srcAddr.getPointer()) &&
         "shouldn't have writeback for provably null argument");

  llvm::BasicBlock *contBB = nullptr;

  // If the argument wasn't provably non-null, we need to null check
  // before doing the store.
  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
                                              CGF.CGM.getDataLayout());
  if (!provablyNonNull) {
    llvm::BasicBlock *writebackBB = CGF.createBasicBlock("icr.writeback");
    contBB = CGF.createBasicBlock("icr.done");

    llvm::Value *isNull =
        CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");
    CGF.Builder.CreateCondBr(isNull, contBB, writebackBB);
    CGF.EmitBlock(writebackBB);
  }

  // Load the value to writeback.
  llvm::Value *value = CGF.Builder.CreateLoad(writeback.Temporary);

  // Cast it back, in case we're writing an id to a Foo* or something.
  value = CGF.Builder.CreateBitCast(value, srcAddr.getElementType(),
                                    "icr.writeback-cast");

  // Perform the writeback.

  // If we have a "to use" value, it's something we need to emit a use
  // of. This has to be carefully threaded in: if it's done after the
  // release it's potentially undefined behavior (and the optimizer
  // will ignore it), and if it happens before the retain then the
  // optimizer could move the release there.
  if (writeback.ToUse) {
    assert(srcLV.getObjCLifetime() == Qualifiers::OCL_Strong);

    // Retain the new value. No need to block-copy here: the block's
    // being passed up the stack.
    value = CGF.EmitARCRetainNonBlock(value);

    // Emit the intrinsic use here.
    CGF.EmitARCIntrinsicUse(writeback.ToUse);

    // Load the old value (primitively).
    llvm::Value *oldValue = CGF.EmitLoadOfScalar(srcLV, SourceLocation());

    // Put the new value in place (primitively).
    CGF.EmitStoreOfScalar(value, srcLV, /*init*/ false);

    // Release the old value.
    CGF.EmitARCRelease(oldValue, srcLV.isARCPreciseLifetime());

  // Otherwise, we can just do a normal lvalue store.
  } else {
    CGF.EmitStoreThroughLValue(RValue::get(value), srcLV);
  }

  // Jump to the continuation block.
  if (!provablyNonNull)
    CGF.EmitBlock(contBB);
}

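/// Emit every writeback recorded in the given argument list.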
static void emitWritebacks(CodeGenFunction &CGF,
                           const CallArgList &args) {
  for (const auto &I : args.writebacks())
    emitWriteback(CGF, I);
}

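/// Deactivate the argument cleanups that were flagged for deactivation before
/// the call, and erase the temporary marker instructions that delimited them.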
static void deactivateArgCleanupsBeforeCall(CodeGenFunction &CGF,
                                            const CallArgList &CallArgs) {
  ArrayRef<CallArgList::CallArgCleanup> Cleanups =
      CallArgs.getCleanupsToDeactivate();
  // Iterate in reverse to increase the likelihood of popping the cleanup.
  for (const auto &I : llvm::reverse(Cleanups)) {
    CGF.DeactivateCleanupBlock(I.Cleanup, I.IsActiveIP);
    I.IsActiveIP->eraseFromParent();
  }
}

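/// If the expression (after stripping parentheses) is a unary address-of
/// operator, return its operand; otherwise return null.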
static const Expr *maybeGetUnaryAddrOfOperand(const Expr *E) {
  if (const UnaryOperator *uop = dyn_cast<UnaryOperator>(E->IgnoreParens()))
    if (uop->getOpcode() == UO_AddrOf)
      return uop->getSubExpr();
  return nullptr;
}

/// Emit an argument that's being passed call-by-writeback. That is,
/// we are passing the address of an __autoreleased temporary; it
/// might be copy-initialized with the current value of the given
/// address, but it will definitely be copied out of after the call.
static void emitWritebackArg(CodeGenFunction &CGF, CallArgList &args,
                             const ObjCIndirectCopyRestoreExpr *CRE) {
  LValue srcLV;

  // Make an optimistic effort to emit the address as an l-value.
  // This can fail if the argument expression is more complicated.
  if (const Expr *lvExpr = maybeGetUnaryAddrOfOperand(CRE->getSubExpr())) {
    srcLV = CGF.EmitLValue(lvExpr);

  // Otherwise, just emit it as a scalar.
  } else {
    Address srcAddr = CGF.EmitPointerWithAlignment(CRE->getSubExpr());

    QualType srcAddrType =
        CRE->getSubExpr()->getType()->castAs<PointerType>()->getPointeeType();
    srcLV = CGF.MakeAddrLValue(srcAddr, srcAddrType);
  }
  Address srcAddr = srcLV.getAddress(CGF);

  // The dest and src types don't necessarily match in LLVM terms
  // because of the crazy ObjC compatibility rules.

  llvm::PointerType *destType =
      cast<llvm::PointerType>(CGF.ConvertType(CRE->getType()));
  llvm::Type *destElemType =
      CGF.ConvertTypeForMem(CRE->getType()->getPointeeType());

  // If the address is a constant null, just pass the appropriate null.
  if (isProvablyNull(srcAddr.getPointer())) {
    args.add(RValue::get(llvm::ConstantPointerNull::get(destType)),
             CRE->getType());
    return;
  }

  // Create the temporary.
  Address temp =
      CGF.CreateTempAlloca(destElemType, CGF.getPointerAlign(), "icr.temp");
  // Loading an l-value can introduce a cleanup if the l-value is __weak,
  // and that cleanup will be conditional if we can't prove that the l-value
  // isn't null, so we need to register a dominating point so that the cleanups
  // system will make valid IR.
  CodeGenFunction::ConditionalEvaluation condEval(CGF);

  // Zero-initialize it if we're not doing a copy-initialization.
  bool shouldCopy = CRE->shouldCopy();
  if (!shouldCopy) {
    llvm::Value *null =
        llvm::ConstantPointerNull::get(cast<llvm::PointerType>(destElemType));
    CGF.Builder.CreateStore(null, temp);
  }

  llvm::BasicBlock *contBB = nullptr;
  llvm::BasicBlock *originBB = nullptr;

  // If the address is *not* known to be non-null, we need to switch.
  llvm::Value *finalArgument;

  bool provablyNonNull = llvm::isKnownNonZero(srcAddr.getPointer(),
                                              CGF.CGM.getDataLayout());
  if (provablyNonNull) {
    finalArgument = temp.getPointer();
  } else {
    llvm::Value *isNull =
        CGF.Builder.CreateIsNull(srcAddr.getPointer(), "icr.isnull");

    finalArgument = CGF.Builder.CreateSelect(
        isNull, llvm::ConstantPointerNull::get(destType), temp.getPointer(),
        "icr.argument");

    // If we need to copy, then the load has to be conditional, which
    // means we need control flow.
    if (shouldCopy) {
      originBB = CGF.Builder.GetInsertBlock();
      contBB = CGF.createBasicBlock("icr.cont");
      llvm::BasicBlock *copyBB = CGF.createBasicBlock("icr.copy");
      CGF.Builder.CreateCondBr(isNull, contBB, copyBB);
      CGF.EmitBlock(copyBB);
      condEval.begin(CGF);
    }
  }

  llvm::Value *valueToUse = nullptr;

  // Perform a copy if necessary.
  if (shouldCopy) {
    RValue srcRV = CGF.EmitLoadOfLValue(srcLV, SourceLocation());
    assert(srcRV.isScalar());

    llvm::Value *src = srcRV.getScalarVal();
    src = CGF.Builder.CreateBitCast(src, destElemType, "icr.cast");

    // Use an ordinary store, not a store-to-lvalue.
    CGF.Builder.CreateStore(src, temp);

    // If optimization is enabled, and the value was held in a
    // __strong variable, we need to tell the optimizer that this
    // value has to stay alive until we're doing the store back.
    // This is because the temporary is effectively unretained,
    // and so otherwise we can violate the high-level semantics.
    if (CGF.CGM.getCodeGenOpts().OptimizationLevel != 0 &&
        srcLV.getObjCLifetime() == Qualifiers::OCL_Strong) {
      valueToUse = src;
    }
  }

  // Finish the control flow if we needed it.
  if (shouldCopy && !provablyNonNull) {
    llvm::BasicBlock *copyBB = CGF.Builder.GetInsertBlock();
    CGF.EmitBlock(contBB);

    // Make a phi for the value to intrinsically use.
    if (valueToUse) {
      llvm::PHINode *phiToUse = CGF.Builder.CreatePHI(valueToUse->getType(), 2,
                                                      "icr.to-use");
      phiToUse->addIncoming(valueToUse, copyBB);
      phiToUse->addIncoming(llvm::UndefValue::get(valueToUse->getType()),
                            originBB);
      valueToUse = phiToUse;
    }

    condEval.end(CGF);
  }

  args.addWriteback(srcLV, temp, valueToUse);
  args.add(RValue::get(finalArgument), CRE->getType());
}

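/// Save the stack pointer so that the memory used for inalloca arguments can
/// be released after the call (see freeArgumentMemory below).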
void CallArgList::allocateArgumentMemory(CodeGenFunction &CGF) {
  assert(!StackBase);

  // Save the stack.
  StackBase = CGF.Builder.CreateStackSave("inalloca.save");
}

void CallArgList::freeArgumentMemory(CodeGenFunction &CGF) const {
  if (StackBase) {
    // Restore the stack after the call.
    CGF.Builder.CreateStackRestore(StackBase);
  }
}

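/// Emit a sanitizer check that the given argument is non-null, driven either
/// by a 'nonnull' attribute or by a _Nonnull parameter type on the callee.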
void CodeGenFunction::EmitNonNullArgCheck(RValue RV, QualType ArgType,
                                          SourceLocation ArgLoc,
                                          AbstractCallee AC,
                                          unsigned ParmNum) {
  if (!AC.getDecl() || !(SanOpts.has(SanitizerKind::NonnullAttribute) ||
                         SanOpts.has(SanitizerKind::NullabilityArg)))
    return;

  // The param decl may be missing in a variadic function.
  auto PVD = ParmNum < AC.getNumParams() ? AC.getParamDecl(ParmNum) : nullptr;
  unsigned ArgNo = PVD ? PVD->getFunctionScopeIndex() : ParmNum;

  // Prefer the nonnull attribute if it's present.
  const NonNullAttr *NNAttr = nullptr;
  if (SanOpts.has(SanitizerKind::NonnullAttribute))
    NNAttr = getNonNullAttr(AC.getDecl(), PVD, ArgType, ArgNo);

  bool CanCheckNullability = false;
  if (SanOpts.has(SanitizerKind::NullabilityArg) && !NNAttr && PVD) {
    auto Nullability = PVD->getType()->getNullability();
    CanCheckNullability = Nullability &&
                          *Nullability == NullabilityKind::NonNull &&
                          PVD->getTypeSourceInfo();
  }

  if (!NNAttr && !CanCheckNullability)
    return;

  SourceLocation AttrLoc;
  SanitizerMask CheckKind;
  SanitizerHandler Handler;
  if (NNAttr) {
    AttrLoc = NNAttr->getLocation();
    CheckKind = SanitizerKind::NonnullAttribute;
    Handler = SanitizerHandler::NonnullArg;
  } else {
    AttrLoc = PVD->getTypeSourceInfo()->getTypeLoc().findNullabilityLoc();
    CheckKind = SanitizerKind::NullabilityArg;
    Handler = SanitizerHandler::NullabilityArg;
  }

  SanitizerScope SanScope(this);
  llvm::Value *Cond = EmitNonNullRValueCheck(RV, ArgType);
  llvm::Constant *StaticData[] = {
      EmitCheckSourceLocation(ArgLoc), EmitCheckSourceLocation(AttrLoc),
      llvm::ConstantInt::get(Int32Ty, ArgNo + 1),
  };
  EmitCheck(std::make_pair(Cond, CheckKind), Handler, StaticData, std::nullopt);
}

// Check if the call is going to use the inalloca convention. This needs to
// agree with CGFunctionInfo::usesInAlloca. The CGFunctionInfo is arranged
// later, so we can't check it directly.
static bool hasInAllocaArgs(CodeGenModule &CGM, CallingConv ExplicitCC,
                            ArrayRef<QualType> ArgTypes) {
  // The Swift calling conventions don't go through the target-specific
  // argument classification, so they never use inalloca.
  // TODO: Consider limiting inalloca use to only calling conventions supported
  // by MSVC.
  if (ExplicitCC == CC_Swift || ExplicitCC == CC_SwiftAsync)
    return false;
  if (!CGM.getTarget().getCXXABI().isMicrosoft())
    return false;
  return llvm::any_of(ArgTypes, [&](QualType Ty) {
    return isInAllocaArgument(CGM.getCXXABI(), Ty);
  });
}

#ifndef NDEBUG
// Determine whether the given argument is an Objective-C method
// that may have type parameters in its signature.
static bool isObjCMethodWithTypeParams(const ObjCMethodDecl *method) {
  const DeclContext *dc = method->getDeclContext();
  if (const ObjCInterfaceDecl *classDecl = dyn_cast<ObjCInterfaceDecl>(dc)) {
    return classDecl->getTypeParamListAsWritten();
  }

  if (const ObjCCategoryDecl *catDecl = dyn_cast<ObjCCategoryDecl>(dc)) {
    return catDecl->getTypeParamList();
  }

  return false;
}
#endif

/// EmitCallArgs - Emit call arguments for a function.
void CodeGenFunction::EmitCallArgs(
    CallArgList &Args, PrototypeWrapper Prototype,
    llvm::iterator_range<CallExpr::const_arg_iterator> ArgRange,
    AbstractCallee AC, unsigned ParamsToSkip, EvaluationOrder Order) {
  SmallVector<QualType, 16> ArgTypes;

  assert((ParamsToSkip == 0 || Prototype.P) &&
         "Can't skip parameters if type info is not provided");

  // This variable only captures *explicitly* written conventions, not those
  // applied by default via command line flags or target defaults, such as
  // thiscall, aapcs, stdcall via -mrtd, etc. Computing that correctly would
  // require knowing if this is a C++ instance method or being able to see
  // unprototyped FunctionTypes.
  CallingConv ExplicitCC = CC_C;

  // First, if a prototype was provided, use those argument types.
  bool IsVariadic = false;
  if (Prototype.P) {
    const auto *MD = Prototype.P.dyn_cast<const ObjCMethodDecl *>();
    if (MD) {
      IsVariadic = MD->isVariadic();
      ExplicitCC = getCallingConventionForDecl(
          MD, CGM.getTarget().getTriple().isOSWindows());
      ArgTypes.assign(MD->param_type_begin() + ParamsToSkip,
                      MD->param_type_end());
    } else {
      const auto *FPT = Prototype.P.get<const FunctionProtoType *>();
      IsVariadic = FPT->isVariadic();
      ExplicitCC = FPT->getExtInfo().getCC();
      ArgTypes.assign(FPT->param_type_begin() + ParamsToSkip,
                      FPT->param_type_end());
    }

#ifndef NDEBUG
    // Check that the prototyped types match the argument expression types.
    bool isGenericMethod = MD && isObjCMethodWithTypeParams(MD);
    CallExpr::const_arg_iterator Arg = ArgRange.begin();
    for (QualType Ty : ArgTypes) {
      assert(Arg != ArgRange.end() && "Running over edge of argument list!");
      assert(
          (isGenericMethod || Ty->isVariablyModifiedType() ||
           Ty.getNonReferenceType()->isObjCRetainableType() ||
           getContext()
                   .getCanonicalType(Ty.getNonReferenceType())
                   .getTypePtr() ==
               getContext().getCanonicalType((*Arg)->getType()).getTypePtr()) &&
          "type mismatch in call argument!");
      ++Arg;
    }

    // Either we've emitted all the call args, or we have a call to a
    // variadic function.
    assert((Arg == ArgRange.end() || IsVariadic) &&
           "Extra arguments in non-variadic function!");
#endif
  }

  // If we still have any arguments, emit them using the type of the argument.
  for (auto *A : llvm::drop_begin(ArgRange, ArgTypes.size()))
    ArgTypes.push_back(IsVariadic ? getVarArgType(A) : A->getType());
  assert((int)ArgTypes.size() == (ArgRange.end() - ArgRange.begin()));

  // We must evaluate arguments from right to left in the MS C++ ABI,
  // because arguments are destroyed left to right in the callee. As a special
  // case, there are certain language constructs that require left-to-right
  // evaluation, and in those cases we consider the evaluation order requirement
  // to trump the "destruction order is reverse construction order" guarantee.
  bool LeftToRight =
      CGM.getTarget().getCXXABI().areArgsDestroyedLeftToRightInCallee()
          ? Order == EvaluationOrder::ForceLeftToRight
          : Order != EvaluationOrder::ForceRightToLeft;

  auto MaybeEmitImplicitObjectSize = [&](unsigned I, const Expr *Arg,
                                         RValue EmittedArg) {
    if (!AC.hasFunctionDecl() || I >= AC.getNumParams())
      return;
    auto *PS = AC.getParamDecl(I)->getAttr<PassObjectSizeAttr>();
    if (PS == nullptr)
      return;

    const auto &Context = getContext();
    auto SizeTy = Context.getSizeType();
    auto T = Builder.getIntNTy(Context.getTypeSize(SizeTy));
    assert(EmittedArg.getScalarVal() && "We emitted nothing for the arg?");
    llvm::Value *V = evaluateOrEmitBuiltinObjectSize(Arg, PS->getType(), T,
                                                     EmittedArg.getScalarVal(),
                                                     PS->isDynamic());
    Args.add(RValue::get(V), SizeTy);
    // If we're emitting args in reverse, be sure to do so with
    // pass_object_size, as well.
    if (!LeftToRight)
      std::swap(Args.back(), *(&Args.back() - 1));
  };

  // Insert a stack save if we're going to need any inalloca args.
  if (hasInAllocaArgs(CGM, ExplicitCC, ArgTypes)) {
    assert(getTarget().getTriple().getArch() == llvm::Triple::x86 &&
           "inalloca only supported on x86");
    Args.allocateArgumentMemory(*this);
  }

  // Evaluate each argument in the appropriate order.
  size_t CallArgsStart = Args.size();
  for (unsigned I = 0, E = ArgTypes.size(); I != E; ++I) {
    unsigned Idx = LeftToRight ? I : E - I - 1;
    CallExpr::const_arg_iterator Arg = ArgRange.begin() + Idx;
    unsigned InitialArgSize = Args.size();
    // If *Arg is an ObjCIndirectCopyRestoreExpr, check that either the types of
    // the argument and parameter match or the objc method is parameterized.
    assert((!isa<ObjCIndirectCopyRestoreExpr>(*Arg) ||
            getContext().hasSameUnqualifiedType((*Arg)->getType(),
                                                ArgTypes[Idx]) ||
            (isa<ObjCMethodDecl>(AC.getDecl()) &&
             isObjCMethodWithTypeParams(cast<ObjCMethodDecl>(AC.getDecl())))) &&
           "Argument and parameter types don't match");
    EmitCallArg(Args, *Arg, ArgTypes[Idx]);
    // In particular, we depend on it being the last arg in Args, and the
    // objectsize bits depend on there only being one arg if !LeftToRight.
    assert(InitialArgSize + 1 == Args.size() &&
           "The code below depends on only adding one arg per EmitCallArg");
    (void)InitialArgSize;
    // Since pointer arguments are never emitted as LValues, it is safe to
    // emit the non-null argument check for r-values only.
    if (!Args.back().hasLValue()) {
      RValue RVArg = Args.back().getKnownRValue();
      EmitNonNullArgCheck(RVArg, ArgTypes[Idx], (*Arg)->getExprLoc(), AC,
                          ParamsToSkip + Idx);
      // @llvm.objectsize should never have side-effects and shouldn't need
      // destruction/cleanups, so we can safely "emit" it after its arg,
      // regardless of right-to-leftness.
      MaybeEmitImplicitObjectSize(Idx, *Arg, RVArg);
    }
  }

  if (!LeftToRight) {
    // Un-reverse the arguments we just evaluated so they match up with the
    // LLVM IR function.
    std::reverse(Args.begin() + CallArgsStart, Args.end());
  }
}

namespace {

struct DestroyUnpassedArg final : EHScopeStack::Cleanup {
  DestroyUnpassedArg(Address Addr, QualType Ty)
      : Addr(Addr), Ty(Ty) {}

  Address Addr;
  QualType Ty;

  void Emit(CodeGenFunction &CGF, Flags flags) override {
    QualType::DestructionKind DtorKind = Ty.isDestructedType();
    if (DtorKind == QualType::DK_cxx_destructor) {
      const CXXDestructorDecl *Dtor = Ty->getAsCXXRecordDecl()->getDestructor();
      assert(!Dtor->isTrivial());
      CGF.EmitCXXDestructorCall(Dtor, Dtor_Complete, /*for vbase*/ false,
                                /*Delegating=*/false, Addr, Ty);
    } else {
      CGF.callCStructDestructor(CGF.MakeAddrLValue(Addr, Ty));
    }
  }
};

struct DisableDebugLocationUpdates {
  CodeGenFunction &CGF;
  bool disabledDebugInfo;
  DisableDebugLocationUpdates(CodeGenFunction &CGF, const Expr *E) : CGF(CGF) {
    if ((disabledDebugInfo = isa<CXXDefaultArgExpr>(E) && CGF.getDebugInfo()))
      CGF.disableDebugInfo();
  }
  ~DisableDebugLocationUpdates() {
    if (disabledDebugInfo)
      CGF.enableDebugInfo();
  }
};

} // end anonymous namespace

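/// Return the argument as an r-value, copying an uncopied aggregate l-value
/// into a fresh temporary if necessary.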
RValue CallArg::getRValue(CodeGenFunction &CGF) const {
  if (!HasLV)
    return RV;
  LValue Copy = CGF.MakeAddrLValue(CGF.CreateMemTemp(Ty), Ty);
  CGF.EmitAggregateCopy(Copy, LV, Ty, AggValueSlot::DoesNotOverlap,
                        LV.isVolatile());
  IsUsed = true;
  return RValue::getAggregate(Copy.getAddress(CGF));
}

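/// Copy the argument into the given address, using a scalar or complex store
/// when possible and falling back to an aggregate copy otherwise.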
void CallArg::copyInto(CodeGenFunction &CGF, Address Addr) const {
  LValue Dst = CGF.MakeAddrLValue(Addr, Ty);
  if (!HasLV && RV.isScalar())
    CGF.EmitStoreOfScalar(RV.getScalarVal(), Dst, /*isInit=*/true);
  else if (!HasLV && RV.isComplex())
    CGF.EmitStoreOfComplex(RV.getComplexVal(), Dst, /*init=*/true);
  else {
    auto Addr = HasLV ? LV.getAddress(CGF) : RV.getAggregateAddress();
    LValue SrcLV = CGF.MakeAddrLValue(Addr, Ty);
    // We assume that call args are never copied into subobjects.
    CGF.EmitAggregateCopy(Dst, SrcLV, Ty, AggValueSlot::DoesNotOverlap,
                          HasLV ? LV.isVolatileQualified()
                                : RV.isVolatileQualified());
  }
  IsUsed = true;
}

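/// Emit a single call argument of the given type and append it to the
/// argument list.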
void CodeGenFunction::EmitCallArg(CallArgList &args, const Expr *E,
                                  QualType type) {
  DisableDebugLocationUpdates Dis(*this, E);
  if (const ObjCIndirectCopyRestoreExpr *CRE
        = dyn_cast<ObjCIndirectCopyRestoreExpr>(E)) {
    assert(getLangOpts().ObjCAutoRefCount);
    return emitWritebackArg(*this, args, CRE);
  }

  assert(type->isReferenceType() == E->isGLValue() &&
         "reference binding to unmaterialized r-value!");

  if (E->isGLValue()) {
    assert(E->getObjectKind() == OK_Ordinary);
    return args.add(EmitReferenceBindingToExpr(E), type);
  }

  bool HasAggregateEvalKind = hasAggregateEvaluationKind(type);

  // In the Microsoft C++ ABI, aggregate arguments are destructed by the callee.
  // However, we still have to push an EH-only cleanup in case we unwind before
  // we make it to the call.
  if (type->isRecordType() &&
      type->castAs<RecordType>()->getDecl()->isParamDestroyedInCallee()) {
    // If we're using inalloca, use the argument memory. Otherwise, use a
    // temporary.
    AggValueSlot Slot = args.isUsingInAlloca()
        ? createPlaceholderSlot(*this, type) : CreateAggTemp(type, "agg.tmp");

    bool DestroyedInCallee = true, NeedsEHCleanup = true;
    if (const auto *RD = type->getAsCXXRecordDecl())
      DestroyedInCallee = RD->hasNonTrivialDestructor();
    else
      NeedsEHCleanup = needsEHCleanup(type.isDestructedType());

    if (DestroyedInCallee)
      Slot.setExternallyDestructed();

    EmitAggExpr(E, Slot);
    RValue RV = Slot.asRValue();
    args.add(RV, type);

    if (DestroyedInCallee && NeedsEHCleanup) {
      // Create a no-op GEP between the placeholder and the cleanup so we can
      // RAUW it successfully. It also serves as a marker of the first
      // instruction where the cleanup is active.
      pushFullExprCleanup<DestroyUnpassedArg>(EHCleanup, Slot.getAddress(),
                                              type);
      // This unreachable is a temporary marker which will be removed later.
      llvm::Instruction *IsActive = Builder.CreateUnreachable();
      args.addArgCleanupDeactivation(EHStack.stable_begin(), IsActive);
    }
    return;
  }

  if (HasAggregateEvalKind && isa<ImplicitCastExpr>(E) &&
      cast<CastExpr>(E)->getCastKind() == CK_LValueToRValue) {
    LValue L = EmitLValue(cast<CastExpr>(E)->getSubExpr());
    assert(L.isSimple());
    args.addUncopiedAggregate(L, type);
    return;
  }

  args.add(EmitAnyExprToTemp(E), type);
}

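/// Returns the type that the given expression should be passed with when it
/// is an argument to a variadic function.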
QualType CodeGenFunction::getVarArgType(const Expr *Arg) {
  // System headers on Windows define NULL to 0 instead of 0LL on Win64. MSVC
  // implicitly widens null pointer constants that are arguments to varargs
  // functions to pointer-sized ints.
  if (!getTarget().getTriple().isOSWindows())
    return Arg->getType();

  if (Arg->getType()->isIntegerType() &&
      getContext().getTypeSize(Arg->getType()) <
          getContext().getTargetInfo().getPointerWidth(LangAS::Default) &&
      Arg->isNullPointerConstant(getContext(),
                                 Expr::NPC_ValueDependentIsNotNull)) {
    return getContext().getIntPtrType();
  }

  return Arg->getType();
}

// In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
// optimizer it can aggressively ignore unwind edges.
void
CodeGenFunction::AddObjCARCExceptionMetadata(llvm::Instruction *Inst) {
  if (CGM.getCodeGenOpts().OptimizationLevel != 0 &&
      !CGM.getCodeGenOpts().ObjCAutoRefCountExceptions)
    Inst->setMetadata("clang.arc.no_objc_arc_exceptions",
                      CGM.getNoObjCARCExceptionsMetadata());
}

/// Emits a call to the given no-arguments nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                         const llvm::Twine &name) {
  return EmitNounwindRuntimeCall(callee, std::nullopt, name);
}

/// Emits a call to the given nounwind runtime function.
llvm::CallInst *
CodeGenFunction::EmitNounwindRuntimeCall(llvm::FunctionCallee callee,
                                         ArrayRef<llvm::Value *> args,
                                         const llvm::Twine &name) {
  llvm::CallInst *call = EmitRuntimeCall(callee, args, name);
  call->setDoesNotThrow();
  return call;
}

/// Emits a simple call (never an invoke) to the given no-arguments
/// runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 const llvm::Twine &name) {
  return EmitRuntimeCall(callee, std::nullopt, name);
}

// Calls which may throw must have operand bundles indicating which funclet
// they are nested within.
SmallVector<llvm::OperandBundleDef, 1>
CodeGenFunction::getBundlesForFunclet(llvm::Value *Callee) {
  // There is no need for a funclet operand bundle if we aren't inside a
  // funclet.
  if (!CurrentFuncletPad)
    return (SmallVector<llvm::OperandBundleDef, 1>());

  // Skip intrinsics which cannot throw (as long as they don't lower into
  // regular function calls in the course of IR transformations).
  if (auto *CalleeFn = dyn_cast<llvm::Function>(Callee->stripPointerCasts())) {
    if (CalleeFn->isIntrinsic() && CalleeFn->doesNotThrow()) {
      auto IID = CalleeFn->getIntrinsicID();
      if (!llvm::IntrinsicInst::mayLowerToFunctionCall(IID))
        return (SmallVector<llvm::OperandBundleDef, 1>());
    }
  }

  SmallVector<llvm::OperandBundleDef, 1> BundleList;
  BundleList.emplace_back("funclet", CurrentFuncletPad);
  return BundleList;
}

/// Emits a simple call (never an invoke) to the given runtime function.
llvm::CallInst *CodeGenFunction::EmitRuntimeCall(llvm::FunctionCallee callee,
                                                 ArrayRef<llvm::Value *> args,
                                                 const llvm::Twine &name) {
  llvm::CallInst *call = Builder.CreateCall(
      callee, args, getBundlesForFunclet(callee.getCallee()), name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke to the given noreturn runtime function.
void CodeGenFunction::EmitNoreturnRuntimeCallOrInvoke(
    llvm::FunctionCallee callee, ArrayRef<llvm::Value *> args) {
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(callee.getCallee());

  if (getInvokeDest()) {
    llvm::InvokeInst *invoke =
        Builder.CreateInvoke(callee,
                             getUnreachableBlock(),
                             getInvokeDest(),
                             args,
                             BundleList);
    invoke->setDoesNotReturn();
    invoke->setCallingConv(getRuntimeCC());
  } else {
    llvm::CallInst *call = Builder.CreateCall(callee, args, BundleList);
    call->setDoesNotReturn();
    call->setCallingConv(getRuntimeCC());
    Builder.CreateUnreachable();
  }
}

/// Emits a call or invoke instruction to the given nullary runtime function.
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                         const Twine &name) {
  return EmitRuntimeCallOrInvoke(callee, std::nullopt, name);
}

/// Emits a call or invoke instruction to the given runtime function.
llvm::CallBase *
CodeGenFunction::EmitRuntimeCallOrInvoke(llvm::FunctionCallee callee,
                                         ArrayRef<llvm::Value *> args,
                                         const Twine &name) {
  llvm::CallBase *call = EmitCallOrInvoke(callee, args, name);
  call->setCallingConv(getRuntimeCC());
  return call;
}

/// Emits a call or invoke instruction to the given function, depending
/// on the current state of the EH stack.
llvm::CallBase *CodeGenFunction::EmitCallOrInvoke(llvm::FunctionCallee Callee,
                                                  ArrayRef<llvm::Value *> Args,
                                                  const Twine &Name) {
  llvm::BasicBlock *InvokeDest = getInvokeDest();
  SmallVector<llvm::OperandBundleDef, 1> BundleList =
      getBundlesForFunclet(Callee.getCallee());

  llvm::CallBase *Inst;
  if (!InvokeDest)
    Inst = Builder.CreateCall(Callee, Args, BundleList, Name);
  else {
    llvm::BasicBlock *ContBB = createBasicBlock("invoke.cont");
    Inst = Builder.CreateInvoke(Callee, ContBB, InvokeDest, Args, BundleList,
                                Name);
    EmitBlock(ContBB);
  }

  // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
  // optimizer it can aggressively ignore unwind edges.
  if (CGM.getLangOpts().ObjCAutoRefCount)
    AddObjCARCExceptionMetadata(Inst);

  return Inst;
}

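/// Record a placeholder instruction so that all of its uses can be replaced
/// with \p New once the call has been emitted.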
void CodeGenFunction::deferPlaceholderReplacement(llvm::Instruction *Old,
                                                  llvm::Value *New) {
  DeferredReplacements.push_back(
      std::make_pair(llvm::WeakTrackingVH(Old), New));
}

namespace {

/// Specify the given \p NewAlign as the alignment of the return value
/// attribute. If such an attribute already exists, reset it to the larger of
/// the two alignments.
[[nodiscard]] llvm::AttributeList
maybeRaiseRetAlignmentAttribute(llvm::LLVMContext &Ctx,
                                const llvm::AttributeList &Attrs,
                                llvm::Align NewAlign) {
  llvm::Align CurAlign = Attrs.getRetAlignment().valueOrOne();
  if (CurAlign >= NewAlign)
    return Attrs;
  llvm::Attribute AlignAttr = llvm::Attribute::getWithAlignment(Ctx, NewAlign);
  return Attrs.removeRetAttribute(Ctx, llvm::Attribute::AttrKind::Alignment)
      .addRetAttribute(Ctx, AlignAttr);
}

template <typename AlignedAttrTy> class AbstractAssumeAlignedAttrEmitter {
protected:
  CodeGenFunction &CGF;

  /// We do nothing if this is, or becomes, nullptr.
  const AlignedAttrTy *AA = nullptr;

  llvm::Value *Alignment = nullptr;      // May or may not be a constant.
  llvm::ConstantInt *OffsetCI = nullptr; // Constant, hopefully zero.

  AbstractAssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : CGF(CGF_) {
    if (!FuncDecl)
      return;
    AA = FuncDecl->getAttr<AlignedAttrTy>();
  }

public:
  /// If we can, materialize the alignment as an attribute on return value.
  [[nodiscard]] llvm::AttributeList
  TryEmitAsCallSiteAttribute(const llvm::AttributeList &Attrs) {
    if (!AA || OffsetCI || CGF.SanOpts.has(SanitizerKind::Alignment))
      return Attrs;
    const auto *AlignmentCI = dyn_cast<llvm::ConstantInt>(Alignment);
    if (!AlignmentCI)
      return Attrs;
    // We may legitimately have non-power-of-2 alignment here.
    // If so, this is UB land, emit it via `@llvm.assume` instead.
    if (!AlignmentCI->getValue().isPowerOf2())
      return Attrs;
    llvm::AttributeList NewAttrs = maybeRaiseRetAlignmentAttribute(
        CGF.getLLVMContext(), Attrs,
        llvm::Align(
            AlignmentCI->getLimitedValue(llvm::Value::MaximumAlignment)));
    AA = nullptr; // We're done. Disallow doing anything else.
    return NewAttrs;
  }

  /// Emit alignment assumption.
  /// This is a general fallback that we take if either there is an offset,
  /// or the alignment is variable or we are sanitizing for alignment.
  void EmitAsAnAssumption(SourceLocation Loc, QualType RetTy, RValue &Ret) {
    if (!AA)
      return;
    CGF.emitAlignmentAssumption(Ret.getScalarVal(), RetTy, Loc,
                                AA->getLocation(), Alignment, OffsetCI);
    AA = nullptr; // We're done. Disallow doing anything else.
  }
};

/// Helper data structure to emit `AssumeAlignedAttr`.
class AssumeAlignedAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AssumeAlignedAttr> {
public:
  AssumeAlignedAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // It is guaranteed that the alignment/offset are constants.
    Alignment = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(AA->getAlignment()));
    if (Expr *Offset = AA->getOffset()) {
      OffsetCI = cast<llvm::ConstantInt>(CGF.EmitScalarExpr(Offset));
      if (OffsetCI->isNullValue()) // Canonicalize zero offset to no offset.
        OffsetCI = nullptr;
    }
  }
};

/// Helper data structure to emit `AllocAlignAttr`.
class AllocAlignAttrEmitter final
    : public AbstractAssumeAlignedAttrEmitter<AllocAlignAttr> {
public:
  AllocAlignAttrEmitter(CodeGenFunction &CGF_, const Decl *FuncDecl,
                        const CallArgList &CallArgs)
      : AbstractAssumeAlignedAttrEmitter(CGF_, FuncDecl) {
    if (!AA)
      return;
    // Alignment may or may not be a constant, and that is okay.
    Alignment = CallArgs[AA->getParamIndex().getLLVMIndex()]
                    .getRValue(CGF)
                    .getScalarVal();
  }
};

} // namespace

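/// Returns the largest (known minimum) vector width, in bits, used anywhere
/// within the given type, recursing through arrays and structs.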
static unsigned getMaxVectorWidth(const llvm::Type *Ty) {
  if (auto *VT = dyn_cast<llvm::VectorType>(Ty))
    return VT->getPrimitiveSizeInBits().getKnownMinValue();
  if (auto *AT = dyn_cast<llvm::ArrayType>(Ty))
    return getMaxVectorWidth(AT->getElementType());

  unsigned MaxVectorWidth = 0;
  if (auto *ST = dyn_cast<llvm::StructType>(Ty))
    for (auto *I : ST->elements())
      MaxVectorWidth = std::max(MaxVectorWidth, getMaxVectorWidth(I));
  return MaxVectorWidth;
}

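/// Emit a call to the given callee: lower the already-evaluated Clang-level
/// arguments in CallArgs to the IR-level calling convention described by
/// CallInfo, then emit the call or invoke instruction itself.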
RValue CodeGenFunction::EmitCall(const CGFunctionInfo &CallInfo,
                                 const CGCallee &Callee,
                                 ReturnValueSlot ReturnValue,
                                 const CallArgList &CallArgs,
                                 llvm::CallBase **callOrInvoke, bool IsMustTail,
                                 SourceLocation Loc) {
  // FIXME: We no longer need the types from CallArgs; lift up and simplify.

  assert(Callee.isOrdinary() || Callee.isVirtual());

  // Handle struct-return functions by passing a pointer to the
  // location that we would like to return into.
  QualType RetTy = CallInfo.getReturnType();
  const ABIArgInfo &RetAI = CallInfo.getReturnInfo();

  llvm::FunctionType *IRFuncTy = getTypes().GetFunctionType(CallInfo);

  const Decl *TargetDecl = Callee.getAbstractInfo().getCalleeDecl().getDecl();
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(TargetDecl)) {
    // We can only guarantee that a function is called from the correct
    // context/function based on the appropriate target attributes,
    // so only check in the case where we have both always_inline and target
    // since otherwise we could be making a conditional call after a check for
    // the proper cpu features (and it won't cause code generation issues due to
    // function based code generation).
    if (TargetDecl->hasAttr<AlwaysInlineAttr>() &&
        (TargetDecl->hasAttr<TargetAttr>() ||
         (CurFuncDecl && CurFuncDecl->hasAttr<TargetAttr>())))
      checkTargetFeatures(Loc, FD);

    // Some architectures (such as x86-64) have the ABI changed based on
    // attribute-target/features. Give them a chance to diagnose.
    CGM.getTargetCodeGenInfo().checkFunctionCallABI(
        CGM, Loc, dyn_cast_or_null<FunctionDecl>(CurCodeDecl), FD, CallArgs);
  }

  // 1. Set up the arguments.

  // If we're using inalloca, insert the allocation after the stack save.
  // FIXME: Do this earlier rather than hacking it in here!
  Address ArgMemory = Address::invalid();
  if (llvm::StructType *ArgStruct = CallInfo.getArgStruct()) {
    const llvm::DataLayout &DL = CGM.getDataLayout();
    llvm::Instruction *IP = CallArgs.getStackBase();
    llvm::AllocaInst *AI;
    if (IP) {
      IP = IP->getNextNode();
      AI = new llvm::AllocaInst(ArgStruct, DL.getAllocaAddrSpace(),
                                "argmem", IP);
    } else {
      AI = CreateTempAlloca(ArgStruct, "argmem");
    }
    auto Align = CallInfo.getArgStructAlignment();
    AI->setAlignment(Align.getAsAlign());
    AI->setUsedWithInAlloca(true);
    assert(AI->isUsedWithInAlloca() && !AI->isStaticAlloca());
    ArgMemory = Address(AI, ArgStruct, Align);
  }

  ClangToLLVMArgMapping IRFunctionArgs(CGM.getContext(), CallInfo);
  SmallVector<llvm::Value *, 16> IRCallArgs(IRFunctionArgs.totalIRArgs());

  // If the call returns a temporary with struct return, create a temporary
  // alloca to hold the result, unless one is given to us.
  Address SRetPtr = Address::invalid();
  Address SRetAlloca = Address::invalid();
  llvm::Value *UnusedReturnSizePtr = nullptr;
  if (RetAI.isIndirect() || RetAI.isInAlloca() || RetAI.isCoerceAndExpand()) {
    if (!ReturnValue.isNull()) {
      SRetPtr = ReturnValue.getValue();
    } else {
      SRetPtr = CreateMemTemp(RetTy, "tmp", &SRetAlloca);
      if (HaveInsertPoint() && ReturnValue.isUnused()) {
        llvm::TypeSize size =
            CGM.getDataLayout().getTypeAllocSize(ConvertTypeForMem(RetTy));
        UnusedReturnSizePtr = EmitLifetimeStart(size, SRetAlloca.getPointer());
      }
    }
    if (IRFunctionArgs.hasSRetArg()) {
      IRCallArgs[IRFunctionArgs.getSRetArgNo()] = SRetPtr.getPointer();
    } else if (RetAI.isInAlloca()) {
      Address Addr =
          Builder.CreateStructGEP(ArgMemory, RetAI.getInAllocaFieldIndex());
      Builder.CreateStore(SRetPtr.getPointer(), Addr);
    }
  }

  Address swiftErrorTemp = Address::invalid();
  Address swiftErrorArg = Address::invalid();

  // When passing arguments using temporary allocas, we need to add the
  // appropriate lifetime markers. This vector keeps track of all the lifetime
  // markers that need to be ended right after the call.
  SmallVector<CallLifetimeEnd, 2> CallLifetimeEndAfterCall;

  // Translate all of the arguments as necessary to match the IR lowering.
  assert(CallInfo.arg_size() == CallArgs.size() &&
         "Mismatch between function signature & arguments.");
  unsigned ArgNo = 0;
  CGFunctionInfo::const_arg_iterator info_it = CallInfo.arg_begin();
  for (CallArgList::const_iterator I = CallArgs.begin(), E = CallArgs.end();
       I != E; ++I, ++info_it, ++ArgNo) {
    const ABIArgInfo &ArgInfo = info_it->info;

    // Insert a padding argument to ensure proper alignment.
    if (IRFunctionArgs.hasPaddingArg(ArgNo))
      IRCallArgs[IRFunctionArgs.getPaddingArgNo(ArgNo)] =
          llvm::UndefValue::get(ArgInfo.getPaddingType());

    unsigned FirstIRArg, NumIRArgs;
    std::tie(FirstIRArg, NumIRArgs) = IRFunctionArgs.getIRArgs(ArgNo);

    bool ArgHasMaybeUndefAttr =
        IsArgumentMaybeUndef(TargetDecl, CallInfo.getNumRequiredArgs(), ArgNo);

    switch (ArgInfo.getKind()) {
    case ABIArgInfo::InAlloca: {
      assert(NumIRArgs == 0);
      assert(getTarget().getTriple().getArch() == llvm::Triple::x86);
      if (I->isAggregate()) {
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Instruction *Placeholder =
            cast<llvm::Instruction>(Addr.getPointer());

        if (!ArgInfo.getInAllocaIndirect()) {
          // Replace the placeholder with the appropriate argument slot GEP.
          CGBuilderTy::InsertPoint IP = Builder.saveIP();
          Builder.SetInsertPoint(Placeholder);
          Addr = Builder.CreateStructGEP(ArgMemory,
                                         ArgInfo.getInAllocaFieldIndex());
          Builder.restoreIP(IP);
        } else {
          // For indirect things such as overaligned structs, replace the
          // placeholder with a regular aggregate temporary alloca. Store the
          // address of this alloca into the struct.
          Addr = CreateMemTemp(info_it->type, "inalloca.indirect.tmp");
          Address ArgSlot = Builder.CreateStructGEP(
              ArgMemory, ArgInfo.getInAllocaFieldIndex());
          Builder.CreateStore(Addr.getPointer(), ArgSlot);
        }
        deferPlaceholderReplacement(Placeholder, Addr.getPointer());
      } else if (ArgInfo.getInAllocaIndirect()) {
        // Make a temporary alloca and store the address of it into the
        // argument struct.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, getContext().getTypeAlignInChars(I->Ty),
            "indirect-arg-temp");
        I->copyInto(*this, Addr);
        Address ArgSlot =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Builder.CreateStore(Addr.getPointer(), ArgSlot);
      } else {
        // Store the RValue into the argument struct.
        Address Addr =
            Builder.CreateStructGEP(ArgMemory, ArgInfo.getInAllocaFieldIndex());
        Addr = Addr.withElementType(ConvertTypeForMem(I->Ty));
        I->copyInto(*this, Addr);
      }
      break;
    }

    case ABIArgInfo::Indirect:
    case ABIArgInfo::IndirectAliased: {
      assert(NumIRArgs == 1);
      if (!I->isAggregate()) {
        // Make a temporary alloca to pass the argument.
        Address Addr = CreateMemTempWithoutCast(
            I->Ty, ArgInfo.getIndirectAlign(), "indirect-arg-temp");

        llvm::Value *Val = Addr.getPointer();
        if (ArgHasMaybeUndefAttr)
          Val = Builder.CreateFreeze(Addr.getPointer());
        IRCallArgs[FirstIRArg] = Val;

        I->copyInto(*this, Addr);
      } else {
        // We want to avoid creating an unnecessary temporary+copy here;
        // however, we need one in three cases:
        // 1. If the argument is not byval, and we are required to copy the
        //    source. (This case doesn't occur on any common architecture.)
        // 2. If the argument is byval, RV is not sufficiently aligned, and
        //    we cannot force it to be sufficiently aligned.
        // 3. If the argument is byval, but RV is not located in default
        //    or alloca address space.
        Address Addr = I->hasLValue()
                           ? I->getKnownLValue().getAddress(*this)
                           : I->getKnownRValue().getAggregateAddress();
        llvm::Value *V = Addr.getPointer();
        CharUnits Align = ArgInfo.getIndirectAlign();
        const llvm::DataLayout *TD = &CGM.getDataLayout();

        assert((FirstIRArg >= IRFuncTy->getNumParams() ||
                IRFuncTy->getParamType(FirstIRArg)->getPointerAddressSpace() ==
                    TD->getAllocaAddrSpace()) &&
               "indirect argument must be in alloca address space");

        bool NeedCopy = false;
        if (Addr.getAlignment() < Align &&
            llvm::getOrEnforceKnownAlignment(V, Align.getAsAlign(), *TD) <
                Align.getAsAlign()) {
          NeedCopy = true;
        } else if (I->hasLValue()) {
          auto LV = I->getKnownLValue();
          auto AS = LV.getAddressSpace();

          bool isByValOrRef =
              ArgInfo.isIndirectAliased() || ArgInfo.getIndirectByVal();

          if (!isByValOrRef ||
              (LV.getAlignment() < getContext().getTypeAlignInChars(I->Ty))) {
            NeedCopy = true;
          }
          if (!getLangOpts().OpenCL) {
            if ((isByValOrRef &&
                 (AS != LangAS::Default &&
                  AS != CGM.getASTAllocaAddressSpace()))) {
              NeedCopy = true;
            }
          }
          // For OpenCL even if RV is located in default or alloca address space
          // we don't want to perform address space cast for it.
          else if ((isByValOrRef &&
                    Addr.getType()->getAddressSpace() !=
                        IRFuncTy->getParamType(FirstIRArg)
                            ->getPointerAddressSpace())) {
            NeedCopy = true;
          }
        }

        if (NeedCopy) {
          // Create an aligned temporary, and copy to it.
          Address AI = CreateMemTempWithoutCast(
              I->Ty, ArgInfo.getIndirectAlign(), "byval-temp");
          llvm::Value *Val = AI.getPointer();
          if (ArgHasMaybeUndefAttr)
            Val = Builder.CreateFreeze(AI.getPointer());
          IRCallArgs[FirstIRArg] = Val;

          // Emit lifetime markers for the temporary alloca.
          llvm::TypeSize ByvalTempElementSize =
              CGM.getDataLayout().getTypeAllocSize(AI.getElementType());
          llvm::Value *LifetimeSize =
              EmitLifetimeStart(ByvalTempElementSize, AI.getPointer());

          // Add cleanup code to emit the end lifetime marker after the call.
          if (LifetimeSize) // In case we disabled lifetime markers.
            CallLifetimeEndAfterCall.emplace_back(AI, LifetimeSize);

          // Generate the copy.
          I->copyInto(*this, AI);
        } else {
          // Skip the extra memcpy call.
          auto *T = llvm::PointerType::get(
              CGM.getLLVMContext(), CGM.getDataLayout().getAllocaAddrSpace());

          llvm::Value *Val = getTargetHooks().performAddrSpaceCast(
              *this, V, LangAS::Default, CGM.getASTAllocaAddressSpace(), T,
              true);
          if (ArgHasMaybeUndefAttr)
            Val = Builder.CreateFreeze(Val);
          IRCallArgs[FirstIRArg] = Val;
        }
      }
      break;
    }

    case ABIArgInfo::Ignore:
      assert(NumIRArgs == 0);
      break;

    case ABIArgInfo::Extend:
    case ABIArgInfo::Direct: {
      if (!isa<llvm::StructType>(ArgInfo.getCoerceToType()) &&
          ArgInfo.getCoerceToType() == ConvertType(info_it->type) &&
          ArgInfo.getDirectOffset() == 0) {
        assert(NumIRArgs == 1);
        llvm::Value *V;
        if (!I->isAggregate())
          V = I->getKnownRValue().getScalarVal();
        else
          V = Builder.CreateLoad(
              I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress());

        // Implement swifterror by copying into a new swifterror argument.
        // We'll write back in the normal path out of the call.
        if (CallInfo.getExtParameterInfo(ArgNo).getABI()
              == ParameterABI::SwiftErrorResult) {
          assert(!swiftErrorTemp.isValid() && "multiple swifterror args");

          QualType pointeeTy = I->Ty->getPointeeType();
          swiftErrorArg = Address(V, ConvertTypeForMem(pointeeTy),
                                  getContext().getTypeAlignInChars(pointeeTy));

          swiftErrorTemp =
              CreateMemTemp(pointeeTy, getPointerAlign(), "swifterror.temp");
          V = swiftErrorTemp.getPointer();
          cast<llvm::AllocaInst>(V)->setSwiftError(true);

          llvm::Value *errorValue = Builder.CreateLoad(swiftErrorArg);
          Builder.CreateStore(errorValue, swiftErrorTemp);
        }

        // We might have to widen integers, but we should never truncate.
        if (ArgInfo.getCoerceToType() != V->getType() &&
            V->getType()->isIntegerTy())
          V = Builder.CreateZExt(V, ArgInfo.getCoerceToType());

        // If the argument doesn't match, perform a bitcast to coerce it. This
        // can happen due to trivial type mismatches.
        if (FirstIRArg < IRFuncTy->getNumParams() &&
            V->getType() != IRFuncTy->getParamType(FirstIRArg))
          V = Builder.CreateBitCast(V, IRFuncTy->getParamType(FirstIRArg));

        if (ArgHasMaybeUndefAttr)
          V = Builder.CreateFreeze(V);
        IRCallArgs[FirstIRArg] = V;
        break;
      }

      // FIXME: Avoid the conversion through memory if possible.
      Address Src = Address::invalid();
      if (!I->isAggregate()) {
        Src = CreateMemTemp(I->Ty, "coerce");
        I->copyInto(*this, Src);
      } else {
        Src = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                             : I->getKnownRValue().getAggregateAddress();
      }

      // If the value is offset in memory, apply the offset now.
      Src = emitAddressAtOffset(*this, Src, ArgInfo);

      // Fast-isel and the optimizer generally like scalar values better than
      // FCAs, so we flatten them if this is safe to do for this argument.
      llvm::StructType *STy =
          dyn_cast<llvm::StructType>(ArgInfo.getCoerceToType());
      if (STy && ArgInfo.isDirect() && ArgInfo.getCanBeFlattened()) {
        llvm::Type *SrcTy = Src.getElementType();
        llvm::TypeSize SrcTypeSize =
            CGM.getDataLayout().getTypeAllocSize(SrcTy);
        llvm::TypeSize DstTypeSize = CGM.getDataLayout().getTypeAllocSize(STy);
        if (SrcTypeSize.isScalable()) {
          assert(STy->containsHomogeneousScalableVectorTypes() &&
                 "ABI only supports structure with homogeneous scalable vector "
                 "type");
          assert(SrcTypeSize == DstTypeSize &&
                 "Only allow non-fractional movement of structure with "
                 "homogeneous scalable vector type");
          assert(NumIRArgs == STy->getNumElements());

          llvm::Value *StoredStructValue =
              Builder.CreateLoad(Src, Src.getName() + ".tuple");
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            llvm::Value *Extract = Builder.CreateExtractValue(
                StoredStructValue, i, Src.getName() + ".extract" + Twine(i));
            IRCallArgs[FirstIRArg + i] = Extract;
          }
        } else {
          uint64_t SrcSize = SrcTypeSize.getFixedValue();
          uint64_t DstSize = DstTypeSize.getFixedValue();

          // If the source type is smaller than the destination type of the
          // coerce-to logic, copy the source value into a temp alloca the size
          // of the destination type to allow loading all of it. The bits past
          // the source value are left undef.
          if (SrcSize < DstSize) {
            Address TempAlloca = CreateTempAlloca(STy, Src.getAlignment(),
                                                  Src.getName() + ".coerce");
            Builder.CreateMemCpy(TempAlloca, Src, SrcSize);
            Src = TempAlloca;
          } else {
            Src = Src.withElementType(STy);
          }

          assert(NumIRArgs == STy->getNumElements());
          for (unsigned i = 0, e = STy->getNumElements(); i != e; ++i) {
            Address EltPtr = Builder.CreateStructGEP(Src, i);
            llvm::Value *LI = Builder.CreateLoad(EltPtr);
            if (ArgHasMaybeUndefAttr)
              LI = Builder.CreateFreeze(LI);
            IRCallArgs[FirstIRArg + i] = LI;
          }
        }
      } else {
        // In the simple case, just pass the coerced loaded value.
        assert(NumIRArgs == 1);
        llvm::Value *Load =
            CreateCoercedLoad(Src, ArgInfo.getCoerceToType(), *this);

        if (CallInfo.isCmseNSCall()) {
          // For certain parameter types, clear padding bits, as they may reveal
          // sensitive information.
          // Small struct/union types are passed as integer arrays.
          auto *ATy = dyn_cast<llvm::ArrayType>(Load->getType());
          if (ATy != nullptr && isa<RecordType>(I->Ty.getCanonicalType()))
            Load = EmitCMSEClearRecord(Load, ATy, I->Ty);
        }

        if (ArgHasMaybeUndefAttr)
          Load = Builder.CreateFreeze(Load);
        IRCallArgs[FirstIRArg] = Load;
      }

      break;
    }

    case ABIArgInfo::CoerceAndExpand: {
      auto coercionType = ArgInfo.getCoerceAndExpandType();
      auto layout = CGM.getDataLayout().getStructLayout(coercionType);

      llvm::Value *tempSize = nullptr;
      Address addr = Address::invalid();
      Address AllocaAddr = Address::invalid();
      if (I->isAggregate()) {
        addr = I->hasLValue() ? I->getKnownLValue().getAddress(*this)
                              : I->getKnownRValue().getAggregateAddress();

      } else {
        RValue RV = I->getKnownRValue();
        assert(RV.isScalar()); // complex should always just be direct

        llvm::Type *scalarType = RV.getScalarVal()->getType();
        auto scalarSize = CGM.getDataLayout().getTypeAllocSize(scalarType);
        auto scalarAlign = CGM.getDataLayout().getPrefTypeAlign(scalarType);

        // Materialize to a temporary.
        addr = CreateTempAlloca(
            RV.getScalarVal()->getType(),
            CharUnits::fromQuantity(std::max(layout->getAlignment(), scalarAlign)),
            "tmp",
            /*ArraySize=*/nullptr, &AllocaAddr);
        tempSize = EmitLifetimeStart(scalarSize, AllocaAddr.getPointer());

        Builder.CreateStore(RV.getScalarVal(), addr);
      }

      addr = addr.withElementType(coercionType);

      unsigned IRArgPos = FirstIRArg;
      for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
        llvm::Type *eltType = coercionType->getElementType(i);
        if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
        Address eltAddr = Builder.CreateStructGEP(addr, i);
        llvm::Value *elt = Builder.CreateLoad(eltAddr);
        if (ArgHasMaybeUndefAttr)
          elt = Builder.CreateFreeze(elt);
        IRCallArgs[IRArgPos++] = elt;
      }
      assert(IRArgPos == FirstIRArg + NumIRArgs);

      if (tempSize) {
        EmitLifetimeEnd(tempSize, AllocaAddr.getPointer());
      }

      break;
    }

    case ABIArgInfo::Expand: {
      unsigned IRArgPos = FirstIRArg;
      ExpandTypeToArgs(I->Ty, *I, IRFuncTy, IRCallArgs, IRArgPos);
      assert(IRArgPos == FirstIRArg + NumIRArgs);
      break;
    }
    }
  }

  const CGCallee &ConcreteCallee = Callee.prepareConcreteCallee(*this);
  llvm::Value *CalleePtr = ConcreteCallee.getFunctionPointer();

  // If we're using inalloca, set up that argument.
  if (ArgMemory.isValid()) {
    llvm::Value *Arg = ArgMemory.getPointer();
    assert(IRFunctionArgs.hasInallocaArg());
    IRCallArgs[IRFunctionArgs.getInallocaArgNo()] = Arg;
  }

  // 2. Prepare the function pointer.

  // If the callee is a bitcast of a non-variadic function to have a
  // variadic function pointer type, check to see if we can remove the
  // bitcast. This comes up with unprototyped functions.
  //
  // This makes the IR nicer, but more importantly it ensures that we
  // can inline the function at -O0 if it is marked always_inline.
  auto simplifyVariadicCallee = [](llvm::FunctionType *CalleeFT,
                                   llvm::Value *Ptr) -> llvm::Function * {
    if (!CalleeFT->isVarArg())
      return nullptr;

    // Get the underlying value if it's a bitcast.
    if (llvm::ConstantExpr *CE = dyn_cast<llvm::ConstantExpr>(Ptr)) {
      if (CE->getOpcode() == llvm::Instruction::BitCast)
        Ptr = CE->getOperand(0);
    }

    llvm::Function *OrigFn = dyn_cast<llvm::Function>(Ptr);
    if (!OrigFn)
      return nullptr;

    llvm::FunctionType *OrigFT = OrigFn->getFunctionType();

    // If the original type is variadic, or if any of the component types
    // disagree, we cannot remove the cast.
    if (OrigFT->isVarArg() ||
        OrigFT->getNumParams() != CalleeFT->getNumParams() ||
        OrigFT->getReturnType() != CalleeFT->getReturnType())
      return nullptr;

    for (unsigned i = 0, e = OrigFT->getNumParams(); i != e; ++i)
      if (OrigFT->getParamType(i) != CalleeFT->getParamType(i))
        return nullptr;

    return OrigFn;
  };

  if (llvm::Function *OrigFn = simplifyVariadicCallee(IRFuncTy, CalleePtr)) {
    CalleePtr = OrigFn;
    IRFuncTy = OrigFn->getFunctionType();
  }

  // 3. Perform the actual call.

  // Deactivate any cleanups that we're supposed to do immediately before
  // the call.
  if (!CallArgs.getCleanupsToDeactivate().empty())
    deactivateArgCleanupsBeforeCall(*this, CallArgs);

  // Assert that the arguments we computed match up. The IR verifier
  // will catch this, but this is a common enough source of problems
  // during IRGen changes that it's way better for debugging to catch
  // it ourselves here.
#ifndef NDEBUG
  assert(IRCallArgs.size() == IRFuncTy->getNumParams() || IRFuncTy->isVarArg());
  for (unsigned i = 0; i < IRCallArgs.size(); ++i) {
5510 // The inalloca argument can have a different type.
5511 if (IRFunctionArgs.hasInallocaArg() &&
5512 i == IRFunctionArgs.getInallocaArgNo())
5513 continue;
5514 if (i < IRFuncTy->getNumParams())
5515 assert(IRCallArgs[i]->getType() == IRFuncTy->getParamType(i));
5516 }
5517 #endif
5518
5519 // Update the largest vector width if any arguments have vector types.
5520 for (unsigned i = 0; i < IRCallArgs.size(); ++i)
5521 LargestVectorWidth = std::max(LargestVectorWidth,
5522 getMaxVectorWidth(IRCallArgs[i]->getType()));
5523
5524 // Compute the calling convention and attributes.
5525 unsigned CallingConv;
5526 llvm::AttributeList Attrs;
5527 CGM.ConstructAttributeList(CalleePtr->getName(), CallInfo,
5528 Callee.getAbstractInfo(), Attrs, CallingConv,
5529 /*AttrOnCallSite=*/true,
5530 /*IsThunk=*/false);
5531
5532 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5533 if (FD->hasAttr<StrictFPAttr>())
5534 // All calls within a strictfp function are marked strictfp
5535 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5536
5537 // If -ffast-math is enabled and the function is guarded by an
5538 // '__attribute__((optnone))', adjust the memory attribute so the backend
5539 // emits the library call instead of the intrinsic.
5540 if (FD->hasAttr<OptimizeNoneAttr>() && getLangOpts().FastMath)
5541 CGM.AdjustMemoryAttribute(CalleePtr->getName(), Callee.getAbstractInfo(),
5542 Attrs);
5543 }
5544 // Add the call-site nomerge attribute if present.
5545 if (InNoMergeAttributedStmt)
5546 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoMerge);
5547
5548 // Add the call-site noinline attribute if present.
5549 if (InNoInlineAttributedStmt)
5550 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5551
5552 // Add the call-site always_inline attribute if present.
5553 if (InAlwaysInlineAttributedStmt)
5554 Attrs =
5555 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5556
5557 // Apply some call-site-specific attributes.
5558 // TODO: work this into building the attribute set.
5559
5560 // Apply always_inline to all calls within flatten functions.
5561 // FIXME: should this really take priority over __try, below?
5562 if (CurCodeDecl && CurCodeDecl->hasAttr<FlattenAttr>() &&
5563 !InNoInlineAttributedStmt &&
5564 !(TargetDecl && TargetDecl->hasAttr<NoInlineAttr>())) {
5565 Attrs =
5566 Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::AlwaysInline);
5567 }
5568
5569 // Disable inlining inside SEH __try blocks.
5570 if (isSEHTryScope()) {
5571 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::NoInline);
5572 }
5573
5574 // Decide whether to use a call or an invoke.
5575 bool CannotThrow;
5576 if (currentFunctionUsesSEHTry()) {
5577 // SEH cares about asynchronous exceptions, so everything can "throw."
5578 CannotThrow = false;
5579 } else if (isCleanupPadScope() &&
5580 EHPersonality::get(*this).isMSVCXXPersonality()) {
5581 // The MSVC++ personality will implicitly terminate the program if an
5582 // exception is thrown during a cleanup outside of a try/catch.
5583 // We don't need to model anything in IR to get this behavior.
5584 CannotThrow = true;
5585 } else {
5586 // Otherwise, nounwind call sites will never throw.
5587 CannotThrow = Attrs.hasFnAttr(llvm::Attribute::NoUnwind);
5588
5589 if (auto *FPtr = dyn_cast<llvm::Function>(CalleePtr))
5590 if (FPtr->hasFnAttribute(llvm::Attribute::NoUnwind))
5591 CannotThrow = true;
5592 }
5593
5594 // If we made a temporary, be sure to clean up after ourselves. Note that we
5595 // can't depend on being inside of an ExprWithCleanups, so we need to manually
5596 // pop this cleanup later on. Being eager about this is OK, since this
5597 // temporary is 'invisible' outside of the callee.
5598 if (UnusedReturnSizePtr)
5599 pushFullExprCleanup<CallLifetimeEnd>(NormalEHLifetimeMarker, SRetAlloca,
5600 UnusedReturnSizePtr);
5601
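// Only set up an invoke destination (EH landing block) if the call can throw;
// otherwise a plain call is emitted below.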
5602 llvm::BasicBlock *InvokeDest = CannotThrow ? nullptr : getInvokeDest();
5603
5604 SmallVector<llvm::OperandBundleDef, 1> BundleList =
5605 getBundlesForFunclet(CalleePtr);
5606
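// Under the KCFI sanitizer, attach a kcfi operand bundle to indirect calls so
// the backend can emit the type check at the call site.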
5607 if (SanOpts.has(SanitizerKind::KCFI) &&
5608 !isa_and_nonnull<FunctionDecl>(TargetDecl))
5609 EmitKCFIOperandBundle(ConcreteCallee, BundleList);
5610
5611 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl))
5612 if (FD->hasAttr<StrictFPAttr>())
5613 // All calls within a strictfp function are marked strictfp
5614 Attrs = Attrs.addFnAttribute(getLLVMContext(), llvm::Attribute::StrictFP);
5615
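// Where possible, fold the callee's assume_aligned/alloc_align information
// into call-site return attributes; whatever cannot be encoded that way is
// emitted as an assumption on the return value further below.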
5616 AssumeAlignedAttrEmitter AssumeAlignedAttrEmitter(*this, TargetDecl);
5617 Attrs = AssumeAlignedAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5618
5619 AllocAlignAttrEmitter AllocAlignAttrEmitter(*this, TargetDecl, CallArgs);
5620 Attrs = AllocAlignAttrEmitter.TryEmitAsCallSiteAttribute(Attrs);
5621
5622 // Emit the actual call/invoke instruction.
5623 llvm::CallBase *CI;
5624 if (!InvokeDest) {
5625 CI = Builder.CreateCall(IRFuncTy, CalleePtr, IRCallArgs, BundleList);
5626 } else {
5627 llvm::BasicBlock *Cont = createBasicBlock("invoke.cont");
5628 CI = Builder.CreateInvoke(IRFuncTy, CalleePtr, Cont, InvokeDest, IRCallArgs,
5629 BundleList);
5630 EmitBlock(Cont);
5631 }
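// If the call resolves to a C++ sqrt overload (mangled names starting with
// "_Z4sqrt"), attach the requested floating-point accuracy metadata to it.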
5632 if (CI->getCalledFunction() && CI->getCalledFunction()->hasName() &&
5633 CI->getCalledFunction()->getName().starts_with("_Z4sqrt")) {
5634 SetSqrtFPAccuracy(CI);
5635 }
5636 if (callOrInvoke)
5637 *callOrInvoke = CI;
5638
5639 // If this is an indirect call within a function that has the guard(nocf)
5640 // attribute, add the "guard_nocf" attribute to this call to indicate that
5641 // Control Flow Guard checks should not be added, even if the call is inlined.
5642 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl)) {
5643 if (const auto *A = FD->getAttr<CFGuardAttr>()) {
5644 if (A->getGuard() == CFGuardAttr::GuardArg::nocf && !CI->getCalledFunction())
5645 Attrs = Attrs.addFnAttribute(getLLVMContext(), "guard_nocf");
5646 }
5647 }
5648
5649 // Apply the attributes and calling convention.
5650 CI->setAttributes(Attrs);
5651 CI->setCallingConv(static_cast<llvm::CallingConv::ID>(CallingConv));
5652
5653 // Apply various metadata.
5654
5655 if (!CI->getType()->isVoidTy())
5656 CI->setName("call");
5657
5658 // Update largest vector width from the return type.
5659 LargestVectorWidth =
5660 std::max(LargestVectorWidth, getMaxVectorWidth(CI->getType()));
5661
5662 // Insert instrumentation or attach profile metadata at indirect call sites.
5663 // For more details, see the comment before the definition of
5664 // IPVK_IndirectCallTarget in InstrProfData.inc.
5665 if (!CI->getCalledFunction())
5666 PGO.valueProfile(Builder, llvm::IPVK_IndirectCallTarget,
5667 CI, CalleePtr);
5668
5669 // In ObjC ARC mode with no ObjC ARC exception safety, tell the ARC
5670 // optimizer it can aggressively ignore unwind edges.
5671 if (CGM.getLangOpts().ObjCAutoRefCount)
5672 AddObjCARCExceptionMetadata(CI);
5673
5674 // Set tail call kind if necessary.
5675 if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(CI)) {
5676 if (TargetDecl && TargetDecl->hasAttr<NotTailCalledAttr>())
5677 Call->setTailCallKind(llvm::CallInst::TCK_NoTail);
5678 else if (IsMustTail)
5679 Call->setTailCallKind(llvm::CallInst::TCK_MustTail);
5680 }
5681
5682 // Add metadata for calls to MSAllocator functions
5683 if (getDebugInfo() && TargetDecl &&
5684 TargetDecl->hasAttr<MSAllocatorAttr>())
5685 getDebugInfo()->addHeapAllocSiteMetadata(CI, RetTy->getPointeeType(), Loc);
5686
5687 // Add metadata if calling an __attribute__((error(""))) or warning fn.
5688 if (TargetDecl && TargetDecl->hasAttr<ErrorAttr>()) {
5689 llvm::ConstantInt *Line =
5690 llvm::ConstantInt::get(Int32Ty, Loc.getRawEncoding());
5691 llvm::ConstantAsMetadata *MD = llvm::ConstantAsMetadata::get(Line);
5692 llvm::MDTuple *MDT = llvm::MDNode::get(getLLVMContext(), {MD});
5693 CI->setMetadata("srcloc", MDT);
5694 }
5695
5696 // 4. Finish the call.
5697
5698 // If the call doesn't return, finish the basic block and clear the
5699 // insertion point; this allows the rest of IRGen to discard
5700 // unreachable code.
5701 if (CI->doesNotReturn()) {
5702 if (UnusedReturnSizePtr)
5703 PopCleanupBlock();
5704
5705 // Strip away the noreturn attribute to better diagnose unreachable UB.
5706 if (SanOpts.has(SanitizerKind::Unreachable)) {
5707 // Also remove it from the called function, since CallBase::hasFnAttr
5708 // additionally checks attributes of the called function.
5709 if (auto *F = CI->getCalledFunction())
5710 F->removeFnAttr(llvm::Attribute::NoReturn);
5711 CI->removeFnAttr(llvm::Attribute::NoReturn);
5712
5713 // Avoid incompatibility with ASan which relies on the `noreturn`
5714 // attribute to insert handler calls.
5715 if (SanOpts.hasOneOf(SanitizerKind::Address |
5716 SanitizerKind::KernelAddress)) {
5717 SanitizerScope SanScope(this);
5718 llvm::IRBuilder<>::InsertPointGuard IPGuard(Builder);
5719 Builder.SetInsertPoint(CI);
5720 auto *FnType = llvm::FunctionType::get(CGM.VoidTy, /*isVarArg=*/false);
5721 llvm::FunctionCallee Fn =
5722 CGM.CreateRuntimeFunction(FnType, "__asan_handle_no_return");
5723 EmitNounwindRuntimeCall(Fn);
5724 }
5725 }
5726
5727 EmitUnreachable(Loc);
5728 Builder.ClearInsertionPoint();
5729
5730 // FIXME: For now, emit a dummy basic block because expression emitters
5731 // in general are not ready to handle emitting expressions at unreachable
5732 // points.
5733 EnsureInsertPoint();
5734
5735 // Return a reasonable RValue.
5736 return GetUndefRValue(RetTy);
5737 }
5738
5739 // If this is a musttail call, return immediately. We do not branch to the
5740 // epilogue in this case.
5741 if (IsMustTail) {
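// Cleanups may not run between a musttail call and the return; diagnose any
// pending cleanup that is not trivially removable before returning.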
5742 for (auto it = EHStack.find(CurrentCleanupScopeDepth); it != EHStack.end();
5743 ++it) {
5744 EHCleanupScope *Cleanup = dyn_cast<EHCleanupScope>(&*it);
5745 if (!(Cleanup && Cleanup->getCleanup()->isRedundantBeforeReturn()))
5746 CGM.ErrorUnsupported(MustTailCall, "tail call skipping over cleanups");
5747 }
5748 if (CI->getType()->isVoidTy())
5749 Builder.CreateRetVoid();
5750 else
5751 Builder.CreateRet(CI);
5752 Builder.ClearInsertionPoint();
5753 EnsureInsertPoint();
5754 return GetUndefRValue(RetTy);
5755 }
5756
5757 // Perform the swifterror writeback.
5758 if (swiftErrorTemp.isValid()) {
5759 llvm::Value *errorResult = Builder.CreateLoad(swiftErrorTemp);
5760 Builder.CreateStore(errorResult, swiftErrorArg);
5761 }
5762
5763 // Emit any call-associated writebacks immediately. Arguably this
5764 // should happen after any return-value munging.
5765 if (CallArgs.hasWritebacks())
5766 emitWritebacks(*this, CallArgs);
5767
5768 // The stack cleanup for inalloca arguments has to run out of the normal
5769 // lexical order, so deactivate it and run it manually here.
5770 CallArgs.freeArgumentMemory(*this);
5771
5772 // Extract the return value.
5773 RValue Ret = [&] {
5774 switch (RetAI.getKind()) {
5775 case ABIArgInfo::CoerceAndExpand: {
5776 auto coercionType = RetAI.getCoerceAndExpandType();
5777
5778 Address addr = SRetPtr.withElementType(coercionType);
5779
5780 assert(CI->getType() == RetAI.getUnpaddedCoerceAndExpandType());
5781 bool requiresExtract = isa<llvm::StructType>(CI->getType());
5782
5783 unsigned unpaddedIndex = 0;
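// Scatter each non-padding piece of the returned value into the coercion
// temporary, then fall through and reload the whole result as an ordinary
// indirect return.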
5784 for (unsigned i = 0, e = coercionType->getNumElements(); i != e; ++i) {
5785 llvm::Type *eltType = coercionType->getElementType(i);
5786 if (ABIArgInfo::isPaddingForCoerceAndExpand(eltType)) continue;
5787 Address eltAddr = Builder.CreateStructGEP(addr, i);
5788 llvm::Value *elt = CI;
5789 if (requiresExtract)
5790 elt = Builder.CreateExtractValue(elt, unpaddedIndex++);
5791 else
5792 assert(unpaddedIndex == 0);
5793 Builder.CreateStore(elt, eltAddr);
5794 }
5795 [[fallthrough]];
5796 }
5797
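// For indirect (sret) and inalloca returns the result already lives in the
// return slot (SRetPtr); just convert that memory to an RValue.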
5798 case ABIArgInfo::InAlloca:
5799 case ABIArgInfo::Indirect: {
5800 RValue ret = convertTempToRValue(SRetPtr, RetTy, SourceLocation());
5801 if (UnusedReturnSizePtr)
5802 PopCleanupBlock();
5803 return ret;
5804 }
5805
5806 case ABIArgInfo::Ignore:
5807 // Even if the return value is ignored, make sure to construct an
5808 // appropriate return value for our caller.
5809 return GetUndefRValue(RetTy);
5810
5811 case ABIArgInfo::Extend:
5812 case ABIArgInfo::Direct: {
5813 llvm::Type *RetIRTy = ConvertType(RetTy);
5814 if (RetAI.getCoerceToType() == RetIRTy && RetAI.getDirectOffset() == 0) {
5815 switch (getEvaluationKind(RetTy)) {
5816 case TEK_Complex: {
5817 llvm::Value *Real = Builder.CreateExtractValue(CI, 0);
5818 llvm::Value *Imag = Builder.CreateExtractValue(CI, 1);
5819 return RValue::getComplex(std::make_pair(Real, Imag));
5820 }
5821 case TEK_Aggregate: {
5822 Address DestPtr = ReturnValue.getValue();
5823 bool DestIsVolatile = ReturnValue.isVolatile();
5824
5825 if (!DestPtr.isValid()) {
5826 DestPtr = CreateMemTemp(RetTy, "agg.tmp");
5827 DestIsVolatile = false;
5828 }
5829 EmitAggregateStore(CI, DestPtr, DestIsVolatile);
5830 return RValue::getAggregate(DestPtr);
5831 }
5832 case TEK_Scalar: {
5833 // If the return value's type doesn't match, perform a bitcast to coerce
5834 // it. This can happen due to trivial type mismatches.
5835 llvm::Value *V = CI;
5836 if (V->getType() != RetIRTy)
5837 V = Builder.CreateBitCast(V, RetIRTy);
5838 return RValue::get(V);
5839 }
5840 }
5841 llvm_unreachable("bad evaluation kind");
5842 }
5843
5844 // If coercing a fixed vector from a scalable vector for ABI
5845 // compatibility, and the types match, use the llvm.vector.extract
5846 // intrinsic to perform the conversion.
5847 if (auto *FixedDst = dyn_cast<llvm::FixedVectorType>(RetIRTy)) {
5848 llvm::Value *V = CI;
5849 if (auto *ScalableSrc = dyn_cast<llvm::ScalableVectorType>(V->getType())) {
5850 if (FixedDst->getElementType() == ScalableSrc->getElementType()) {
5851 llvm::Value *Zero = llvm::Constant::getNullValue(CGM.Int64Ty);
5852 V = Builder.CreateExtractVector(FixedDst, V, Zero, "cast.fixed");
5853 return RValue::get(V);
5854 }
5855 }
5856 }
5857
5858 Address DestPtr = ReturnValue.getValue();
5859 bool DestIsVolatile = ReturnValue.isVolatile();
5860
5861 if (!DestPtr.isValid()) {
5862 DestPtr = CreateMemTemp(RetTy, "coerce");
5863 DestIsVolatile = false;
5864 }
5865
5866 // An empty record can overlap other data (if declared with
5867 // no_unique_address); omit the store for such types - as there is no
5868 // actual data to store.
5869 if (!isEmptyRecord(getContext(), RetTy, true)) {
5870 // If the value is offset in memory, apply the offset now.
5871 Address StorePtr = emitAddressAtOffset(*this, DestPtr, RetAI);
5872 CreateCoercedStore(CI, StorePtr, DestIsVolatile, *this);
5873 }
5874
5875 return convertTempToRValue(DestPtr, RetTy, SourceLocation());
5876 }
5877
5878 case ABIArgInfo::Expand:
5879 case ABIArgInfo::IndirectAliased:
5880 llvm_unreachable("Invalid ABI kind for return argument");
5881 }
5882
5883 llvm_unreachable("Unhandled ABIArgInfo::Kind");
5884 } ();
5885
5886 // Emit the assume_aligned check on the return value.
5887 if (Ret.isScalar() && TargetDecl) {
5888 AssumeAlignedAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5889 AllocAlignAttrEmitter.EmitAsAnAssumption(Loc, RetTy, Ret);
5890 }
5891
5892 // Explicitly call CallLifetimeEnd::Emit just to re-use the code even though
5893 // we can't use the full cleanup mechanism.
5894 for (CallLifetimeEnd &LifetimeEnd : CallLifetimeEndAfterCall)
5895 LifetimeEnd.Emit(*this, /*Flags=*/{});
5896
5897 if (!ReturnValue.isExternallyDestructed() &&
5898 RetTy.isDestructedType() == QualType::DK_nontrivial_c_struct)
5899 pushDestroy(QualType::DK_nontrivial_c_struct, Ret.getAggregateAddress(),
5900 RetTy);
5901
5902 return Ret;
5903 }
5904
5905 CGCallee CGCallee::prepareConcreteCallee(CodeGenFunction &CGF) const {
5906 if (isVirtual()) {
5907 const CallExpr *CE = getVirtualCallExpr();
5908 return CGF.CGM.getCXXABI().getVirtualFunctionPointer(
5909 CGF, getVirtualMethodDecl(), getThisAddress(), getVirtualFunctionType(),
5910 CE ? CE->getBeginLoc() : SourceLocation());
5911 }
5912
5913 return *this;
5914 }
5915
5916 /* VarArg handling */
5917
5918 Address CodeGenFunction::EmitVAArg(VAArgExpr *VE, Address &VAListAddr) {
5919 VAListAddr = VE->isMicrosoftABI()
5920 ? EmitMSVAListRef(VE->getSubExpr())
5921 : EmitVAListRef(VE->getSubExpr());
5922 QualType Ty = VE->getType();
5923 if (VE->isMicrosoftABI())
5924 return CGM.getTypes().getABIInfo().EmitMSVAArg(*this, VAListAddr, Ty);
5925 return CGM.getTypes().getABIInfo().EmitVAArg(*this, VAListAddr, Ty);
5926 }
5927