//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/CodeGenOptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
using namespace clang;
using namespace CodeGen;

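/// Construct the per-function code generation state. Unless
/// \p suppressNewContext is set, the mangle context is told that a new
/// function is starting. The builder's fast-math flags are seeded here from
/// the language and codegen options so that every instruction emitted for
/// this function picks them up.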
CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      CurFn(nullptr), CapturedStmtInfo(nullptr),
      SanOpts(CGM.getLangOpts().Sanitize), IsSanitizerScope(false),
      CurFuncIsThunk(false), AutoreleaseResult(false), SawAsmBlock(false),
      BlockInfo(nullptr), BlockPointer(nullptr),
      LambdaThisCaptureField(nullptr), NormalCleanupDest(nullptr),
      NextCleanupDestIndex(1), FirstBlockInfo(nullptr), EHResumeBlock(nullptr),
      ExceptionSlot(nullptr), EHSelectorSlot(nullptr),
      DebugInfo(CGM.getModuleDebugInfo()), DisableDebugInfo(false),
      DidCallStackSave(false), IndirectBranch(nullptr), PGO(cgm),
      SwitchInsn(nullptr), SwitchWeights(nullptr), CaseRangeBlock(nullptr),
      UnreachableBlock(nullptr), NumReturnExprs(0), NumSimpleReturnExprs(0),
      CXXABIThisDecl(nullptr), CXXABIThisValue(nullptr), CXXThisValue(nullptr),
      CXXDefaultInitExprThis(nullptr), CXXStructorImplicitParamDecl(nullptr),
      CXXStructorImplicitParamValue(nullptr), OutermostConditional(nullptr),
      CurLexicalScope(nullptr), TerminateLandingPad(nullptr),
      TerminateHandler(nullptr), TrapBB(nullptr) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setUnsafeAlgebra();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  Builder.SetFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP) {
    CGM.getOpenMPRuntime().FunctionFinished(*this);
  }
}

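/// Make an LValue for the given value using the natural alignment of its
/// type. The alignment is capped by -fmax-type-align for over-aligned types
/// whose alignment is not otherwise required.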
LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  CharUnits Alignment;
  if (CGM.getCXXABI().isTypeInfoCalculable(T)) {
    Alignment = getContext().getTypeAlignInChars(T);
    unsigned MaxAlign = getContext().getLangOpts().MaxTypeAlign;
    if (MaxAlign && Alignment.getQuantity() > MaxAlign &&
        !getContext().isAlignmentRequired(T))
      Alignment = CharUnits::fromQuantity(MaxAlign);
  }
  return LValue::MakeAddr(V, T, Alignment, getContext(), CGM.getTBAAInfo(T));
}

llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

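/// Return how a value of the given type should be handled during IR
/// generation: as a scalar, as a complex real/imaginary pair, or as an
/// aggregate that lives in memory.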
TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
      llvm_unreachable("undeduced auto type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

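/// Emit the unified return block, trying to fold it into the current
/// insertion point when that is safe. Returns the debug location of the lone
/// branch to the return block, if there was exactly one, so the final 'ret'
/// can reuse it.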
llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

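/// Complete IR generation for the current function body: pop the remaining
/// prologue cleanups, emit the return block and function epilog, and flush
/// any lazily-created blocks (EH resume, terminate handlers, indirect goto)
/// that turned out to be used.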
void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
                               && NumSimpleReturnExprs == NumReturnExprs
                               && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool EmitRetDbgLoc = true;
  if (EHStack.stable_begin() != PrologueCleanupDepth) {
    PopCleanupBlocks(PrologueCleanupDepth);

    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    EmitRetDbgLoc = false;

    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_exit");

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end of
  // the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal, zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// EmitFunctionInstrumentation - Emit LLVM code to call the specified
/// instrumentation function with the current function and the call site, if
/// function instrumentation is enabled.
void CodeGenFunction::EmitFunctionInstrumentation(const char *Fn) {
  // void __cyg_profile_func_{enter,exit} (void *this_fn, void *call_site);
  llvm::PointerType *PointerTy = Int8PtrTy;
  llvm::Type *ProfileFuncArgs[] = { PointerTy, PointerTy };
  llvm::FunctionType *FunctionTy =
      llvm::FunctionType::get(VoidTy, ProfileFuncArgs, false);

  llvm::Constant *F = CGM.CreateRuntimeFunction(FunctionTy, Fn);
  llvm::CallInst *CallSite = Builder.CreateCall(
      CGM.getIntrinsic(llvm::Intrinsic::returnaddress),
      llvm::ConstantInt::get(Int32Ty, 0),
      "callsite");

  llvm::Value *args[] = {
    llvm::ConstantExpr::getBitCast(CurFn, PointerTy),
    CallSite
  };

  EmitNounwindRuntimeCall(F, args);
}

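/// Emit a call to the profiling entry hook whose symbol name is provided by
/// the target (e.g. 'mcount'), used when compiling with -pg style
/// instrumentation.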
void CodeGenFunction::EmitMCountInstrumentation() {
  llvm::FunctionType *FTy = llvm::FunctionType::get(VoidTy, false);

  llvm::Constant *MCountFn =
      CGM.CreateRuntimeFunction(FTy, getTarget().getMCountName());
  EmitNounwindRuntimeCall(MCountFn);
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 SmallVector<llvm::Metadata *, 5> &kernelMDArgs,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key" followed by N values, where N is
  // the number of kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;
  addressQuals.push_back(llvm::MDString::get(Context, "kernel_arg_addr_space"));

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;
  accessQuals.push_back(llvm::MDString::get(Context, "kernel_arg_access_qual"));

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;
  argTypeNames.push_back(llvm::MDString::get(Context, "kernel_arg_type"));

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;
  argBaseTypeNames.push_back(
      llvm::MDString::get(Context, "kernel_arg_base_type"));

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;
  argTypeQuals.push_back(llvm::MDString::get(Context, "kernel_arg_type_qual"));

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;
  argNames.push_back(llvm::MDString::get(Context, "kernel_arg_name"));

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ASTCtx.getTargetAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      if (ty->isImageType())
        AddrSpc =
            CGM.getContext().getTargetAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isConstQualified())
        typeQuals = "const";
      if (ty.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image access qualifier:
    if (ty->isImageType()) {
      const OpenCLImageAccessAttr *A = parm->getAttr<OpenCLImageAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
      // FIXME: what about read_write?
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  kernelMDArgs.push_back(llvm::MDNode::get(Context, addressQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, accessQuals));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argBaseTypeNames));
  kernelMDArgs.push_back(llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    kernelMDArgs.push_back(llvm::MDNode::get(Context, argNames));
}

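/// Attach OpenCL kernel metadata (argument info plus any vec_type_hint,
/// work_group_size_hint, and reqd_work_group_size attributes) to the given
/// function by appending an entry to the module-level 'opencl.kernels' named
/// metadata.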
void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn) {
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  SmallVector<llvm::Metadata *, 5> kernelMDArgs;
  kernelMDArgs.push_back(llvm::ConstantAsMetadata::get(Fn));

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, kernelMDArgs, Builder,
                       getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType hintQTy = A->getTypeHint();
    const ExtVectorType *hintEltQTy = hintQTy->getAs<ExtVectorType>();
    bool isSignedInteger =
        hintQTy->isSignedIntegerType() ||
        (hintEltQTy && hintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "vec_type_hint"),
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(isSignedInteger ? 1 : 0))))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "work_group_size_hint"),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *attrMDArgs[] = {
        llvm::MDString::get(Context, "reqd_work_group_size"),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    kernelMDArgs.push_back(llvm::MDNode::get(Context, attrMDArgs));
  }

  llvm::MDNode *kernelMDNode = llvm::MDNode::get(Context, kernelMDArgs);
  llvm::NamedMDNode *OpenCLKernelMetadata =
      CGM.getModule().getOrInsertNamedMetadata("opencl.kernels");
  OpenCLKernelMetadata->addOperand(kernelMDNode);
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

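/// Emit the standard function prologue: set up the per-function state, create
/// the entry block with its alloca insertion point, emit debug info and
/// instrumentation hooks, set up the return value slot according to the ABI,
/// and emit the argument prologue (including the C++ 'this' and lambda
/// capture setup).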
void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  if (CGM.isInSanitizerBlacklist(Fn, Loc))
    SanOpts.clear();

  // Pass the inline keyword to the optimizer if it appears explicitly on any
  // declaration. Also, in the case of -fno-inline, attach the NoInline
  // attribute to all functions that are not marked AlwaysInline.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
    if (!CGM.getCodeGenOpts().NoInline) {
      for (auto RI : FD->redecls())
        if (RI->isInlineSpecified()) {
          Fn->addFnAttr(llvm::Attribute::InlineHint);
          break;
        }
    } else if (!FD->hasAttr<AlwaysInlineAttr>())
      Fn->addFnAttr(llvm::Attribute::NoInline);
  }

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig =
              CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM)) {
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(FD->getType(), /*ForEH=*/true);
        llvm::Constant *PrologueStructElems[] = { PrologueSig, FTRTTIConst };
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "", EntryBB);
  if (Builder.isNamePreserving())
    AllocaInsertPt->setName("allocapt");

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    SmallVector<QualType, 16> ArgTypes;
    for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
         i != e; ++i) {
      ArgTypes.push_back((*i)->getType());
    }

    QualType FnType =
        getContext().getFunctionType(RetTy, ArgTypes,
                                     FunctionProtoType::ExtProtoInfo());
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, Builder);
  }

  if (ShouldInstrumentFunction())
    EmitFunctionInstrumentation("__cyg_profile_func_enter");

  if (CGM.getCodeGenOpts().InstrumentForProfiling)
    EmitMCountInstrumentation();

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = nullptr;

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Indirect aggregate return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = AI;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(EI, Idx);
    ReturnValue = Builder.CreateLoad(Addr, "agg.result");
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();
  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If this lambda captures this, load it.
        LValue ThisLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
        CXXThisValue = EmitLoadOfLValue(ThisLValue,
                                        SourceLocation()).getScalarVal();
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);
}

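/// Emit the body of the current function: begin the PGO region for the body
/// and emit its statements, taking care not to introduce an extra lexical
/// scope for a top-level compound statement.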
void CodeGenFunction::EmitFunctionBody(FunctionArgList &Args,
                                       const Stmt *Body) {
  RegionCounter Cnt = getPGORegionCounter(Body);
  Cnt.beginRegion(Builder);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               RegionCounter &Cnt) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().ProfileInstrGenerate) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  Cnt.beginRegion(Builder, /*AddIncomingFallThrough=*/true);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}

/// Tries to mark the given function nounwind based on the
/// non-existence of any throwing calls within it. We believe this is
/// lightweight enough to do at -O0.
static void TryMarkNoThrow(llvm::Function *F) {
  // LLVM treats 'nounwind' on a function as part of the type, so we
  // can't do this on functions that can be overwritten.
  if (F->mayBeOverridden()) return;

  for (llvm::Function::iterator FI = F->begin(), FE = F->end(); FI != FE; ++FI)
    for (llvm::BasicBlock::iterator
           BI = FI->begin(), BE = FI->end(); BI != BE; ++BI)
      if (llvm::CallInst *Call = dyn_cast<llvm::CallInst>(&*BI)) {
        if (!Call->doesNotThrow())
          return;
      } else if (isa<llvm::ResumeInst>(&*BI)) {
        return;
      }
  F->setDoesNotThrow();
}

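/// Emit the body of an implicitly-defined sized deallocation function as a
/// forwarding call to the corresponding unsized 'operator delete', dropping
/// the size argument. The definition is emitted linkonce so duplicate
/// definitions in other translation units can be discarded.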
static void EmitSizedDeallocationFunction(CodeGenFunction &CGF,
                                          const FunctionDecl *UnsizedDealloc) {
  // This is a weak discardable definition of the sized deallocation function.
  CGF.CurFn->setLinkage(llvm::Function::LinkOnceAnyLinkage);

  // Call the unsized deallocation function and forward the first argument
  // unchanged.
  llvm::Constant *Unsized = CGF.CGM.GetAddrOfFunction(UnsizedDealloc);
  CGF.Builder.CreateCall(Unsized, &*CGF.CurFn->arg_begin());
}

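/// Drive IR generation for a single function definition: build the argument
/// list, emit the standard prologue, dispatch to the appropriate body emitter
/// (constructor, destructor, lambda thunk, plain body, ...), and finish with
/// the epilogue and a missing-return check for value-returning functions.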
void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
                                   const CGFunctionInfo &FnInfo) {
  const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());

  // Check if we should generate debug info for this function.
  if (FD->hasAttr<NoDebugAttr>())
    DebugInfo = nullptr; // disable debug info indefinitely for this function

  FunctionArgList Args;
  QualType ResTy = FD->getReturnType();

  CurGD = GD;
  const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
  if (MD && MD->isInstance()) {
    if (CGM.getCXXABI().HasThisReturn(GD))
      ResTy = MD->getThisType(getContext());
    else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
      ResTy = CGM.getContext().VoidPtrTy;
    CGM.getCXXABI().buildThisParam(*this, Args);
  }

  Args.append(FD->param_begin(), FD->param_end());

  if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
    CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);

  SourceRange BodyRange;
  if (Stmt *Body = FD->getBody()) BodyRange = Body->getSourceRange();
  CurEHLocation = BodyRange.getEnd();

  // Use the location of the start of the function to determine where
  // the function definition is located. By default use the location
  // of the declaration as the location for the subprogram. A function
  // may lack a declaration in the source code if it is created by code
  // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
  SourceLocation Loc = FD->getLocation();

  // If this is a function specialization then use the pattern body
  // as the location for the function.
  if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
    if (SpecDecl->hasBody(SpecDecl))
      Loc = SpecDecl->getLocation();

  // Emit the standard function prologue.
  StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());

  // Generate the body of the function.
  PGO.checkGlobalDecl(GD);
  PGO.assignRegionCounters(GD.getDecl(), CurFn);
  if (isa<CXXDestructorDecl>(FD))
    EmitDestructorBody(Args);
  else if (isa<CXXConstructorDecl>(FD))
    EmitConstructorBody(Args);
  else if (getLangOpts().CUDA &&
           !CGM.getCodeGenOpts().CUDAIsDevice &&
           FD->hasAttr<CUDAGlobalAttr>())
    CGM.getCUDARuntime().EmitDeviceStubBody(*this, Args);
  else if (isa<CXXConversionDecl>(FD) &&
           cast<CXXConversionDecl>(FD)->isLambdaToBlockPointerConversion()) {
    // The lambda conversion to block pointer is special; the semantics can't
    // be expressed in the AST, so IRGen needs to special-case it.
    EmitLambdaToBlockPointerBody(Args);
  } else if (isa<CXXMethodDecl>(FD) &&
             cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
    // The lambda static invoker function is special, because it forwards or
    // clones the body of the function call operator (but is actually static).
    EmitLambdaStaticInvokeFunction(cast<CXXMethodDecl>(FD));
  } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
             (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
              cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
    // Implicit copy-assignment gets the same special treatment as implicit
    // copy-constructors.
    emitImplicitAssignmentOperatorBody(Args);
  } else if (Stmt *Body = FD->getBody()) {
    EmitFunctionBody(Args, Body);
  } else if (FunctionDecl *UnsizedDealloc =
                 FD->getCorrespondingUnsizedGlobalDeallocationFunction()) {
    // Global sized deallocation functions get an implicit weak definition if
    // they don't have an explicit definition.
    EmitSizedDeallocationFunction(*this, UnsizedDealloc);
  } else
    llvm_unreachable("no definition for emitted function");

  // C++11 [stmt.return]p2:
  //   Flowing off the end of a function [...] results in undefined behavior in
  //   a value-returning function.
  // C11 6.9.1p12:
  //   If the '}' that terminates a function is reached, and the value of the
  //   function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                "missing_return", EmitCheckSourceLocation(FD->getLocation()),
                None);
    } else if (CGM.getCodeGenOpts().OptimizationLevel == 0)
      Builder.CreateCall(CGM.getIntrinsic(llvm::Intrinsic::trap));
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label in it. If
/// this statement is not executed normally, and it contains no label, then
/// we can just remove the code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we have
  // to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (ContainsLabel(*I, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, not a break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (Stmt::const_child_range I = S->children(); I; ++I)
    if (containsBreak(*I))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in ResultBool.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::
ConstantFoldsToSimpleInteger(const Expr *Cond, llvm::APSInt &ResultInt) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  llvm::APSInt Int;
  if (!Cond->EvaluateAsInt(Int, getContext()))
    return false;  // Not foldable, not integer or not fully evaluatable.

  if (CodeGenFunction::ContainsLabel(Cond))
    return false;  // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
/// statement) to the specified blocks. Based on the condition, this might try
/// to simplify the codegen of the conditional based on the branch.
///
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = Cnt.getCount();

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
      EmitBlock(LHSTrue);

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      RegionCounter Cnt = getPGORegionCounter(CondBOp);

      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        Cnt.beginRegion(Builder);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the TrueCount between the short
      // circuit and the RHS.
      uint64_t LHSCount = Cnt.getParentCount() - Cnt.getCount();
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
      EmitBlock(LHSFalse);

      // Any temporaries created here are conditional.
      Cnt.beginRegion(Builder);
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = PGO.getCurrentRegionCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    RegionCounter Cnt = getPGORegionCounter(CondOp);
    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock, Cnt.getCount());

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio = Cnt.getCount() / (double) Cnt.getParentCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    Cnt.beginRegion(Builder);
    EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                         LHSScaledTrueCount);
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }
  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
  uint64_t CurrentCount = std::max(PGO.getCurrentRegionCount(), TrueCount);
  llvm::MDNode *Weights = PGO.createBranchWeights(TrueCount,
                                                  CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV = EvaluateExprAsBool(Cond);
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param dest - a pointer to the destination storage of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               llvm::Value *dest, llvm::Value *src,
                               llvm::Value *sizeInChars) {
  std::pair<CharUnits,CharUnits> baseSizeAndAlign
    = CGF.getContext().getTypeInfoInChars(baseType);

  CGBuilderTy &Builder = CGF.Builder;

  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSizeAndAlign.first.getQuantity());

  llvm::Type *i8p = Builder.getInt8PtrTy();

  llvm::Value *begin = Builder.CreateBitCast(dest, i8p, "vla.begin");
  llvm::Value *end = Builder.CreateInBoundsGEP(dest, sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA. C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(i8p, 2, "vla.cur");
  cur->addIncoming(begin, originBB);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(cur, src, baseSizeInChars,
                       baseSizeAndAlign.second.getQuantity(),
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next = Builder.CreateConstInBoundsGEP1_32(cur, 1, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

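/// Emit the zero-initialization of an object of the given type at the given
/// address. For types that are not zero-initializable (e.g. those containing
/// pointers to data members), this copies a null constant pattern rather
/// than emitting a memset.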
void
CodeGenFunction::EmitNullInitialization(llvm::Value *DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  unsigned DestAS =
      cast<llvm::PointerType>(DestPtr->getType())->getAddressSpace();
  llvm::Type *BP = Builder.getInt8PtrTy(DestAS);
  if (DestPtr->getType() != BP)
    DestPtr = Builder.CreateBitCast(DestPtr, BP);

  // Get size and alignment info for this aggregate.
  std::pair<CharUnits, CharUnits> TypeInfo =
      getContext().getTypeInfoInChars(Ty);
  CharUnits Size = TypeInfo.first;
  CharUnits Align = TypeInfo.second;

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (Size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      QualType eltType;
      llvm::Value *numElts;
      std::tie(numElts, eltType) = getVLASize(vlaType);

      SizeVal = numElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(eltType);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(Size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
        new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                                 /*isConstant=*/true,
                                 llvm::GlobalVariable::PrivateLinkage,
                                 NullConstant, Twine());
    llvm::Value *SrcPtr =
        Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy());

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, Align.getQuantity(), false);
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal,
                       Align.getQuantity(), false);
}

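/// Return the address of the given label as an llvm.blockaddress constant,
/// registering the label's block as a destination of the function's shared
/// indirect branch.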
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

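/// Lazily create (or return) the single block that all indirect gotos in the
/// function branch through; it contains a PHI of destination addresses
/// feeding an 'indirectbr' instruction.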
GetIndirectGotoBlock()1337 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1338 // If we already made the indirect branch for indirect goto, return its block.
1339 if (IndirectBranch) return IndirectBranch->getParent();
1340
1341 CGBuilderTy TmpBuilder(createBasicBlock("indirectgoto"));
1342
1343 // Create the PHI node that indirect gotos will add entries to.
1344 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1345 "indirect.goto.dest");
1346
1347 // Create the indirect branch instruction.
1348 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1349 return IndirectBranch->getParent();
1350 }
1351
/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              llvm::Value *&addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size.  Note that getVLASize()
  // folds all of the nested VLA dimensions together, so this is a count of
  // elements of the first non-VLA element type, not a byte size.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).first;

    // Walk into all VLAs.  This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*.  Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(
      cast<llvm::PointerType>(addr->getType())->getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct).  Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    unsigned AddressSpace = addr->getType()->getPointerAddressSpace();
    llvm::Type *BaseType = ConvertType(eltType)->getPointerTo(AddressSpace);
    addr = Builder.CreateBitCast(addr, BaseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Builder.CreateInBoundsGEP(addr, gepIndices, "array.begin");
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

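// A sketch of the computation above (illustrative values, not from this
// file): for a local declared as
//
//   float a[n][m][16];
//
// the VLA walk yields n*m (see getVLASize below), addr has LLVM type
// [16 x float]*, the constant-length loop contributes 16, and the result is
// n*m*16 elements with baseType 'float'.
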
std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

std::pair<llvm::Value*, QualType>
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return std::pair<llvm::Value*,QualType>(numElements, elementType);
}

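// Illustrative example (not from this file): for 'double d[n][m]', the loop
// visits both VLA dimensions, so the result is the pair (n*m as a size_t
// value, QualType 'double').  The multiply carries the NUW flag because a
// wrapping bound is already undefined behavior.
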
void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a value
          //   greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
              EmitCheckSourceLocation(size->getLocStart()),
              EmitCheckTypeDescriptor(size->getType())
            };
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      "vla_bound_not_positive", StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

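// Illustrative examples of what this walk records (not from this file):
//
//   void f(int n) {
//     int (*p)[n];          // walks Pointer, then records 'n' for the VLA
//     typedef int vec[n];   // records 'n' when the typedef is emitted
//   }
//
// Each distinct size expression is evaluated once, cached in VLASizeMap, and
// reused by later lookups such as getVLASize above.
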
llvm::Value *CodeGenFunction::EmitVAListRef(const Expr *E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitScalarExpr(E);
  return EmitLValue(E).getAddress();
}

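// Rationale, stated as an assumption about common ABIs: on targets where
// __builtin_va_list is an array type (e.g. __va_list_tag[1] on x86-64), a
// va_list expression decays to a pointer, so the scalar value already is the
// address we want; on targets where it is a non-array type (e.g. a plain
// char* on many 32-bit targets), the expression itself is the va_list
// object, so we take the lvalue's address instead.
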
void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              llvm::Constant *Init) {
  assert(Init && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= CodeGenOptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

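// Why a dead bitcast helps (a sketch of the mechanism, stated as an
// assumption about the peephole): the trunc(zext) peephole recognizes a
// value like
//
//   %z = zext i1 %cond to i32
//
// and, when the zext has no other uses, erases it while handing back its
// operand.  If the protected rvalue is that zext, erasing it would leave a
// dangling reference, so the no-op 'bitcast i32 %z to i32' keeps it alive;
// unprotectFromPeepholes removes the guard once the danger has passed.
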
void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to re-apply the peephole now, but it isn't
  // worth the complexity.
  protection.Inst->eraseFromParent();
}

llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

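// Shape of the emitted call (illustrative operand names): for the
// llvm.var.annotation intrinsic this produces roughly
//
//   call void @llvm.var.annotation(i8* %val, i8* @.str.annotation,
//                                  i8* @.str.unit, i32 <line>)
//
// where the last three operands are the annotation string, the translation
// unit name, and the source line of 'Location'.
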
void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

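// Source-level trigger (a minimal example): each annotate attribute on a
// local variable yields one llvm.var.annotation call.
//
//   void f(void) {
//     int counter __attribute__((annotate("my_tag"))) = 0;
//   }
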
llvm::Value *CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                                   llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.Insert(new llvm::BitCastInst(V, CGM.Int8PtrTy));
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return V;
}

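// Source-level trigger (a minimal example): accessing an annotated field
// routes the field address through llvm.ptr.annotation, so tools can observe
// the tag on every access.
//
//   struct S {
//     int data __attribute__((annotate("field_tag")));
//   };
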
CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

template <bool PreserveNames>
void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter<PreserveNames>::InsertHelper(I, Name, BB,
                                                              InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

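// How the hook chain fits together (a summary of the code above): every
// instruction created through CGBuilderTy is first placed by the default
// IRBuilder inserter, then forwarded to CodeGenFunction::InsertHelper, which
// lets the loop-metadata stack and the sanitizer-suppression logic observe
// it.
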
#ifdef NDEBUG
#define PreserveNames false
#else
#define PreserveNames true
#endif
template void CGBuilderInserter<PreserveNames>::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const;
#undef PreserveNames