//===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
//
// The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This coordinates the per-function state used while generating code.
//
//===----------------------------------------------------------------------===//

#include "CodeGenFunction.h"
#include "CGBlocks.h"
#include "CGCleanup.h"
#include "CGCUDARuntime.h"
#include "CGCXXABI.h"
#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenModule.h"
#include "CodeGenPGO.h"
#include "TargetInfo.h"
#include "clang/AST/ASTContext.h"
#include "clang/AST/ASTLambda.h"
#include "clang/AST/Decl.h"
#include "clang/AST/DeclCXX.h"
#include "clang/AST/StmtCXX.h"
#include "clang/AST/StmtObjC.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/CodeGenOptions.h"
#include "clang/Basic/TargetInfo.h"
#include "clang/CodeGen/CGFunctionInfo.h"
#include "clang/Frontend/FrontendDiagnostic.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/Dominators.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Operator.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
using namespace clang;
using namespace CodeGen;

/// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
/// markers.
static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
                                      const LangOptions &LangOpts) {
  if (CGOpts.DisableLifetimeMarkers)
    return false;

  // Disable lifetime markers in msan builds.
  // FIXME: Remove this when msan works with lifetime markers.
  if (LangOpts.Sanitize.has(SanitizerKind::Memory))
    return false;

  // Asan uses markers for use-after-scope checks.
  if (CGOpts.SanitizeAddressUseAfterScope)
    return true;

  // For now, only in optimized builds.
  return CGOpts.OptimizationLevel != 0;
}

CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
    : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
      Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
              CGBuilderInserterTy(this)),
      SanOpts(CGM.getLangOpts().Sanitize), DebugInfo(CGM.getModuleDebugInfo()),
      PGO(cgm), ShouldEmitLifetimeMarkers(shouldEmitLifetimeMarkers(
                    CGM.getCodeGenOpts(), CGM.getLangOpts())) {
  if (!suppressNewContext)
    CGM.getCXXABI().getMangleContext().startNewFunction();

  llvm::FastMathFlags FMF;
  if (CGM.getLangOpts().FastMath)
    FMF.setFast();
  if (CGM.getLangOpts().FiniteMathOnly) {
    FMF.setNoNaNs();
    FMF.setNoInfs();
  }
  if (CGM.getCodeGenOpts().NoNaNsFPMath) {
    FMF.setNoNaNs();
  }
  if (CGM.getCodeGenOpts().NoSignedZeros) {
    FMF.setNoSignedZeros();
  }
  if (CGM.getCodeGenOpts().ReciprocalMath) {
    FMF.setAllowReciprocal();
  }
  if (CGM.getCodeGenOpts().Reassociate) {
    FMF.setAllowReassoc();
  }
  Builder.setFastMathFlags(FMF);
}

CodeGenFunction::~CodeGenFunction() {
  assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");

  // If there are any unclaimed block infos, go ahead and destroy them
  // now. This can happen if IR-gen gets clever and skips evaluating
  // something.
  if (FirstBlockInfo)
    destroyBlockInfos(FirstBlockInfo);

  if (getLangOpts().OpenMP && CurFn)
    CGM.getOpenMPRuntime().functionFinished(*this);
}

CharUnits CodeGenFunction::getNaturalPointeeTypeAlignment(QualType T,
                                                          LValueBaseInfo *BaseInfo,
                                                          TBAAAccessInfo *TBAAInfo) {
  return getNaturalTypeAlignment(T->getPointeeType(), BaseInfo, TBAAInfo,
                                 /* forPointeeType= */ true);
}

CharUnits CodeGenFunction::getNaturalTypeAlignment(QualType T,
                                                   LValueBaseInfo *BaseInfo,
                                                   TBAAAccessInfo *TBAAInfo,
                                                   bool forPointeeType) {
  if (TBAAInfo)
    *TBAAInfo = CGM.getTBAAAccessInfo(T);

  // Honor alignment typedef attributes even on incomplete types.
  // We also honor them straight for C++ class types, even as pointees;
  // there's an expressivity gap here.
  if (auto TT = T->getAs<TypedefType>()) {
    if (auto Align = TT->getDecl()->getMaxAlignment()) {
      if (BaseInfo)
        *BaseInfo = LValueBaseInfo(AlignmentSource::AttributedType);
      return getContext().toCharUnitsFromBits(Align);
    }
  }

  if (BaseInfo)
    *BaseInfo = LValueBaseInfo(AlignmentSource::Type);

  CharUnits Alignment;
  if (T->isIncompleteType()) {
    Alignment = CharUnits::One(); // Shouldn't be used, but pessimistic is best.
  } else {
    // For C++ class pointees, we don't know whether we're pointing at a
    // base or a complete object, so we generally need to use the
    // non-virtual alignment.
    const CXXRecordDecl *RD;
    if (forPointeeType && (RD = T->getAsCXXRecordDecl())) {
      Alignment = CGM.getClassPointerAlignment(RD);
    } else {
      Alignment = getContext().getTypeAlignInChars(T);
      if (T.getQualifiers().hasUnaligned())
        Alignment = CharUnits::One();
    }

    // Cap to the global maximum type alignment unless the alignment
    // was somehow explicit on the type.
    if (unsigned MaxAlign = getLangOpts().MaxTypeAlign) {
      if (Alignment.getQuantity() > MaxAlign &&
          !getContext().isAlignmentRequired(T))
        Alignment = CharUnits::fromQuantity(MaxAlign);
    }
  }
  return Alignment;
}

LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Alignment = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
  return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
                          TBAAInfo);
}

/// Given a value of type T* that may not point to a complete object, construct
/// an l-value with the natural pointee alignment of T.
LValue
CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
  LValueBaseInfo BaseInfo;
  TBAAAccessInfo TBAAInfo;
  CharUnits Align = getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
                                            /* forPointeeType= */ true);
  return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
}


llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
  return CGM.getTypes().ConvertTypeForMem(T);
}

llvm::Type *CodeGenFunction::ConvertType(QualType T) {
  return CGM.getTypes().ConvertType(T);
}

TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
  type = type.getCanonicalType();
  while (true) {
    switch (type->getTypeClass()) {
#define TYPE(name, parent)
#define ABSTRACT_TYPE(name, parent)
#define NON_CANONICAL_TYPE(name, parent) case Type::name:
#define DEPENDENT_TYPE(name, parent) case Type::name:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("non-canonical or dependent type in IR-generation");

    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      llvm_unreachable("undeduced type in IR-generation");

    // Various scalar types.
    case Type::Builtin:
    case Type::Pointer:
    case Type::BlockPointer:
    case Type::LValueReference:
    case Type::RValueReference:
    case Type::MemberPointer:
    case Type::Vector:
    case Type::ExtVector:
    case Type::FunctionProto:
    case Type::FunctionNoProto:
    case Type::Enum:
    case Type::ObjCObjectPointer:
    case Type::Pipe:
      return TEK_Scalar;

    // Complexes.
    case Type::Complex:
      return TEK_Complex;

    // Arrays, records, and Objective-C objects.
    case Type::ConstantArray:
    case Type::IncompleteArray:
    case Type::VariableArray:
    case Type::Record:
    case Type::ObjCObject:
    case Type::ObjCInterface:
      return TEK_Aggregate;

    // We operate on atomic values according to their underlying type.
    case Type::Atomic:
      type = cast<AtomicType>(type)->getValueType();
      continue;
    }
    llvm_unreachable("unknown type kind!");
  }
}

llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
  // For cleanliness, we try to avoid emitting the return block for
  // simple cases.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (CurBB) {
    assert(!CurBB->getTerminator() && "Unexpected terminated block.");

    // We have a valid insert point, reuse it if it is empty or there are no
    // explicit jumps to the return block.
    if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
      ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
      delete ReturnBlock.getBlock();
    } else
      EmitBlock(ReturnBlock.getBlock());
    return llvm::DebugLoc();
  }

  // Otherwise, if the return block is the target of a single direct
  // branch then we can just put the code in that block instead. This
  // cleans up functions which started with a unified return block.
  if (ReturnBlock.getBlock()->hasOneUse()) {
    llvm::BranchInst *BI =
        dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
    if (BI && BI->isUnconditional() &&
        BI->getSuccessor(0) == ReturnBlock.getBlock()) {
      // Record/return the DebugLoc of the simple 'return' expression to be
      // used later by the actual 'ret' instruction.
      llvm::DebugLoc Loc = BI->getDebugLoc();
      Builder.SetInsertPoint(BI->getParent());
      BI->eraseFromParent();
      delete ReturnBlock.getBlock();
      return Loc;
    }
  }

  // FIXME: We are at an unreachable point, there is no reason to emit the block
  // unless it has uses. However, we still need a place to put the debug
  // region.end for now.

  EmitBlock(ReturnBlock.getBlock());
  return llvm::DebugLoc();
}

static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
  if (!BB) return;
  if (!BB->use_empty())
    return CGF.CurFn->getBasicBlockList().push_back(BB);
  delete BB;
}

void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
  assert(BreakContinueStack.empty() &&
         "mismatched push/pop in break/continue stack!");

  bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
    && NumSimpleReturnExprs == NumReturnExprs
    && ReturnBlock.getBlock()->use_empty();
  // Usually the return expression is evaluated before the cleanup
  // code. If the function contains only a simple return statement,
  // such as a constant, the location before the cleanup code becomes
  // the last useful breakpoint in the function, because the simple
  // return expression will be evaluated after the cleanup code. To be
  // safe, set the debug location for cleanup code to the location of
  // the return statement. Otherwise the cleanup code should be at the
  // end of the function's lexical scope.
  //
  // If there are multiple branches to the return block, the branch
  // instructions will get the location of the return statements and
  // all will be fine.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (OnlySimpleReturnStmts)
      DI->EmitLocation(Builder, LastStopPoint);
    else
      DI->EmitLocation(Builder, EndLoc);
  }

  // Pop any cleanups that might have been associated with the
  // parameters. Do this in whatever block we're currently in; it's
  // important to do this before we enter the return block or return
  // edges will be *really* confused.
  bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
  bool HasOnlyLifetimeMarkers =
      HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
  bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
  if (HasCleanups) {
    // Make sure the line table doesn't jump back into the body for
    // the ret after it's been at EndLoc.
    if (CGDebugInfo *DI = getDebugInfo())
      if (OnlySimpleReturnStmts)
        DI->EmitLocation(Builder, EndLoc);

    PopCleanupBlocks(PrologueCleanupDepth);
  }

  // Emit function epilog (to return).
  llvm::DebugLoc Loc = EmitReturnBlock();

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-exit-inlined",
                       "__cyg_profile_func_exit");
  }

  // Emit debug descriptor for function end.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitFunctionEnd(Builder, CurFn);

  // Reset the debug location to that of the simple 'return' expression, if
  // any, rather than that of the end of the function's scope '}'.
  ApplyDebugLocation AL(*this, Loc);
  EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
  EmitEndEHSpec(CurCodeDecl);

  assert(EHStack.empty() &&
         "did not remove all scopes from cleanup stack!");

  // If someone did an indirect goto, emit the indirect goto block at the end
  // of the function.
  if (IndirectBranch) {
    EmitBlock(IndirectBranch->getParent());
    Builder.ClearInsertionPoint();
  }

  // If some of our locals escaped, insert a call to llvm.localescape in the
  // entry block.
  if (!EscapedLocals.empty()) {
    // Invert the map from local to index into a simple vector. There should
    // be no holes.
    SmallVector<llvm::Value *, 4> EscapeArgs;
    EscapeArgs.resize(EscapedLocals.size());
    for (auto &Pair : EscapedLocals)
      EscapeArgs[Pair.second] = Pair.first;
    llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
        &CGM.getModule(), llvm::Intrinsic::localescape);
    CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
  }

  // Remove the AllocaInsertPt instruction, which is just a convenience for us.
  llvm::Instruction *Ptr = AllocaInsertPt;
  AllocaInsertPt = nullptr;
  Ptr->eraseFromParent();

  // If someone took the address of a label but never did an indirect goto, we
  // made a zero entry PHI node, which is illegal; zap it now.
  if (IndirectBranch) {
    llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
    if (PN->getNumIncomingValues() == 0) {
      PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
      PN->eraseFromParent();
    }
  }

  EmitIfUsed(*this, EHResumeBlock);
  EmitIfUsed(*this, TerminateLandingPad);
  EmitIfUsed(*this, TerminateHandler);
  EmitIfUsed(*this, UnreachableBlock);

  for (const auto &FuncletAndParent : TerminateFunclets)
    EmitIfUsed(*this, FuncletAndParent.second);

  if (CGM.getCodeGenOpts().EmitDeclMetadata)
    EmitDeclMetadata();

  for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
           I = DeferredReplacements.begin(),
           E = DeferredReplacements.end();
       I != E; ++I) {
    I->first->replaceAllUsesWith(I->second);
    I->first->eraseFromParent();
  }

  // Eliminate the CleanupDestSlot alloca by replacing it with SSA values and
  // PHIs if the current function is a coroutine. We don't do this for all
  // functions, as it may result in a slight increase in the number of
  // instructions when compiled with no optimizations. We do it for coroutines
  // because the lifetime of the CleanupDestSlot alloca makes correct
  // coroutine frame building very difficult.
  if (NormalCleanupDest.isValid() && isCoroutine()) {
    llvm::DominatorTree DT(*CurFn);
    llvm::PromoteMemToReg(
        cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
    NormalCleanupDest = Address::invalid();
  }

  // Scan function arguments for vector width.
  for (llvm::Argument &A : CurFn->args())
    if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
      LargestVectorWidth = std::max(LargestVectorWidth,
                                    VT->getPrimitiveSizeInBits());

  // Update vector width based on return type.
  if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
    LargestVectorWidth = std::max(LargestVectorWidth,
                                  VT->getPrimitiveSizeInBits());

  // Add the required-vector-width attribute. This contains the max width from:
  // 1. min-vector-width attribute used in the source program.
  // 2. Any builtins used that have a vector width specified.
  // 3. Values passed in and out of inline assembly.
  // 4. Width of vector arguments and return types for this function.
  // 5. Width of vector arguments and return types for functions called by
  //    this function.
  CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
}

/// ShouldInstrumentFunction - Return true if the current function should be
/// instrumented with __cyg_profile_func_* calls.
bool CodeGenFunction::ShouldInstrumentFunction() {
  if (!CGM.getCodeGenOpts().InstrumentFunctions &&
      !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
      !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
    return false;
  if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
    return false;
  return true;
}

/// ShouldXRayInstrumentFunction - Return true if the current function should
/// be instrumented with XRay nop sleds.
bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions;
}

/// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
/// the __xray_customevent(...) builtin, when doing XRay instrumentation.
bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Custom);
}

bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
  return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
         (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
          CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
              XRayInstrKind::Typed);
}

llvm::Constant *
CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
                                            llvm::Constant *Addr) {
  // Addresses stored in prologue data can't require run-time fixups and must
  // be PC-relative. Run-time fixups are undesirable because they necessitate
  // writable text segments, which are unsafe. And absolute addresses are
  // undesirable because they break PIE mode.

  // Add a layer of indirection through a private global. Taking its address
  // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
  auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
                                      /*isConstant=*/true,
                                      llvm::GlobalValue::PrivateLinkage, Addr);

  // Create a PC-relative address.
  auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
  auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
  auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
  return (IntPtrTy == Int32Ty)
             ? PCRelAsInt
             : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
}

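/// Decode an address encoded by EncodeAddrForUseInPrologue: add the
/// PC-relative offset back to the function's address to recover the private
/// global, then load the original pointer back through it.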
llvm::Value *
CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
                                          llvm::Value *EncodedAddr) {
  // Reconstruct the address of the global.
  auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
  auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
  auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
  auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");

  // Load the original pointer through the global.
  return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
                            "decoded_addr");
}

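// Strip an OpenCL image access qualifier from a printed type name; e.g.
// "__read_only image2d_t" becomes "image2d_t". At most one qualifier is
// removed, together with the space that follows it.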
static void removeImageAccessQualifier(std::string& TyName) {
  std::string ReadOnlyQual("__read_only");
  std::string::size_type ReadOnlyPos = TyName.find(ReadOnlyQual);
  if (ReadOnlyPos != std::string::npos)
    // "+ 1" for the space after access qualifier.
    TyName.erase(ReadOnlyPos, ReadOnlyQual.size() + 1);
  else {
    std::string WriteOnlyQual("__write_only");
    std::string::size_type WriteOnlyPos = TyName.find(WriteOnlyQual);
    if (WriteOnlyPos != std::string::npos)
      TyName.erase(WriteOnlyPos, WriteOnlyQual.size() + 1);
    else {
      std::string ReadWriteQual("__read_write");
      std::string::size_type ReadWritePos = TyName.find(ReadWriteQual);
      if (ReadWritePos != std::string::npos)
        TyName.erase(ReadWritePos, ReadWriteQual.size() + 1);
    }
  }
}

// Returns the address space id that should be emitted in the
// kernel_arg_addr_space metadata. This is always fixed to the ids specified
// in the SPIR 2.0 specification so that, for example, the clGetKernelArgInfo()
// implementation can differentiate between the address spaces on targets
// without a unique mapping to the OpenCL address spaces (basically all
// single-AS CPUs).
static unsigned ArgInfoAddressSpace(LangAS AS) {
  switch (AS) {
  case LangAS::opencl_global: return 1;
  case LangAS::opencl_constant: return 2;
  case LangAS::opencl_local: return 3;
  case LangAS::opencl_generic: return 4; // Not in SPIR 2.0 specs.
  default:
    return 0; // Assume private.
  }
}

// OpenCL v1.2 s5.6.4.6 allows the compiler to store kernel argument
// information in the program executable. The argument information stored
// includes the argument name, its type, the address and access qualifiers used.
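// For example, a kernel declared as 'kernel void foo(global float *x)' would
// get metadata roughly of the form (illustrative only; the exact strings are
// computed below):
//   !kernel_arg_addr_space !{i32 1}
//   !kernel_arg_access_qual !{!"none"}
//   !kernel_arg_type !{!"float*"}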
static void GenOpenCLArgMetadata(const FunctionDecl *FD, llvm::Function *Fn,
                                 CodeGenModule &CGM, llvm::LLVMContext &Context,
                                 CGBuilderTy &Builder, ASTContext &ASTCtx) {
  // Create MDNodes that represent the kernel arg metadata.
  // Each MDNode is a list of the form "key" followed by N values, where N is
  // the number of kernel arguments.

  const PrintingPolicy &Policy = ASTCtx.getPrintingPolicy();

  // MDNode for the kernel argument address space qualifiers.
  SmallVector<llvm::Metadata *, 8> addressQuals;

  // MDNode for the kernel argument access qualifiers (images only).
  SmallVector<llvm::Metadata *, 8> accessQuals;

  // MDNode for the kernel argument type names.
  SmallVector<llvm::Metadata *, 8> argTypeNames;

  // MDNode for the kernel argument base type names.
  SmallVector<llvm::Metadata *, 8> argBaseTypeNames;

  // MDNode for the kernel argument type qualifiers.
  SmallVector<llvm::Metadata *, 8> argTypeQuals;

  // MDNode for the kernel argument names.
  SmallVector<llvm::Metadata *, 8> argNames;

  for (unsigned i = 0, e = FD->getNumParams(); i != e; ++i) {
    const ParmVarDecl *parm = FD->getParamDecl(i);
    QualType ty = parm->getType();
    std::string typeQuals;

    if (ty->isPointerType()) {
      QualType pointeeTy = ty->getPointeeType();

      // Get address qualifier.
      addressQuals.push_back(llvm::ConstantAsMetadata::get(Builder.getInt32(
          ArgInfoAddressSpace(pointeeTy.getAddressSpace()))));

      // Get argument type name.
      std::string typeName =
          pointeeTy.getUnqualifiedType().getAsString(Policy) + "*";

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (pointeeTy.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);
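      // (e.g. "unsigned int*" becomes "uint*".)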

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      std::string baseTypeName =
          pointeeTy.getUnqualifiedType().getCanonicalType().getAsString(
              Policy) +
          "*";

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      // Get argument type qualifiers:
      if (ty.isRestrictQualified())
        typeQuals = "restrict";
      if (pointeeTy.isConstQualified() ||
          (pointeeTy.getAddressSpace() == LangAS::opencl_constant))
        typeQuals += typeQuals.empty() ? "const" : " const";
      if (pointeeTy.isVolatileQualified())
        typeQuals += typeQuals.empty() ? "volatile" : " volatile";
    } else {
      uint32_t AddrSpc = 0;
      bool isPipe = ty->isPipeType();
      if (ty->isImageType() || isPipe)
        AddrSpc = ArgInfoAddressSpace(LangAS::opencl_global);

      addressQuals.push_back(
          llvm::ConstantAsMetadata::get(Builder.getInt32(AddrSpc)));

      // Get argument type name.
      std::string typeName;
      if (isPipe)
        typeName = ty.getCanonicalType()->getAs<PipeType>()->getElementType()
                       .getAsString(Policy);
      else
        typeName = ty.getUnqualifiedType().getAsString(Policy);

      // Turn "unsigned type" to "utype"
      std::string::size_type pos = typeName.find("unsigned");
      if (ty.isCanonical() && pos != std::string::npos)
        typeName.erase(pos+1, 8);

      std::string baseTypeName;
      if (isPipe)
        baseTypeName = ty.getCanonicalType()->getAs<PipeType>()
                           ->getElementType().getCanonicalType()
                           .getAsString(Policy);
      else
        baseTypeName =
            ty.getUnqualifiedType().getCanonicalType().getAsString(Policy);

      // Remove access qualifiers on images
      // (as they are inseparable from type in clang implementation,
      // but OpenCL spec provides a special query to get access qualifier
      // via clGetKernelArgInfo with CL_KERNEL_ARG_ACCESS_QUALIFIER):
      if (ty->isImageType()) {
        removeImageAccessQualifier(typeName);
        removeImageAccessQualifier(baseTypeName);
      }

      argTypeNames.push_back(llvm::MDString::get(Context, typeName));

      // Turn "unsigned type" to "utype"
      pos = baseTypeName.find("unsigned");
      if (pos != std::string::npos)
        baseTypeName.erase(pos+1, 8);

      argBaseTypeNames.push_back(llvm::MDString::get(Context, baseTypeName));

      if (isPipe)
        typeQuals = "pipe";
    }

    argTypeQuals.push_back(llvm::MDString::get(Context, typeQuals));

    // Get image and pipe access qualifier:
    if (ty->isImageType() || ty->isPipeType()) {
      const Decl *PDecl = parm;
      if (auto *TD = dyn_cast<TypedefType>(ty))
        PDecl = TD->getDecl();
      const OpenCLAccessAttr *A = PDecl->getAttr<OpenCLAccessAttr>();
      if (A && A->isWriteOnly())
        accessQuals.push_back(llvm::MDString::get(Context, "write_only"));
      else if (A && A->isReadWrite())
        accessQuals.push_back(llvm::MDString::get(Context, "read_write"));
      else
        accessQuals.push_back(llvm::MDString::get(Context, "read_only"));
    } else
      accessQuals.push_back(llvm::MDString::get(Context, "none"));

    // Get argument name.
    argNames.push_back(llvm::MDString::get(Context, parm->getName()));
  }

  Fn->setMetadata("kernel_arg_addr_space",
                  llvm::MDNode::get(Context, addressQuals));
  Fn->setMetadata("kernel_arg_access_qual",
                  llvm::MDNode::get(Context, accessQuals));
  Fn->setMetadata("kernel_arg_type",
                  llvm::MDNode::get(Context, argTypeNames));
  Fn->setMetadata("kernel_arg_base_type",
                  llvm::MDNode::get(Context, argBaseTypeNames));
  Fn->setMetadata("kernel_arg_type_qual",
                  llvm::MDNode::get(Context, argTypeQuals));
  if (CGM.getCodeGenOpts().EmitOpenCLArgMetadata)
    Fn->setMetadata("kernel_arg_name",
                    llvm::MDNode::get(Context, argNames));
}

void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
                                               llvm::Function *Fn)
{
  if (!FD->hasAttr<OpenCLKernelAttr>())
    return;

  llvm::LLVMContext &Context = getLLVMContext();

  GenOpenCLArgMetadata(FD, Fn, CGM, Context, Builder, getContext());

  if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
    QualType HintQTy = A->getTypeHint();
    const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
    bool IsSignedInteger =
        HintQTy->isSignedIntegerType() ||
        (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
            CGM.getTypes().ConvertType(A->getTypeHint()))),
        llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
            llvm::IntegerType::get(Context, 32),
            llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
    Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("work_group_size_hint",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
    Fn->setMetadata("reqd_work_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }

  if (const OpenCLIntelReqdSubGroupSizeAttr *A =
          FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
    llvm::Metadata *AttrMDArgs[] = {
        llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
    Fn->setMetadata("intel_reqd_sub_group_size",
                    llvm::MDNode::get(Context, AttrMDArgs));
  }
}

/// Determine whether the function F ends with a return stmt.
static bool endsWithReturn(const Decl* F) {
  const Stmt *Body = nullptr;
  if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
    Body = FD->getBody();
  else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
    Body = OMD->getBody();

  if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
    auto LastStmt = CS->body_rbegin();
    if (LastStmt != CS->body_rend())
      return isa<ReturnStmt>(*LastStmt);
  }
  return false;
}

void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
  if (SanOpts.has(SanitizerKind::Thread)) {
    Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
    Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
  }
}

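/// Return true if D looks like an STL allocator's allocate() member function:
/// a method named 'allocate' that takes a size_t, optionally followed by a
/// 'const void *' hint parameter.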
static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
  auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
  if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
      !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
      (MD->getNumParams() != 1 && MD->getNumParams() != 2))
    return false;

  if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
    return false;

  if (MD->getNumParams() == 2) {
    auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
    if (!PT || !PT->isVoidPointerType() ||
        !PT->getPointeeType().isConstQualified())
      return false;
  }

  return true;
}

/// Return the UBSan prologue signature for \p FD if one is available.
static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
                                            const FunctionDecl *FD) {
  if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
    if (!MD->isStatic())
      return nullptr;
  return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
}

void CodeGenFunction::StartFunction(GlobalDecl GD,
                                    QualType RetTy,
                                    llvm::Function *Fn,
                                    const CGFunctionInfo &FnInfo,
                                    const FunctionArgList &Args,
                                    SourceLocation Loc,
                                    SourceLocation StartLoc) {
  assert(!CurFn &&
         "Do not use a CodeGenFunction object for more than one function");

  const Decl *D = GD.getDecl();

  DidCallStackSave = false;
  CurCodeDecl = D;
  if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
    if (FD->usesSEHTry())
      CurSEHParent = FD;
  CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
  FnRetTy = RetTy;
  CurFn = Fn;
  CurFnInfo = &FnInfo;
  assert(CurFn->isDeclaration() && "Function already has body?");

  // If this function has been blacklisted for any of the enabled sanitizers,
  // disable the sanitizer for the function.
  do {
#define SANITIZER(NAME, ID)                                                    \
  if (SanOpts.empty())                                                         \
    break;                                                                     \
  if (SanOpts.has(SanitizerKind::ID))                                          \
    if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc))                \
      SanOpts.set(SanitizerKind::ID, false);

#include "clang/Basic/Sanitizers.def"
#undef SANITIZER
  } while (0);

  if (D) {
    // Apply the no_sanitize* attributes to SanOpts.
    for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
      SanitizerMask mask = Attr->getMask();
      SanOpts.Mask &= ~mask;
      if (mask & SanitizerKind::Address)
        SanOpts.set(SanitizerKind::KernelAddress, false);
      if (mask & SanitizerKind::KernelAddress)
        SanOpts.set(SanitizerKind::Address, false);
      if (mask & SanitizerKind::HWAddress)
        SanOpts.set(SanitizerKind::KernelHWAddress, false);
      if (mask & SanitizerKind::KernelHWAddress)
        SanOpts.set(SanitizerKind::HWAddress, false);
    }
  }

  // Apply sanitizer attributes to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
  if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
    Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
  if (SanOpts.has(SanitizerKind::Thread))
    Fn->addFnAttr(llvm::Attribute::SanitizeThread);
  if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
    Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
  if (SanOpts.has(SanitizerKind::SafeStack))
    Fn->addFnAttr(llvm::Attribute::SafeStack);
  if (SanOpts.has(SanitizerKind::ShadowCallStack))
    Fn->addFnAttr(llvm::Attribute::ShadowCallStack);

  // Apply fuzzing attribute to the function.
  if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
    Fn->addFnAttr(llvm::Attribute::OptForFuzzing);

  // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
  // .cxx_destruct, __destroy_helper_block_ and all of their callees at run
  // time.
  if (SanOpts.has(SanitizerKind::Thread)) {
    if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
      IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
      if (OMD->getMethodFamily() == OMF_dealloc ||
          OMD->getMethodFamily() == OMF_initialize ||
          (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
        markAsIgnoreThreadCheckingAtRuntime(Fn);
      }
    }
  }

  // Ignore unrelated casts in STL allocate() since the allocator must cast
  // from void* to T* before object initialization completes. Don't match on
  // the namespace because not all allocators are in std::
  if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
    if (matchesStlAllocatorFn(D, getContext()))
      SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
  }

  // Apply xray attributes to the function (as a string, for now)
  if (D) {
    if (const auto *XRayAttr = D->getAttr<XRayInstrumentAttr>()) {
      if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
              XRayInstrKind::Function)) {
        if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction())
          Fn->addFnAttr("function-instrument", "xray-always");
        if (XRayAttr->neverXRayInstrument())
          Fn->addFnAttr("function-instrument", "xray-never");
        if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
          if (ShouldXRayInstrumentFunction())
            Fn->addFnAttr("xray-log-args",
                          llvm::utostr(LogArgs->getArgumentCount()));
      }
    } else {
      if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
        Fn->addFnAttr(
            "xray-instruction-threshold",
            llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
    }
  }

  // Add no-jump-tables value.
  Fn->addFnAttr("no-jump-tables",
                llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));

  // Add profile-sample-accurate value.
  if (CGM.getCodeGenOpts().ProfileSampleAccurate)
    Fn->addFnAttr("profile-sample-accurate");

  if (getLangOpts().OpenCL) {
    // Add metadata for a kernel function.
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      EmitOpenCLKernelMetadata(FD, Fn);
  }

  // If we are checking function types, emit a function type signature as
  // prologue data.
  if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
      if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
        // Remove any (C++17) exception specifications, to allow calling e.g. a
        // noexcept function through a non-noexcept pointer.
        auto ProtoTy =
            getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
                                                          EST_None);
        llvm::Constant *FTRTTIConst =
            CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
        llvm::Constant *FTRTTIConstEncoded =
            EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
        llvm::Constant *PrologueStructElems[] = {PrologueSig,
                                                 FTRTTIConstEncoded};
        llvm::Constant *PrologueStructConst =
            llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
        Fn->setPrologueData(PrologueStructConst);
      }
    }
  }

  // If we're checking nullability, we need to know whether we can check the
  // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
  if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
    auto Nullability = FnRetTy->getNullability(getContext());
    if (Nullability && *Nullability == NullabilityKind::NonNull) {
      if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
            CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
        RetValNullabilityPrecondition =
            llvm::ConstantInt::getTrue(getLLVMContext());
    }
  }

  // If we're in C++ mode and the function name is "main", it is guaranteed
  // to be norecurse by the standard (3.6.1.3 "The function main shall not be
  // used within a program").
  if (getLangOpts().CPlusPlus)
    if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (FD->isMain())
        Fn->addFnAttr(llvm::Attribute::NoRecurse);

  // If a custom alignment is used, force realigning to this alignment on
  // any main function which certainly will need it.
  if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
    if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
        CGM.getCodeGenOpts().StackAlignment)
      Fn->addFnAttr("stackrealign");

  llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);

  // Create a marker to make it easy to insert allocas into the entry block
  // later. Don't create this with the builder, because we don't want it
  // folded.
  llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
  AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);

  ReturnBlock = getJumpDestInCurrentScope("return");

  Builder.SetInsertPoint(EntryBB);

  // If we're checking the return value, allocate space for a pointer to a
  // precise source location of the checked return statement.
  if (requiresReturnValueCheck()) {
    ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
    InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
  }

  // Emit subprogram debug descriptor.
  if (CGDebugInfo *DI = getDebugInfo()) {
    // Reconstruct the type from the argument list so that implicit parameters,
    // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
    // convention.
    CallingConv CC = CallingConv::CC_C;
    if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
      if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
        CC = SrcFnTy->getCallConv();
    SmallVector<QualType, 16> ArgTypes;
    for (const VarDecl *VD : Args)
      ArgTypes.push_back(VD->getType());
    QualType FnType = getContext().getFunctionType(
        RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
    DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
                          Builder);
  }

  if (ShouldInstrumentFunction()) {
    if (CGM.getCodeGenOpts().InstrumentFunctions)
      CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter");
    if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
      CurFn->addFnAttr("instrument-function-entry-inlined",
                       "__cyg_profile_func_enter_bare");
  }

  // Since emitting the mcount call here impacts optimizations such as
  // function inlining, we just add an attribute to insert an mcount call in
  // the backend. The attribute is set to the mcount function name, which is
  // architecture dependent.
  if (CGM.getCodeGenOpts().InstrumentForProfiling) {
    // Calls to fentry/mcount should not be generated if function has
    // the no_instrument_function attribute.
    if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
      if (CGM.getCodeGenOpts().CallFEntry)
        Fn->addFnAttr("fentry-call", "true");
      else {
        Fn->addFnAttr("instrument-function-entry-inlined",
                      getTarget().getMCountName());
      }
    }
  }

  if (RetTy->isVoidType()) {
    // Void type; nothing to return.
    ReturnValue = Address::invalid();

    // Count the implicit return.
    if (!endsWithReturn(D))
      ++NumReturnExprs;
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
    // Indirect return; emit returned value directly into sret slot.
    // This reduces code size, and affects correctness in C++.
    auto AI = CurFn->arg_begin();
    if (CurFnInfo->getReturnInfo().isSRetAfterThis())
      ++AI;
    ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
  } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
             !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
    // Load the sret pointer from the argument struct and return into that.
    unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
    llvm::Function::arg_iterator EI = CurFn->arg_end();
    --EI;
    llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
    Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
    ReturnValue = Address(Addr, getNaturalTypeAlignment(RetTy));
  } else {
    ReturnValue = CreateIRTemp(RetTy, "retval");

    // Tell the epilog emitter to autorelease the result. We do this
    // now so that various specialized functions can suppress it
    // during their IR-generation.
    if (getLangOpts().ObjCAutoRefCount &&
        !CurFnInfo->isReturnsRetained() &&
        RetTy->isObjCRetainableType())
      AutoreleaseResult = true;
  }

  EmitStartEHSpec(CurCodeDecl);

  PrologueCleanupDepth = EHStack.stable_begin();

  // Emit OpenMP specific initialization of the device functions.
  if (getLangOpts().OpenMP && CurCodeDecl)
    CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);

  EmitFunctionProlog(*CurFnInfo, CurFn, Args);

  if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
    CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
    const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
    if (MD->getParent()->isLambda() &&
        MD->getOverloadedOperator() == OO_Call) {
      // We're in a lambda; figure out the captures.
      MD->getParent()->getCaptureFields(LambdaCaptureFields,
                                        LambdaThisCaptureField);
      if (LambdaThisCaptureField) {
        // If the lambda captures the object referred to by '*this' - either by
        // value or by reference, make sure CXXThisValue points to the correct
        // object.

        // Get the lvalue for the field (which is a copy of the enclosing
        // object or contains the address of the enclosing object).
        LValue ThisFieldLValue =
            EmitLValueForLambdaField(LambdaThisCaptureField);
        if (!LambdaThisCaptureField->getType()->isPointerType()) {
          // If the enclosing object was captured by value, just use its
          // address.
          CXXThisValue = ThisFieldLValue.getAddress().getPointer();
        } else {
          // Load the lvalue pointed to by the field, since '*this' was
          // captured by reference.
          CXXThisValue =
              EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
        }
      }
      for (auto *FD : MD->getParent()->fields()) {
        if (FD->hasCapturedVLAType()) {
          auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
                                           SourceLocation()).getScalarVal();
          auto VAT = FD->getCapturedVLAType();
          VLASizeMap[VAT->getSizeExpr()] = ExprArg;
        }
      }
    } else {
      // Not in a lambda; just use 'this' from the method.
      // FIXME: Should we generate a new load for each use of 'this'? The
      // fast register allocator would be happier...
      CXXThisValue = CXXABIThisValue;
    }

    // Check the 'this' pointer once per function, if it's available.
    if (CXXABIThisValue) {
      SanitizerSet SkippedChecks;
      SkippedChecks.set(SanitizerKind::ObjectSize, true);
      QualType ThisTy = MD->getThisType();

      // If this is the call operator of a lambda with no capture-default, it
      // may have a static invoker function, which may call this operator with
      // a null 'this' pointer.
      if (isLambdaCallOperator(MD) &&
          MD->getParent()->getLambdaCaptureDefault() == LCD_None)
        SkippedChecks.set(SanitizerKind::Null, true);

      EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
                                                : TCK_MemberCall,
                    Loc, CXXABIThisValue, ThisTy,
                    getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
                    SkippedChecks);
    }
  }

  // If any of the arguments have a variably modified type, make sure to
  // emit the type size.
  for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
       i != e; ++i) {
    const VarDecl *VD = *i;

    // Dig out the type as written from ParmVarDecls; it's unclear whether
    // the standard (C99 6.9.1p10) requires this, but we're following the
    // precedent set by gcc.
    QualType Ty;
    if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
      Ty = PVD->getOriginalType();
    else
      Ty = VD->getType();

    if (Ty->isVariablyModifiedType())
      EmitVariablyModifiedType(Ty);
  }
  // Emit a location at the end of the prologue.
  if (CGDebugInfo *DI = getDebugInfo())
    DI->EmitLocation(Builder, StartLoc);

  // TODO: Do we need to handle this in two places like we do with
  // target-features/target-cpu?
  if (CurFuncDecl)
    if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
      LargestVectorWidth = VecWidth->getVectorWidth();
}

void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
  incrementProfileCounter(Body);
  if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
    EmitCompoundStmtWithoutScope(*S);
  else
    EmitStmt(Body);
}

/// When instrumenting to collect profile data, the counts for some blocks
/// such as switch cases need to not include the fall-through counts, so
/// emit a branch around the instrumentation code. When not instrumenting,
/// this just calls EmitBlock().
void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
                                               const Stmt *S) {
  llvm::BasicBlock *SkipCountBB = nullptr;
  if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
    // When instrumenting for profiling, the fallthrough to certain
    // statements needs to skip over the instrumentation code so that we
    // get an accurate count.
    SkipCountBB = createBasicBlock("skipcount");
    EmitBranch(SkipCountBB);
  }
  EmitBlock(BB);
  uint64_t CurrentCount = getCurrentProfileCount();
  incrementProfileCounter(S);
  setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
  if (SkipCountBB)
    EmitBlock(SkipCountBB);
}
1234
1235 /// Tries to mark the given function nounwind based on the
1236 /// non-existence of any throwing calls within it. We believe this is
1237 /// lightweight enough to do at -O0.
TryMarkNoThrow(llvm::Function * F)1238 static void TryMarkNoThrow(llvm::Function *F) {
1239 // LLVM treats 'nounwind' on a function as part of the type, so we
1240 // can't do this on functions that can be overwritten.
1241 if (F->isInterposable()) return;
1242
1243 for (llvm::BasicBlock &BB : *F)
1244 for (llvm::Instruction &I : BB)
1245 if (I.mayThrow())
1246 return;
1247
1248 F->setDoesNotThrow();
1249 }
1250
BuildFunctionArgList(GlobalDecl GD,FunctionArgList & Args)1251 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1252 FunctionArgList &Args) {
1253 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1254 QualType ResTy = FD->getReturnType();
1255
1256 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1257 if (MD && MD->isInstance()) {
1258 if (CGM.getCXXABI().HasThisReturn(GD))
1259 ResTy = MD->getThisType();
1260 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1261 ResTy = CGM.getContext().VoidPtrTy;
1262 CGM.getCXXABI().buildThisParam(*this, Args);
1263 }
1264
1265 // The base version of an inheriting constructor whose constructed base is a
1266 // virtual base is not passed any arguments (because it doesn't actually call
1267 // the inherited constructor).
1268 bool PassedParams = true;
1269 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1270 if (auto Inherited = CD->getInheritedConstructor())
1271 PassedParams =
1272 getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1273
1274 if (PassedParams) {
1275 for (auto *Param : FD->parameters()) {
1276 Args.push_back(Param);
1277 if (!Param->hasAttr<PassObjectSizeAttr>())
1278 continue;
1279
1280 auto *Implicit = ImplicitParamDecl::Create(
1281 getContext(), Param->getDeclContext(), Param->getLocation(),
1282 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1283 SizeArguments[Param] = Implicit;
1284 Args.push_back(Implicit);
1285 }
1286 }
1287
1288 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1289 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1290
1291 return ResTy;
1292 }
1293
1294 static bool
shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl * FD,const ASTContext & Context)1295 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1296 const ASTContext &Context) {
1297 QualType T = FD->getReturnType();
1298 // Avoid the optimization for functions that return a record type with a
1299 // trivial destructor or another trivially copyable type.
1300 if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1301 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1302 return !ClassDecl->hasTrivialDestructor();
1303 }
1304 return !T.isTriviallyCopyableType(Context);
1305 }
1306
GenerateCode(GlobalDecl GD,llvm::Function * Fn,const CGFunctionInfo & FnInfo)1307 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1308 const CGFunctionInfo &FnInfo) {
1309 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1310 CurGD = GD;
1311
1312 FunctionArgList Args;
1313 QualType ResTy = BuildFunctionArgList(GD, Args);
1314
1315 // Check if we should generate debug info for this function.
1316 if (FD->hasAttr<NoDebugAttr>())
1317 DebugInfo = nullptr; // disable debug info indefinitely for this function
1318
1319 // The function might not have a body if we're generating thunks for a
1320 // function declaration.
1321 SourceRange BodyRange;
1322 if (Stmt *Body = FD->getBody())
1323 BodyRange = Body->getSourceRange();
1324 else
1325 BodyRange = FD->getLocation();
1326 CurEHLocation = BodyRange.getEnd();
1327
1328 // Use the location of the start of the function to determine where
1329 // the function definition is located. By default use the location
1330 // of the declaration as the location for the subprogram. A function
1331 // may lack a declaration in the source code if it is created by code
1332 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1333 SourceLocation Loc = FD->getLocation();
1334
1335 // If this is a function specialization then use the pattern body
1336 // as the location for the function.
1337 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1338 if (SpecDecl->hasBody(SpecDecl))
1339 Loc = SpecDecl->getLocation();
1340
1341 Stmt *Body = FD->getBody();
1342
1343 // Initialize helper which will detect jumps which can cause invalid lifetime
1344 // markers.
1345 if (Body && ShouldEmitLifetimeMarkers)
1346 Bypasses.Init(Body);
1347
1348 // Emit the standard function prologue.
1349 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1350
1351 // Generate the body of the function.
1352 PGO.assignRegionCounters(GD, CurFn);
1353 if (isa<CXXDestructorDecl>(FD))
1354 EmitDestructorBody(Args);
1355 else if (isa<CXXConstructorDecl>(FD))
1356 EmitConstructorBody(Args);
1357 else if (getLangOpts().CUDA &&
1358 !getLangOpts().CUDAIsDevice &&
1359 FD->hasAttr<CUDAGlobalAttr>())
1360 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1361 else if (isa<CXXMethodDecl>(FD) &&
1362 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1363 // The lambda static invoker function is special, because it forwards or
1364 // clones the body of the function call operator (but is actually static).
1365 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1366 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1367 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1368 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1369 // Implicit copy-assignment gets the same special treatment as implicit
1370 // copy-constructors.
1371 emitImplicitAssignmentOperatorBody(Args);
1372 } else if (Body) {
1373 EmitFunctionBody(Body);
1374 } else
1375 llvm_unreachable("no definition for emitted function");
1376
1377 // C++11 [stmt.return]p2:
1378 // Flowing off the end of a function [...] results in undefined behavior in
1379 // a value-returning function.
1380 // C11 6.9.1p12:
1381 // If the '}' that terminates a function is reached, and the value of the
1382 // function call is used by the caller, the behavior is undefined.
  if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
      !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
    bool ShouldEmitUnreachable =
        CGM.getCodeGenOpts().StrictReturn ||
        shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
    if (SanOpts.has(SanitizerKind::Return)) {
      SanitizerScope SanScope(this);
      llvm::Value *IsFalse = Builder.getFalse();
      EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
                SanitizerHandler::MissingReturn,
                EmitCheckSourceLocation(FD->getLocation()), None);
    } else if (ShouldEmitUnreachable) {
      if (CGM.getCodeGenOpts().OptimizationLevel == 0)
        EmitTrapCall(llvm::Intrinsic::trap);
    }
    if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
      Builder.CreateUnreachable();
      Builder.ClearInsertionPoint();
    }
  }

  // Emit the standard function epilogue.
  FinishFunction(BodyRange.getEnd());

  // If we haven't marked the function nothrow through other means, do
  // a quick pass now to see if we can.
  if (!CurFn->doesNotThrow())
    TryMarkNoThrow(CurFn);
}

/// ContainsLabel - Return true if the statement contains a label. If this
/// statement is not executed normally, then not containing a label means we
/// can safely remove the dead code.
bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
  // Null statement, not a label!
  if (!S) return false;

  // If this is a label, we have to emit the code, consider something like:
  // if (0) { ... foo: bar(); } goto foo;
  //
  // TODO: If anyone cared, we could track __label__'s, since we know that you
  // can't jump to one from outside their declared region.
  if (isa<LabelStmt>(S))
    return true;

  // If this is a case/default statement, and we haven't seen a switch, we
  // have to emit the code.
  if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
    return true;

  // If this is a switch statement, we want to ignore cases below it.
  if (isa<SwitchStmt>(S))
    IgnoreCaseStmts = true;

  // Scan subexpressions for verboten labels.
  for (const Stmt *SubStmt : S->children())
    if (ContainsLabel(SubStmt, IgnoreCaseStmts))
      return true;

  return false;
}

/// containsBreak - Return true if the statement contains a break out of it.
/// If the statement (recursively) contains a switch or loop with a break
/// inside of it, this is fine.
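/// For example (illustrative): 'if (x) break;' contains a break out of the
/// statement, while 'while (x) { break; }' does not, because the loop owns
/// its break.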
bool CodeGenFunction::containsBreak(const Stmt *S) {
  // Null statement, no break!
  if (!S) return false;

  // If this is a switch or loop that defines its own break scope, then we can
  // include it and anything inside of it.
  if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
      isa<ForStmt>(S))
    return false;

  if (isa<BreakStmt>(S))
    return true;

  // Scan subexpressions for verboten breaks.
  for (const Stmt *SubStmt : S->children())
    if (containsBreak(SubStmt))
      return true;

  return false;
}

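/// mightAddDeclToScope - Return true if the statement might introduce a
/// declaration into the *current* scope rather than into a child scope of
/// its own. A sketch of the distinction: as a substatement, 'int x = 0;'
/// adds 'x' to the enclosing scope, whereas 'if (c) int x = 0;' does not,
/// because the if statement opens its own scope.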
bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
  if (!S) return false;

  // Some statement kinds add a scope and thus never add a decl to the current
  // scope. Note, this list is longer than the list of statements that might
  // have an unscoped decl nested within them, but this way is conservatively
  // correct even if more statement kinds are added.
  if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
      isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
      isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
      isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
    return false;

  if (isa<DeclStmt>(S))
    return true;

  for (const Stmt *SubStmt : S->children())
    if (mightAddDeclToScope(SubStmt))
      return true;

  return false;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the boolean result in ResultBool.
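/// For instance (illustrative), the condition of 'if (4 < 5)' folds to true,
/// while 'if (0) { L: ; }' folds to a constant but is rejected here when
/// AllowLabels is false because of the label.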
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   bool &ResultBool,
                                                   bool AllowLabels) {
  llvm::APSInt ResultInt;
  if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
    return false;

  ResultBool = ResultInt.getBoolValue();
  return true;
}

/// ConstantFoldsToSimpleInteger - If the specified expression does not fold
/// to a constant, or if it does but contains a label, return false. If it
/// constant folds return true and set the folded value.
bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
                                                   llvm::APSInt &ResultInt,
                                                   bool AllowLabels) {
  // FIXME: Rename and handle conversion of other evaluatable things
  // to bool.
  Expr::EvalResult Result;
  if (!Cond->EvaluateAsInt(Result, getContext()))
    return false; // Not foldable, not integer or not fully evaluatable.

  llvm::APSInt Int = Result.Val.getInt();
  if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
    return false; // Contains a label.

  ResultInt = Int;
  return true;
}

/// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an
/// if statement) to the specified blocks. Based on the condition, this might
/// try to simplify the emitted code for the conditional.
void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
                                           llvm::BasicBlock *TrueBlock,
                                           llvm::BasicBlock *FalseBlock,
                                           uint64_t TrueCount) {
  Cond = Cond->IgnoreParens();

  if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {

    // Handle X && Y in a condition.
    if (CondBOp->getOpcode() == BO_LAnd) {
      // If we have "1 && X", simplify the code. "0 && X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          ConstantBool) {
        // br(1 && X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X && 1", simplify the code to use an uncond branch.
      // "X && 0" would have been constant folded to 0.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          ConstantBool) {
        // br(X && 1) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is false, we
      // want to jump to the FalseBlock.
      llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
      // The counter tells us how often we evaluate RHS, and all of TrueCount
      // can be propagated to that branch.
      uint64_t RHSCount = getProfileCount(CondBOp->getRHS());

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount);
        EmitBlock(LHSTrue);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount);
      eval.end(*this);

      return;
    }

    if (CondBOp->getOpcode() == BO_LOr) {
      // If we have "0 || X", simplify the code. "1 || X" would have constant
      // folded if the case was simple enough.
      bool ConstantBool = false;
      if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
          !ConstantBool) {
        // br(0 || X) -> br(X).
        incrementProfileCounter(CondBOp);
        return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // If we have "X || 0", simplify the code to use an uncond branch.
      // "X || 1" would have been constant folded to 1.
      if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
          !ConstantBool) {
        // br(X || 0) -> br(X).
        return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
                                    TrueCount);
      }

      // Emit the LHS as a conditional. If the LHS conditional is true, we
      // want to jump to the TrueBlock.
      llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
      // We have the count for entry to the RHS and for the whole expression
      // being true, so we can divvy up the true count between the short
      // circuit and the RHS.
      uint64_t LHSCount =
          getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
      uint64_t RHSCount = TrueCount - LHSCount;

      ConditionalEvaluation eval(*this);
      {
        ApplyDebugLocation DL(*this, Cond);
        EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount);
        EmitBlock(LHSFalse);
      }

      incrementProfileCounter(CondBOp);
      setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));

      // Any temporaries created here are conditional.
      eval.begin(*this);
      EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount);

      eval.end(*this);

      return;
    }
  }

  if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
    // br(!x, t, f) -> br(x, f, t)
    if (CondUOp->getOpcode() == UO_LNot) {
      // Negate the count.
      uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
      // Negate the condition and swap the destination blocks.
      return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
                                  FalseCount);
    }
  }

  if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
    // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
    llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
    llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");

    ConditionalEvaluation cond(*this);
    EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
                         getProfileCount(CondOp));

    // When computing PGO branch weights, we only know the overall count for
    // the true block. This code is essentially doing tail duplication of the
    // naive code-gen, introducing new edges for which counts are not
    // available. Divide the counts proportionally between the LHS and RHS of
    // the conditional operator.
    uint64_t LHSScaledTrueCount = 0;
    if (TrueCount) {
      double LHSRatio =
          getProfileCount(CondOp) / (double)getCurrentProfileCount();
      LHSScaledTrueCount = TrueCount * LHSRatio;
    }

    cond.begin(*this);
    EmitBlock(LHSBlock);
    incrementProfileCounter(CondOp);
    {
      ApplyDebugLocation DL(*this, Cond);
      EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
                           LHSScaledTrueCount);
    }
    cond.end(*this);

    cond.begin(*this);
    EmitBlock(RHSBlock);
    EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
                         TrueCount - LHSScaledTrueCount);
    cond.end(*this);

    return;
  }

  if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
    // Conditional operator handling can give us a throw expression as a
    // condition for a case like:
    //   br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f))
    // Fold this to:
    //   br(c, throw x, br(y, t, f))
    EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
    return;
  }

  // If the branch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the branch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
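  // For example (illustrative):
  //   if (__builtin_unpredictable(ptr != nullptr)) { ... }
  // attaches !unpredictable metadata to the resulting conditional branch.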
  llvm::MDNode *Unpredictable = nullptr;
  auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      Unpredictable = MDHelper.createUnpredictable();
    }
  }

  // Create branch weights based on the number of times we get here and the
  // number of times the condition should be true.
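  // For instance (illustrative), if this point is reached 100 times and the
  // condition is expected to be true for 30 of them, the emitted weights are
  // (30, 70).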
  uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
  llvm::MDNode *Weights =
      createProfileWeights(TrueCount, CurrentCount - TrueCount);

  // Emit the code with the fully general case.
  llvm::Value *CondV;
  {
    ApplyDebugLocation DL(*this, Cond);
    CondV = EvaluateExprAsBool(Cond);
  }
  Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
}

/// ErrorUnsupported - Print out an error that codegen doesn't support the
/// specified stmt yet.
void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
  CGM.ErrorUnsupported(S, Type);
}

/// emitNonZeroVLAInit - Emit the "zero" initialization of a
/// variable-length array whose elements have a non-zero bit-pattern.
///
/// \param baseType the inner-most element type of the array
/// \param src - a char* pointing to the bit-pattern for a single
/// base element of the array
/// \param sizeInChars - the total size of the VLA, in chars
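/// A case where this arises (illustrative): a VLA of pointers to data
/// members, e.g. 'int S::*a[n];', whose null value is all-ones under the
/// Itanium C++ ABI rather than all-zeros.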
static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
                               Address dest, Address src,
                               llvm::Value *sizeInChars) {
  CGBuilderTy &Builder = CGF.Builder;

  CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
  llvm::Value *baseSizeInChars
    = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());

  Address begin =
    Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
  llvm::Value *end =
    Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");

  llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
  llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
  llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");

  // Make a loop over the VLA. C99 guarantees that the VLA element
  // count must be nonzero.
  CGF.EmitBlock(loopBB);

  llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
  cur->addIncoming(begin.getPointer(), originBB);

  CharUnits curAlign =
    dest.getAlignment().alignmentOfArrayElement(baseSize);

  // memcpy the individual element bit-pattern.
  Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
                       /*volatile*/ false);

  // Go to the next element.
  llvm::Value *next =
    Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");

  // Leave if that's the end of the VLA.
  llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
  Builder.CreateCondBr(done, contBB, loopBB);
  cur->addIncoming(next, loopBB);

  CGF.EmitBlock(contBB);
}

void
CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
  // Ignore empty classes in C++.
  if (getLangOpts().CPlusPlus) {
    if (const RecordType *RT = Ty->getAs<RecordType>()) {
      if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
        return;
    }
  }

  // Cast the dest ptr to the appropriate i8 pointer type.
  if (DestPtr.getElementType() != Int8Ty)
    DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);

  // Get size and alignment info for this aggregate.
  CharUnits size = getContext().getTypeSizeInChars(Ty);

  llvm::Value *SizeVal;
  const VariableArrayType *vla;

  // Don't bother emitting a zero-byte memset.
  if (size.isZero()) {
    // But note that getTypeInfo returns 0 for a VLA.
    if (const VariableArrayType *vlaType =
            dyn_cast_or_null<VariableArrayType>(
                getContext().getAsArrayType(Ty))) {
      auto VlaSize = getVLASize(vlaType);
      SizeVal = VlaSize.NumElts;
      CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
      if (!eltSize.isOne())
        SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
      vla = vlaType;
    } else {
      return;
    }
  } else {
    SizeVal = CGM.getSize(size);
    vla = nullptr;
  }

  // If the type contains a pointer to data member we can't memset it to zero.
  // Instead, create a null constant and copy it to the destination.
  // TODO: there are other patterns besides zero that we can usefully memset,
  // like -1, which happens to be the pattern used by member-pointers.
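  // Illustrative example: for 'struct S { int S::*mp; };' the null value of
  // 'mp' is all-ones under the Itanium C++ ABI, so zeroing the storage would
  // not produce a null member pointer.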
  if (!CGM.getTypes().isZeroInitializable(Ty)) {
    // For a VLA, emit a single element, then splat that over the VLA.
    if (vla) Ty = getContext().getBaseElementType(vla);

    llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);

    llvm::GlobalVariable *NullVariable =
      new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
                               /*isConstant=*/true,
                               llvm::GlobalVariable::PrivateLinkage,
                               NullConstant, Twine());
    CharUnits NullAlign = DestPtr.getAlignment();
    NullVariable->setAlignment(NullAlign.getQuantity());
    Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
                   NullAlign);

    if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);

    // Get and call the appropriate llvm.memcpy overload.
    Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
    return;
  }

  // Otherwise, just memset the whole thing to zero. This is legal
  // because in LLVM, all default initializers (other than the ones we just
  // handled above) are guaranteed to have a bit pattern of all zeros.
  Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
}

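// The next two functions support GCC's "labels as values" extension; an
// illustrative use:
//   void *dest = &&retry;   // address of a label, via GetAddrOfLabel
//   goto *dest;             // dispatches through the shared indirect branch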
llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
  // Make sure that there is a block for the indirect goto.
  if (!IndirectBranch)
    GetIndirectGotoBlock();

  llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();

  // Make sure the indirect branch includes all of the address-taken blocks.
  IndirectBranch->addDestination(BB);
  return llvm::BlockAddress::get(CurFn, BB);
}

llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
  // If we already made the indirect branch for indirect goto, return its
  // block.
  if (IndirectBranch) return IndirectBranch->getParent();

  CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));

  // Create the PHI node that indirect gotos will add entries to.
  llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
                                              "indirect.goto.dest");

  // Create the indirect branch instruction.
  IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
  return IndirectBranch->getParent();
}

/// Computes the length of an array in elements, as well as the base
/// element type and a properly-typed first element pointer.
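/// For example (illustrative), for 'int a[2][3]' this returns the value 6,
/// sets baseType to 'int', and adjusts addr to point at the first int.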
llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
                                              QualType &baseType,
                                              Address &addr) {
  const ArrayType *arrayType = origArrayType;

  // If it's a VLA, we have to load the stored size. Note that
  // this is the size of the VLA in bytes, not its size in elements.
  llvm::Value *numVLAElements = nullptr;
  if (isa<VariableArrayType>(arrayType)) {
    numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;

    // Walk into all VLAs. This doesn't require changes to addr,
    // which has type T* where T is the first non-VLA element type.
    do {
      QualType elementType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(elementType);

      // If we only have VLA components, 'addr' requires no adjustment.
      if (!arrayType) {
        baseType = elementType;
        return numVLAElements;
      }
    } while (isa<VariableArrayType>(arrayType));

    // We get out here only if we find a constant array type
    // inside the VLA.
  }

  // We have some number of constant-length arrays, so addr should
  // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
  // down to the first element of addr.
  SmallVector<llvm::Value*, 8> gepIndices;

  // GEP down to the array type.
  llvm::ConstantInt *zero = Builder.getInt32(0);
  gepIndices.push_back(zero);

  uint64_t countFromCLAs = 1;
  QualType eltType;

  llvm::ArrayType *llvmArrayType =
    dyn_cast<llvm::ArrayType>(addr.getElementType());
  while (llvmArrayType) {
    assert(isa<ConstantArrayType>(arrayType));
    assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
             == llvmArrayType->getNumElements());

    gepIndices.push_back(zero);
    countFromCLAs *= llvmArrayType->getNumElements();
    eltType = arrayType->getElementType();

    llvmArrayType =
      dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
    arrayType = getContext().getAsArrayType(arrayType->getElementType());
    assert((!llvmArrayType || arrayType) &&
           "LLVM and Clang types are out of sync");
  }

  if (arrayType) {
    // From this point onwards, the Clang array type has been emitted
    // as some other type (probably a packed struct). Compute the array
    // size, and just emit the 'begin' expression as a bitcast.
    while (arrayType) {
      countFromCLAs *=
          cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
      eltType = arrayType->getElementType();
      arrayType = getContext().getAsArrayType(eltType);
    }

    llvm::Type *baseType = ConvertType(eltType);
    addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
  } else {
    // Create the actual GEP.
    addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
                                             gepIndices, "array.begin"),
                   addr.getAlignment());
  }

  baseType = eltType;

  llvm::Value *numElements
    = llvm::ConstantInt::get(SizeTy, countFromCLAs);

  // If we had any VLA dimensions, factor them in.
  if (numVLAElements)
    numElements = Builder.CreateNUWMul(numVLAElements, numElements);

  return numElements;
}

CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLASize(vla);
}

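/// Illustrative result: for 'int a[n][m]' (with n and m already evaluated
/// into VLASizeMap), the returned pair is { n*m, int }.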
CodeGenFunction::VlaSizePair
CodeGenFunction::getVLASize(const VariableArrayType *type) {
  // The number of elements so far; always size_t.
  llvm::Value *numElements = nullptr;

  QualType elementType;
  do {
    elementType = type->getElementType();
    llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
    assert(vlaSize && "no size for VLA!");
    assert(vlaSize->getType() == SizeTy);

    if (!numElements) {
      numElements = vlaSize;
    } else {
      // It's undefined behavior if this wraps around, so mark it that way.
      // FIXME: Teach -fsanitize=undefined to trap this.
      numElements = Builder.CreateNUWMul(numElements, vlaSize);
    }
  } while ((type = getContext().getAsVariableArrayType(elementType)));

  return { numElements, elementType };
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(QualType type) {
  const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
  assert(vla && "type was not a variable array type!");
  return getVLAElements1D(vla);
}

CodeGenFunction::VlaSizePair
CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
  llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
  assert(VlaSize && "no size for VLA!");
  assert(VlaSize->getType() == SizeTy);
  return { VlaSize, Vla->getElementType() };
}

void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
  assert(type->isVariablyModifiedType() &&
         "Must pass variably modified type to EmitVLASizes!");

  EnsureInsertPoint();

  // We're going to walk down into the type and look for VLA
  // expressions.
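  // For instance (illustrative), for 'int (*p)[n][m]' we walk through the
  // pointer and both VLA levels, evaluating 'n' and 'm' and caching each in
  // VLASizeMap exactly once.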
  do {
    assert(type->isVariablyModifiedType());

    const Type *ty = type.getTypePtr();
    switch (ty->getTypeClass()) {

#define TYPE(Class, Base)
#define ABSTRACT_TYPE(Class, Base)
#define NON_CANONICAL_TYPE(Class, Base)
#define DEPENDENT_TYPE(Class, Base) case Type::Class:
#define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
#include "clang/AST/TypeNodes.def"
      llvm_unreachable("unexpected dependent type!");

    // These types are never variably-modified.
    case Type::Builtin:
    case Type::Complex:
    case Type::Vector:
    case Type::ExtVector:
    case Type::Record:
    case Type::Enum:
    case Type::Elaborated:
    case Type::TemplateSpecialization:
    case Type::ObjCTypeParam:
    case Type::ObjCObject:
    case Type::ObjCInterface:
    case Type::ObjCObjectPointer:
      llvm_unreachable("type class is never variably-modified!");

    case Type::Adjusted:
      type = cast<AdjustedType>(ty)->getAdjustedType();
      break;

    case Type::Decayed:
      type = cast<DecayedType>(ty)->getPointeeType();
      break;

    case Type::Pointer:
      type = cast<PointerType>(ty)->getPointeeType();
      break;

    case Type::BlockPointer:
      type = cast<BlockPointerType>(ty)->getPointeeType();
      break;

    case Type::LValueReference:
    case Type::RValueReference:
      type = cast<ReferenceType>(ty)->getPointeeType();
      break;

    case Type::MemberPointer:
      type = cast<MemberPointerType>(ty)->getPointeeType();
      break;

    case Type::ConstantArray:
    case Type::IncompleteArray:
      // Losing element qualification here is fine.
      type = cast<ArrayType>(ty)->getElementType();
      break;

    case Type::VariableArray: {
      // Losing element qualification here is fine.
      const VariableArrayType *vat = cast<VariableArrayType>(ty);

      // Unknown size indication requires no size computation.
      // Otherwise, evaluate and record it.
      if (const Expr *size = vat->getSizeExpr()) {
        // It's possible that we might have emitted this already,
        // e.g. with a typedef and a pointer to it.
        llvm::Value *&entry = VLASizeMap[size];
        if (!entry) {
          llvm::Value *Size = EmitScalarExpr(size);

          // C11 6.7.6.2p5:
          //   If the size is an expression that is not an integer constant
          //   expression [...] each time it is evaluated it shall have a
          //   value greater than zero.
          if (SanOpts.has(SanitizerKind::VLABound) &&
              size->getType()->isSignedIntegerType()) {
            SanitizerScope SanScope(this);
            llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
            llvm::Constant *StaticArgs[] = {
                EmitCheckSourceLocation(size->getBeginLoc()),
                EmitCheckTypeDescriptor(size->getType())};
            EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
                                     SanitizerKind::VLABound),
                      SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
          }

          // Always zexting here would be wrong if it weren't
          // undefined behavior to have a negative bound.
          entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
        }
      }
      type = vat->getElementType();
      break;
    }

    case Type::FunctionProto:
    case Type::FunctionNoProto:
      type = cast<FunctionType>(ty)->getReturnType();
      break;

    case Type::Paren:
    case Type::TypeOf:
    case Type::UnaryTransform:
    case Type::Attributed:
    case Type::SubstTemplateTypeParm:
    case Type::PackExpansion:
      // Keep walking after single level desugaring.
      type = type.getSingleStepDesugaredType(getContext());
      break;

    case Type::Typedef:
    case Type::Decltype:
    case Type::Auto:
    case Type::DeducedTemplateSpecialization:
      // Stop walking: nothing to do.
      return;

    case Type::TypeOfExpr:
      // Stop walking: emit typeof expression.
      EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
      return;

    case Type::Atomic:
      type = cast<AtomicType>(ty)->getValueType();
      break;

    case Type::Pipe:
      type = cast<PipeType>(ty)->getElementType();
      break;
    }
  } while (type->isVariablyModifiedType());
}

Address CodeGenFunction::EmitVAListRef(const Expr* E) {
  if (getContext().getBuiltinVaListType()->isArrayType())
    return EmitPointerWithAlignment(E);
  return EmitLValue(E).getAddress();
}

Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
  return EmitLValue(E).getAddress();
}

void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
                                              const APValue &Init) {
  assert(!Init.isUninit() && "Invalid DeclRefExpr initializer!");
  if (CGDebugInfo *Dbg = getDebugInfo())
    if (CGM.getCodeGenOpts().getDebugInfo() >= codegenoptions::LimitedDebugInfo)
      Dbg->EmitGlobalVariable(E->getDecl(), Init);
}

CodeGenFunction::PeepholeProtection
CodeGenFunction::protectFromPeepholes(RValue rvalue) {
  // At the moment, the only aggressive peephole we do in IR gen
  // is trunc(zext) folding, but if we add more, we can easily
  // extend this protection.

  if (!rvalue.isScalar()) return PeepholeProtection();
  llvm::Value *value = rvalue.getScalarVal();
  if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();

  // Just make an extra bitcast.
  assert(HaveInsertPoint());
  llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
                                                  Builder.GetInsertBlock());

  PeepholeProtection protection;
  protection.Inst = inst;
  return protection;
}

void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
  if (!protection.Inst) return;

  // In theory, we could try to duplicate the peepholes now, but whatever.
  protection.Inst->eraseFromParent();
}

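// Illustrative trigger for the overloads below:
//   void *p = __builtin_assume_aligned(q, 64);
// emits an llvm.assume of the alignment and, under -fsanitize=alignment, a
// run-time check that the assumption actually holds.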
void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              llvm::Value *Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              QualType Ty, SourceLocation Loc,
                                              SourceLocation AssumptionLoc,
                                              unsigned Alignment,
                                              llvm::Value *OffsetValue) {
  llvm::Value *TheCheck;
  llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
      CGM.getDataLayout(), PtrValue, Alignment, OffsetValue, &TheCheck);
  if (SanOpts.has(SanitizerKind::Alignment)) {
    llvm::Value *AlignmentVal = llvm::ConstantInt::get(IntPtrTy, Alignment);
    EmitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, AlignmentVal,
                                 OffsetValue, TheCheck, Assumption);
  }
}

void CodeGenFunction::EmitAlignmentAssumption(llvm::Value *PtrValue,
                                              const Expr *E,
                                              SourceLocation AssumptionLoc,
                                              unsigned Alignment,
                                              llvm::Value *OffsetValue) {
  if (auto *CE = dyn_cast<CastExpr>(E))
    E = CE->getSubExprAsWritten();
  QualType Ty = E->getType();
  SourceLocation Loc = E->getExprLoc();

  EmitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
                          OffsetValue);
}

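// The annotation helpers below lower __attribute__((annotate(...))) into
// calls to the llvm.var.annotation / llvm.ptr.annotation intrinsics; an
// illustrative trigger:
//   int x __attribute__((annotate("my_tag"))) = 0;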
llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Value *AnnotationFn,
                                                 llvm::Value *AnnotatedVal,
                                                 StringRef AnnotationStr,
                                                 SourceLocation Location) {
  llvm::Value *Args[4] = {
    AnnotatedVal,
    Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
    Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
    CGM.EmitAnnotationLineNo(Location)
  };
  return Builder.CreateCall(AnnotationFn, Args);
}

void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  // FIXME We create a new bitcast for every annotation because that's what
  // llvm-gcc was doing.
  for (const auto *I : D->specific_attrs<AnnotateAttr>())
    EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
                       Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
                       I->getAnnotation(), D->getLocation());
}

Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
                                              Address Addr) {
  assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
  llvm::Value *V = Addr.getPointer();
  llvm::Type *VTy = V->getType();
  llvm::Value *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
                                    CGM.Int8PtrTy);

  for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
    // FIXME Always emit the cast inst so we can differentiate between
    // annotation on the first field of a struct and annotation on the struct
    // itself.
    if (VTy != CGM.Int8PtrTy)
      V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
    V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation());
    V = Builder.CreateBitCast(V, VTy);
  }

  return Address(V, Addr.getAlignment());
}

CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }

CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
    : CGF(CGF) {
  assert(!CGF->IsSanitizerScope);
  CGF->IsSanitizerScope = true;
}

CodeGenFunction::SanitizerScope::~SanitizerScope() {
  CGF->IsSanitizerScope = false;
}

void CodeGenFunction::InsertHelper(llvm::Instruction *I,
                                   const llvm::Twine &Name,
                                   llvm::BasicBlock *BB,
                                   llvm::BasicBlock::iterator InsertPt) const {
  LoopStack.InsertHelper(I);
  if (IsSanitizerScope)
    CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
}

void CGBuilderInserter::InsertHelper(
    llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
    llvm::BasicBlock::iterator InsertPt) const {
  llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
  if (CGF)
    CGF->InsertHelper(I, Name, BB, InsertPt);
}

static bool hasRequiredFeatures(const SmallVectorImpl<StringRef> &ReqFeatures,
                                CodeGenModule &CGM, const FunctionDecl *FD,
                                std::string &FirstMissing) {
  // If there aren't any required features listed then go ahead and return.
  if (ReqFeatures.empty())
    return false;

  // Now build up the set of caller features and verify that all the required
  // features are there.
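  // Illustrative example: ReqFeatures = {"sse4.2", "fma|avx512f"} requires
  // the caller to have sse4.2 enabled and at least one of fma or avx512f.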
  llvm::StringMap<bool> CallerFeatureMap;
  CGM.getFunctionFeatureMap(CallerFeatureMap, GlobalDecl().getWithDecl(FD));

  // Require every entry in the feature list; an entry may itself be an
  // '|'-separated list of alternatives, any one of which suffices.
  return std::all_of(
      ReqFeatures.begin(), ReqFeatures.end(), [&](StringRef Feature) {
        SmallVector<StringRef, 1> OrFeatures;
        Feature.split(OrFeatures, '|');
        return llvm::any_of(OrFeatures, [&](StringRef Feature) {
          if (!CallerFeatureMap.lookup(Feature)) {
            FirstMissing = Feature.str();
            return false;
          }
          return true;
        });
      });
}

// Emits an error if we don't have a valid set of target features for the
// called function.
void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
                                          const FunctionDecl *TargetDecl) {
  // Early exit if this is an indirect call.
  if (!TargetDecl)
    return;

  // Get the current enclosing function if it exists. If it doesn't
  // we can't check the target features anyhow.
  const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurFuncDecl);
  if (!FD)
    return;

  // Grab the required features for the call. For a builtin this is listed in
  // the td file with the default cpu, for an always_inline function this is
  // any listed cpu and any listed features.
  unsigned BuiltinID = TargetDecl->getBuiltinID();
  std::string MissingFeature;
  if (BuiltinID) {
    SmallVector<StringRef, 1> ReqFeatures;
    const char *FeatureList =
        CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);
    // Return if the builtin doesn't have any required features.
    if (!FeatureList || StringRef(FeatureList) == "")
      return;
    StringRef(FeatureList).split(ReqFeatures, ',');
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getBeginLoc(), diag::err_builtin_needs_feature)
          << TargetDecl->getDeclName()
          << CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID);

  } else if (TargetDecl->hasAttr<TargetAttr>() ||
             TargetDecl->hasAttr<CPUSpecificAttr>()) {
    // Get the required features for the callee.

    const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
    TargetAttr::ParsedTargetAttr ParsedAttr = CGM.filterFunctionTargetAttrs(TD);

    SmallVector<StringRef, 1> ReqFeatures;
    llvm::StringMap<bool> CalleeFeatureMap;
    CGM.getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);

    for (const auto &F : ParsedAttr.Features) {
      if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
        ReqFeatures.push_back(StringRef(F).substr(1));
    }

    for (const auto &F : CalleeFeatureMap) {
      // Only positive features are "required".
      if (F.getValue())
        ReqFeatures.push_back(F.getKey());
    }
    if (!hasRequiredFeatures(ReqFeatures, CGM, FD, MissingFeature))
      CGM.getDiags().Report(E->getBeginLoc(), diag::err_function_needs_feature)
          << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
  }
}

void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
  if (!CGM.getCodeGenOpts().SanitizeStats)
    return;

  llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
  IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
  CGM.getSanStats().create(IRB, SSK);
}

llvm::Value *
CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
  llvm::Value *Condition = nullptr;

  if (!RO.Conditions.Architecture.empty())
    Condition = EmitX86CpuIs(RO.Conditions.Architecture);

  if (!RO.Conditions.Features.empty()) {
    llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
    Condition =
        Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
  }
  return Condition;
}

static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
                                             llvm::Function *Resolver,
                                             CGBuilderTy &Builder,
                                             llvm::Function *FuncToReturn,
                                             bool SupportsIFunc) {
  if (SupportsIFunc) {
    Builder.CreateRet(FuncToReturn);
    return;
  }

  llvm::SmallVector<llvm::Value *, 10> Args;
  llvm::for_each(Resolver->args(),
                 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });

  llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
  Result->setTailCallKind(llvm::CallInst::TCK_MustTail);

  if (Resolver->getReturnType()->isVoidTy())
    Builder.CreateRetVoid();
  else
    Builder.CreateRet(Result);
}

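// Illustrative source that produces a resolver:
//   __attribute__((target("avx2")))    void f();
//   __attribute__((target("default"))) void f();
// The resolver tests CPU features in priority order and returns (or, without
// ifunc support, musttail-calls) the best matching version.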
void CodeGenFunction::EmitMultiVersionResolver(
    llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
  assert((getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86 ||
          getContext().getTargetInfo().getTriple().getArch() ==
              llvm::Triple::x86_64) &&
         "Only implemented for x86 targets");

  bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();

  // Main function's basic block.
  llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
  Builder.SetInsertPoint(CurBlock);
  EmitX86CpuInit();

  for (const MultiVersionResolverOption &RO : Options) {
    Builder.SetInsertPoint(CurBlock);
    llvm::Value *Condition = FormResolverCondition(RO);

    // The 'default' or 'generic' case.
    if (!Condition) {
      assert(&RO == Options.end() - 1 &&
             "Default or Generic case must be last");
      CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
                                       SupportsIFunc);
      return;
    }

    llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
    CGBuilderTy RetBuilder(*this, RetBlock);
    CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
                                     SupportsIFunc);
    CurBlock = createBasicBlock("resolver_else", Resolver);
    Builder.CreateCondBr(Condition, RetBlock, CurBlock);
  }

  // If no generic/default, emit an unreachable.
  Builder.SetInsertPoint(CurBlock);
  llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
  TrapCall->setDoesNotReturn();
  TrapCall->setDoesNotThrow();
  Builder.CreateUnreachable();
  Builder.ClearInsertionPoint();
}

// Loc - where the diagnostic will point, where in the source code this
// alignment has failed.
// SecondaryLoc - if present (will be present if sufficiently different from
// Loc), the diagnostic will additionally point a "Note:" to this location.
// It should be the location where the __attribute__((assume_aligned)) was
// written, e.g. on the declaration whose alignment is being assumed.
void CodeGenFunction::EmitAlignmentAssumptionCheck(
    llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
    SourceLocation SecondaryLoc, llvm::Value *Alignment,
    llvm::Value *OffsetValue, llvm::Value *TheCheck,
    llvm::Instruction *Assumption) {
  assert(Assumption && isa<llvm::CallInst>(Assumption) &&
         cast<llvm::CallInst>(Assumption)->getCalledValue() ==
             llvm::Intrinsic::getDeclaration(
                 Builder.GetInsertBlock()->getParent()->getParent(),
                 llvm::Intrinsic::assume) &&
         "Assumption should be a call to llvm.assume().");
  assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
         "Assumption should be the last instruction of the basic block, "
         "since the basic block is still being generated.");

  if (!SanOpts.has(SanitizerKind::Alignment))
    return;

  // Don't check pointers to volatile data. The behavior here is
  // implementation-defined.
  if (Ty->getPointeeType().isVolatileQualified())
    return;

  // We need to temporarily remove the assumption so we can insert the
  // sanitizer check before it, else the check will be dropped by
  // optimizations.
  Assumption->removeFromParent();

  {
    SanitizerScope SanScope(this);

    if (!OffsetValue)
      OffsetValue = Builder.getInt1(0); // no offset.

    llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
                                    EmitCheckSourceLocation(SecondaryLoc),
                                    EmitCheckTypeDescriptor(Ty)};
    llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
                                  EmitCheckValue(Alignment),
                                  EmitCheckValue(OffsetValue)};
    EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
              SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
  }

  // We are now in the (new, empty) "cont" basic block.
  // Reintroduce the assumption.
  Builder.Insert(Assumption);
  // FIXME: Assumption still has its original basic block as its parent.
}

llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
  if (CGDebugInfo *DI = getDebugInfo())
    return DI->SourceLocToDebugLoc(Location);

  return llvm::DebugLoc();
}
