1 //===--- CodeGenFunction.cpp - Emit LLVM Code from ASTs for a Function ----===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This coordinates the per-function state used while generating code.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "CodeGenFunction.h"
14 #include "CGBlocks.h"
15 #include "CGCUDARuntime.h"
16 #include "CGCXXABI.h"
17 #include "CGCleanup.h"
18 #include "CGDebugInfo.h"
19 #include "CGOpenMPRuntime.h"
20 #include "CodeGenModule.h"
21 #include "CodeGenPGO.h"
22 #include "TargetInfo.h"
23 #include "clang/AST/ASTContext.h"
24 #include "clang/AST/ASTLambda.h"
25 #include "clang/AST/Attr.h"
26 #include "clang/AST/Decl.h"
27 #include "clang/AST/DeclCXX.h"
28 #include "clang/AST/Expr.h"
29 #include "clang/AST/StmtCXX.h"
30 #include "clang/AST/StmtObjC.h"
31 #include "clang/Basic/Builtins.h"
32 #include "clang/Basic/CodeGenOptions.h"
33 #include "clang/Basic/TargetInfo.h"
34 #include "clang/CodeGen/CGFunctionInfo.h"
35 #include "clang/Frontend/FrontendDiagnostic.h"
36 #include "llvm/ADT/ArrayRef.h"
37 #include "llvm/Frontend/OpenMP/OMPIRBuilder.h"
38 #include "llvm/IR/DataLayout.h"
39 #include "llvm/IR/Dominators.h"
40 #include "llvm/IR/FPEnv.h"
41 #include "llvm/IR/IntrinsicInst.h"
42 #include "llvm/IR/Intrinsics.h"
43 #include "llvm/IR/MDBuilder.h"
44 #include "llvm/IR/Operator.h"
45 #include "llvm/Support/CRC.h"
46 #include "llvm/Transforms/Scalar/LowerExpectIntrinsic.h"
47 #include "llvm/Transforms/Utils/PromoteMemToReg.h"
48 using namespace clang;
49 using namespace CodeGen;
50
51 /// shouldEmitLifetimeMarkers - Decide whether we need to emit the lifetime
52 /// markers.
53 static bool shouldEmitLifetimeMarkers(const CodeGenOptions &CGOpts,
54 const LangOptions &LangOpts) {
55 if (CGOpts.DisableLifetimeMarkers)
56 return false;
57
58 // Sanitizers may use markers.
59 if (CGOpts.SanitizeAddressUseAfterScope ||
60 LangOpts.Sanitize.has(SanitizerKind::HWAddress) ||
61 LangOpts.Sanitize.has(SanitizerKind::Memory))
62 return true;
63
64 // For now, only in optimized builds.
65 return CGOpts.OptimizationLevel != 0;
66 }
67
68 CodeGenFunction::CodeGenFunction(CodeGenModule &cgm, bool suppressNewContext)
69 : CodeGenTypeCache(cgm), CGM(cgm), Target(cgm.getTarget()),
70 Builder(cgm, cgm.getModule().getContext(), llvm::ConstantFolder(),
71 CGBuilderInserterTy(this)),
72 SanOpts(CGM.getLangOpts().Sanitize), CurFPFeatures(CGM.getLangOpts()),
73 DebugInfo(CGM.getModuleDebugInfo()), PGO(cgm),
74 ShouldEmitLifetimeMarkers(
75 shouldEmitLifetimeMarkers(CGM.getCodeGenOpts(), CGM.getLangOpts())) {
76 if (!suppressNewContext)
77 CGM.getCXXABI().getMangleContext().startNewFunction();
78
79 SetFastMathFlags(CurFPFeatures);
80 SetFPModel();
81 }
82
83 CodeGenFunction::~CodeGenFunction() {
84 assert(LifetimeExtendedCleanupStack.empty() && "failed to emit a cleanup");
85
86 if (getLangOpts().OpenMP && CurFn)
87 CGM.getOpenMPRuntime().functionFinished(*this);
88
89 // If we have an OpenMPIRBuilder we want to finalize functions (incl.
90 // outlining etc) at some point. Doing it once the function codegen is done
91 // seems to be a reasonable spot. We do it here, as opposed to the deletion
92 // time of the CodeGenModule, because we have to ensure the IR has not yet
93 // been "emitted" to the outside, thus, modifications are still sensible.
94 if (CGM.getLangOpts().OpenMPIRBuilder)
95 CGM.getOpenMPRuntime().getOMPBuilder().finalize();
96 }
97
98 // Map the LangOption for exception behavior into
99 // the corresponding enum in the IR.
100 llvm::fp::ExceptionBehavior
101 clang::ToConstrainedExceptMD(LangOptions::FPExceptionModeKind Kind) {
102
103 switch (Kind) {
104 case LangOptions::FPE_Ignore: return llvm::fp::ebIgnore;
105 case LangOptions::FPE_MayTrap: return llvm::fp::ebMayTrap;
106 case LangOptions::FPE_Strict: return llvm::fp::ebStrict;
107 }
108 llvm_unreachable("Unsupported FP Exception Behavior");
109 }
110
111 void CodeGenFunction::SetFPModel() {
112 llvm::RoundingMode RM = getLangOpts().getFPRoundingMode();
113 auto fpExceptionBehavior = ToConstrainedExceptMD(
114 getLangOpts().getFPExceptionMode());
115
116 Builder.setDefaultConstrainedRounding(RM);
117 Builder.setDefaultConstrainedExcept(fpExceptionBehavior);
118 Builder.setIsFPConstrained(fpExceptionBehavior != llvm::fp::ebIgnore ||
119 RM != llvm::RoundingMode::NearestTiesToEven);
120 }
121
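// Note: the helper below translates the semantic FPOptions into LLVM
// fast-math flags on the IR builder, so every floating-point instruction
// emitted afterwards inherits them.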
122 void CodeGenFunction::SetFastMathFlags(FPOptions FPFeatures) {
123 llvm::FastMathFlags FMF;
124 FMF.setAllowReassoc(FPFeatures.getAllowFPReassociate());
125 FMF.setNoNaNs(FPFeatures.getNoHonorNaNs());
126 FMF.setNoInfs(FPFeatures.getNoHonorInfs());
127 FMF.setNoSignedZeros(FPFeatures.getNoSignedZero());
128 FMF.setAllowReciprocal(FPFeatures.getAllowReciprocal());
129 FMF.setApproxFunc(FPFeatures.getAllowApproxFunc());
130 FMF.setAllowContract(FPFeatures.allowFPContractAcrossStatement());
131 Builder.setFastMathFlags(FMF);
132 }
133
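// Rough usage sketch (for orientation only, not taken from a specific
// caller): the RAII object scopes a pragma's floating-point state around the
// emission of a single expression, e.g.
//   CodeGenFunction::CGFPOptionsRAII FPOptsRAII(CGF, E);
//   ... emit the instructions for E ...
// and the previous builder defaults are restored when it goes out of scope.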
134 CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
135 const Expr *E)
136 : CGF(CGF) {
137 ConstructorHelper(E->getFPFeaturesInEffect(CGF.getLangOpts()));
138 }
139
140 CodeGenFunction::CGFPOptionsRAII::CGFPOptionsRAII(CodeGenFunction &CGF,
141 FPOptions FPFeatures)
142 : CGF(CGF) {
143 ConstructorHelper(FPFeatures);
144 }
145
146 void CodeGenFunction::CGFPOptionsRAII::ConstructorHelper(FPOptions FPFeatures) {
147 OldFPFeatures = CGF.CurFPFeatures;
148 CGF.CurFPFeatures = FPFeatures;
149
150 OldExcept = CGF.Builder.getDefaultConstrainedExcept();
151 OldRounding = CGF.Builder.getDefaultConstrainedRounding();
152
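// If the pragma leaves the FP state unchanged there is nothing to override;
// the values saved above are still restored by the destructor.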
153 if (OldFPFeatures == FPFeatures)
154 return;
155
156 FMFGuard.emplace(CGF.Builder);
157
158 llvm::RoundingMode NewRoundingBehavior =
159 static_cast<llvm::RoundingMode>(FPFeatures.getRoundingMode());
160 CGF.Builder.setDefaultConstrainedRounding(NewRoundingBehavior);
161 auto NewExceptionBehavior =
162 ToConstrainedExceptMD(static_cast<LangOptions::FPExceptionModeKind>(
163 FPFeatures.getFPExceptionMode()));
164 CGF.Builder.setDefaultConstrainedExcept(NewExceptionBehavior);
165
166 CGF.SetFastMathFlags(FPFeatures);
167
168 assert((CGF.CurFuncDecl == nullptr || CGF.Builder.getIsFPConstrained() ||
169 isa<CXXConstructorDecl>(CGF.CurFuncDecl) ||
170 isa<CXXDestructorDecl>(CGF.CurFuncDecl) ||
171 (NewExceptionBehavior == llvm::fp::ebIgnore &&
172 NewRoundingBehavior == llvm::RoundingMode::NearestTiesToEven)) &&
173 "FPConstrained should be enabled on entire function");
174
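// The merge below can only weaken a function-level FP attribute
// ("true" -> "false"); it never re-enables a relaxation that an earlier
// scope in this function already disallowed.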
175 auto mergeFnAttrValue = [&](StringRef Name, bool Value) {
176 auto OldValue =
177 CGF.CurFn->getFnAttribute(Name).getValueAsString() == "true";
178 auto NewValue = OldValue & Value;
179 if (OldValue != NewValue)
180 CGF.CurFn->addFnAttr(Name, llvm::toStringRef(NewValue));
181 };
182 mergeFnAttrValue("no-infs-fp-math", FPFeatures.getNoHonorInfs());
183 mergeFnAttrValue("no-nans-fp-math", FPFeatures.getNoHonorNaNs());
184 mergeFnAttrValue("no-signed-zeros-fp-math", FPFeatures.getNoSignedZero());
185 mergeFnAttrValue("unsafe-fp-math", FPFeatures.getAllowFPReassociate() &&
186 FPFeatures.getAllowReciprocal() &&
187 FPFeatures.getAllowApproxFunc() &&
188 FPFeatures.getNoSignedZero());
189 }
190
191 CodeGenFunction::CGFPOptionsRAII::~CGFPOptionsRAII() {
192 CGF.CurFPFeatures = OldFPFeatures;
193 CGF.Builder.setDefaultConstrainedExcept(OldExcept);
194 CGF.Builder.setDefaultConstrainedRounding(OldRounding);
195 }
196
197 LValue CodeGenFunction::MakeNaturalAlignAddrLValue(llvm::Value *V, QualType T) {
198 LValueBaseInfo BaseInfo;
199 TBAAAccessInfo TBAAInfo;
200 CharUnits Alignment = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo);
201 return LValue::MakeAddr(Address(V, Alignment), T, getContext(), BaseInfo,
202 TBAAInfo);
203 }
204
205 /// Given a value of type T* that may not point to a complete object,
206 /// construct an l-value with the natural pointee alignment of T.
207 LValue
208 CodeGenFunction::MakeNaturalAlignPointeeAddrLValue(llvm::Value *V, QualType T) {
209 LValueBaseInfo BaseInfo;
210 TBAAAccessInfo TBAAInfo;
211 CharUnits Align = CGM.getNaturalTypeAlignment(T, &BaseInfo, &TBAAInfo,
212 /* forPointeeType= */ true);
213 return MakeAddrLValue(Address(V, Align), T, BaseInfo, TBAAInfo);
214 }
215
216
217 llvm::Type *CodeGenFunction::ConvertTypeForMem(QualType T) {
218 return CGM.getTypes().ConvertTypeForMem(T);
219 }
220
221 llvm::Type *CodeGenFunction::ConvertType(QualType T) {
222 return CGM.getTypes().ConvertType(T);
223 }
224
225 TypeEvaluationKind CodeGenFunction::getEvaluationKind(QualType type) {
226 type = type.getCanonicalType();
227 while (true) {
228 switch (type->getTypeClass()) {
229 #define TYPE(name, parent)
230 #define ABSTRACT_TYPE(name, parent)
231 #define NON_CANONICAL_TYPE(name, parent) case Type::name:
232 #define DEPENDENT_TYPE(name, parent) case Type::name:
233 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(name, parent) case Type::name:
234 #include "clang/AST/TypeNodes.inc"
235 llvm_unreachable("non-canonical or dependent type in IR-generation");
236
237 case Type::Auto:
238 case Type::DeducedTemplateSpecialization:
239 llvm_unreachable("undeduced type in IR-generation");
240
241 // Various scalar types.
242 case Type::Builtin:
243 case Type::Pointer:
244 case Type::BlockPointer:
245 case Type::LValueReference:
246 case Type::RValueReference:
247 case Type::MemberPointer:
248 case Type::Vector:
249 case Type::ExtVector:
250 case Type::ConstantMatrix:
251 case Type::FunctionProto:
252 case Type::FunctionNoProto:
253 case Type::Enum:
254 case Type::ObjCObjectPointer:
255 case Type::Pipe:
256 case Type::ExtInt:
257 return TEK_Scalar;
258
259 // Complexes.
260 case Type::Complex:
261 return TEK_Complex;
262
263 // Arrays, records, and Objective-C objects.
264 case Type::ConstantArray:
265 case Type::IncompleteArray:
266 case Type::VariableArray:
267 case Type::Record:
268 case Type::ObjCObject:
269 case Type::ObjCInterface:
270 return TEK_Aggregate;
271
272 // We operate on atomic values according to their underlying type.
273 case Type::Atomic:
274 type = cast<AtomicType>(type)->getValueType();
275 continue;
276 }
277 llvm_unreachable("unknown type kind!");
278 }
279 }
280
281 llvm::DebugLoc CodeGenFunction::EmitReturnBlock() {
282 // For cleanliness, we try to avoid emitting the return block for
283 // simple cases.
284 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
285
286 if (CurBB) {
287 assert(!CurBB->getTerminator() && "Unexpected terminated block.");
288
289 // We have a valid insert point, reuse it if it is empty or there are no
290 // explicit jumps to the return block.
291 if (CurBB->empty() || ReturnBlock.getBlock()->use_empty()) {
292 ReturnBlock.getBlock()->replaceAllUsesWith(CurBB);
293 delete ReturnBlock.getBlock();
294 ReturnBlock = JumpDest();
295 } else
296 EmitBlock(ReturnBlock.getBlock());
297 return llvm::DebugLoc();
298 }
299
300 // Otherwise, if the return block is the target of a single direct
301 // branch then we can just put the code in that block instead. This
302 // cleans up functions which started with a unified return block.
303 if (ReturnBlock.getBlock()->hasOneUse()) {
304 llvm::BranchInst *BI =
305 dyn_cast<llvm::BranchInst>(*ReturnBlock.getBlock()->user_begin());
306 if (BI && BI->isUnconditional() &&
307 BI->getSuccessor(0) == ReturnBlock.getBlock()) {
308 // Record/return the DebugLoc of the simple 'return' expression to be used
309 // later by the actual 'ret' instruction.
310 llvm::DebugLoc Loc = BI->getDebugLoc();
311 Builder.SetInsertPoint(BI->getParent());
312 BI->eraseFromParent();
313 delete ReturnBlock.getBlock();
314 ReturnBlock = JumpDest();
315 return Loc;
316 }
317 }
318
319 // FIXME: We are at an unreachable point, there is no reason to emit the block
320 // unless it has uses. However, we still need a place to put the debug
321 // region.end for now.
322
323 EmitBlock(ReturnBlock.getBlock());
324 return llvm::DebugLoc();
325 }
326
327 static void EmitIfUsed(CodeGenFunction &CGF, llvm::BasicBlock *BB) {
328 if (!BB) return;
329 if (!BB->use_empty())
330 return CGF.CurFn->getBasicBlockList().push_back(BB);
331 delete BB;
332 }
333
334 void CodeGenFunction::FinishFunction(SourceLocation EndLoc) {
335 assert(BreakContinueStack.empty() &&
336 "mismatched push/pop in break/continue stack!");
337
338 bool OnlySimpleReturnStmts = NumSimpleReturnExprs > 0
339 && NumSimpleReturnExprs == NumReturnExprs
340 && ReturnBlock.getBlock()->use_empty();
341 // Usually the return expression is evaluated before the cleanup
342 // code. If the function contains only a simple return statement,
343 // such as a constant, the location before the cleanup code becomes
344 // the last useful breakpoint in the function, because the simple
345 // return expression will be evaluated after the cleanup code. To be
346 // safe, set the debug location for cleanup code to the location of
347 // the return statement. Otherwise the cleanup code should be at the
348 // end of the function's lexical scope.
349 //
350 // If there are multiple branches to the return block, the branch
351 // instructions will get the location of the return statements and
352 // all will be fine.
353 if (CGDebugInfo *DI = getDebugInfo()) {
354 if (OnlySimpleReturnStmts)
355 DI->EmitLocation(Builder, LastStopPoint);
356 else
357 DI->EmitLocation(Builder, EndLoc);
358 }
359
360 // Pop any cleanups that might have been associated with the
361 // parameters. Do this in whatever block we're currently in; it's
362 // important to do this before we enter the return block or return
363 // edges will be *really* confused.
364 bool HasCleanups = EHStack.stable_begin() != PrologueCleanupDepth;
365 bool HasOnlyLifetimeMarkers =
366 HasCleanups && EHStack.containsOnlyLifetimeMarkers(PrologueCleanupDepth);
367 bool EmitRetDbgLoc = !HasCleanups || HasOnlyLifetimeMarkers;
368 if (HasCleanups) {
369 // Make sure the line table doesn't jump back into the body for
370 // the ret after it's been at EndLoc.
371 Optional<ApplyDebugLocation> AL;
372 if (CGDebugInfo *DI = getDebugInfo()) {
373 if (OnlySimpleReturnStmts)
374 DI->EmitLocation(Builder, EndLoc);
375 else
376 // We may not have a valid end location. Try to apply it anyway, and
377 // fall back to an artificial location if needed.
378 AL = ApplyDebugLocation::CreateDefaultArtificial(*this, EndLoc);
379 }
380
381 PopCleanupBlocks(PrologueCleanupDepth);
382 }
383
384 // Emit function epilog (to return).
385 llvm::DebugLoc Loc = EmitReturnBlock();
386
387 if (ShouldInstrumentFunction()) {
388 if (CGM.getCodeGenOpts().InstrumentFunctions)
389 CurFn->addFnAttr("instrument-function-exit", "__cyg_profile_func_exit");
390 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
391 CurFn->addFnAttr("instrument-function-exit-inlined",
392 "__cyg_profile_func_exit");
393 }
394
395 // Emit debug descriptor for function end.
396 if (CGDebugInfo *DI = getDebugInfo())
397 DI->EmitFunctionEnd(Builder, CurFn);
398
399 // Reset the debug location to that of the simple 'return' expression, if any,
400 // rather than that of the end of the function's scope '}'.
401 ApplyDebugLocation AL(*this, Loc);
402 EmitFunctionEpilog(*CurFnInfo, EmitRetDbgLoc, EndLoc);
403 EmitEndEHSpec(CurCodeDecl);
404
405 assert(EHStack.empty() &&
406 "did not remove all scopes from cleanup stack!");
407
408 // If someone did an indirect goto, emit the indirect goto block at the end of
409 // the function.
410 if (IndirectBranch) {
411 EmitBlock(IndirectBranch->getParent());
412 Builder.ClearInsertionPoint();
413 }
414
415 // If some of our locals escaped, insert a call to llvm.localescape in the
416 // entry block.
417 if (!EscapedLocals.empty()) {
418 // Invert the map from local to index into a simple vector. There should be
419 // no holes.
420 SmallVector<llvm::Value *, 4> EscapeArgs;
421 EscapeArgs.resize(EscapedLocals.size());
422 for (auto &Pair : EscapedLocals)
423 EscapeArgs[Pair.second] = Pair.first;
424 llvm::Function *FrameEscapeFn = llvm::Intrinsic::getDeclaration(
425 &CGM.getModule(), llvm::Intrinsic::localescape);
426 CGBuilderTy(*this, AllocaInsertPt).CreateCall(FrameEscapeFn, EscapeArgs);
427 }
428
429 // Remove the AllocaInsertPt instruction, which is just a convenience for us.
430 llvm::Instruction *Ptr = AllocaInsertPt;
431 AllocaInsertPt = nullptr;
432 Ptr->eraseFromParent();
433
434 // If someone took the address of a label but never did an indirect goto, we
435 // made a zero-entry PHI node, which is illegal; zap it now.
436 if (IndirectBranch) {
437 llvm::PHINode *PN = cast<llvm::PHINode>(IndirectBranch->getAddress());
438 if (PN->getNumIncomingValues() == 0) {
439 PN->replaceAllUsesWith(llvm::UndefValue::get(PN->getType()));
440 PN->eraseFromParent();
441 }
442 }
443
444 EmitIfUsed(*this, EHResumeBlock);
445 EmitIfUsed(*this, TerminateLandingPad);
446 EmitIfUsed(*this, TerminateHandler);
447 EmitIfUsed(*this, UnreachableBlock);
448
449 for (const auto &FuncletAndParent : TerminateFunclets)
450 EmitIfUsed(*this, FuncletAndParent.second);
451
452 if (CGM.getCodeGenOpts().EmitDeclMetadata)
453 EmitDeclMetadata();
454
455 for (SmallVectorImpl<std::pair<llvm::Instruction *, llvm::Value *> >::iterator
456 I = DeferredReplacements.begin(),
457 E = DeferredReplacements.end();
458 I != E; ++I) {
459 I->first->replaceAllUsesWith(I->second);
460 I->first->eraseFromParent();
461 }
462
463 // Eliminate CleanupDestSlot alloca by replacing it with SSA values and
464 // PHIs if the current function is a coroutine. We don't do it for all
465 // functions as it may result in a slight increase in the number of
466 // instructions if compiled with no optimizations. We do it for coroutines
467 // because the lifetime of the CleanupDestSlot alloca makes correct coroutine
468 // frame building very difficult.
469 if (NormalCleanupDest.isValid() && isCoroutine()) {
470 llvm::DominatorTree DT(*CurFn);
471 llvm::PromoteMemToReg(
472 cast<llvm::AllocaInst>(NormalCleanupDest.getPointer()), DT);
473 NormalCleanupDest = Address::invalid();
474 }
475
476 // Scan function arguments for vector width.
477 for (llvm::Argument &A : CurFn->args())
478 if (auto *VT = dyn_cast<llvm::VectorType>(A.getType()))
479 LargestVectorWidth =
480 std::max((uint64_t)LargestVectorWidth,
481 VT->getPrimitiveSizeInBits().getKnownMinSize());
482
483 // Update vector width based on return type.
484 if (auto *VT = dyn_cast<llvm::VectorType>(CurFn->getReturnType()))
485 LargestVectorWidth =
486 std::max((uint64_t)LargestVectorWidth,
487 VT->getPrimitiveSizeInBits().getKnownMinSize());
488
489 // Add the required-vector-width attribute. This contains the max width from:
490 // 1. min-vector-width attribute used in the source program.
491 // 2. Any builtins used that have a vector width specified.
492 // 3. Values passed in and out of inline assembly.
493 // 4. Width of vector arguments and return types for this function.
494 // 5. Width of vector arguments and return types for functions called by this
495 // function.
496 CurFn->addFnAttr("min-legal-vector-width", llvm::utostr(LargestVectorWidth));
497
498 // If we generated an unreachable return block, delete it now.
499 if (ReturnBlock.isValid() && ReturnBlock.getBlock()->use_empty()) {
500 Builder.ClearInsertionPoint();
501 ReturnBlock.getBlock()->eraseFromParent();
502 }
503 if (ReturnValue.isValid()) {
504 auto *RetAlloca = dyn_cast<llvm::AllocaInst>(ReturnValue.getPointer());
505 if (RetAlloca && RetAlloca->use_empty()) {
506 RetAlloca->eraseFromParent();
507 ReturnValue = Address::invalid();
508 }
509 }
510 }
511
512 /// ShouldInstrumentFunction - Return true if the current function should be
513 /// instrumented with __cyg_profile_func_* calls
514 bool CodeGenFunction::ShouldInstrumentFunction() {
515 if (!CGM.getCodeGenOpts().InstrumentFunctions &&
516 !CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining &&
517 !CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
518 return false;
519 if (!CurFuncDecl || CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>())
520 return false;
521 return true;
522 }
523
524 /// ShouldXRayInstrumentFunction - Return true if the current function should be
525 /// instrumented with XRay nop sleds.
526 bool CodeGenFunction::ShouldXRayInstrumentFunction() const {
527 return CGM.getCodeGenOpts().XRayInstrumentFunctions;
528 }
529
530 /// AlwaysEmitXRayCustomEvents - Return true if we should emit IR for calls to
531 /// the __xray_customevent(...) builtin calls, when doing XRay instrumentation.
532 bool CodeGenFunction::AlwaysEmitXRayCustomEvents() const {
533 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
534 (CGM.getCodeGenOpts().XRayAlwaysEmitCustomEvents ||
535 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
536 XRayInstrKind::Custom);
537 }
538
539 bool CodeGenFunction::AlwaysEmitXRayTypedEvents() const {
540 return CGM.getCodeGenOpts().XRayInstrumentFunctions &&
541 (CGM.getCodeGenOpts().XRayAlwaysEmitTypedEvents ||
542 CGM.getCodeGenOpts().XRayInstrumentationBundle.Mask ==
543 XRayInstrKind::Typed);
544 }
545
546 llvm::Constant *
547 CodeGenFunction::EncodeAddrForUseInPrologue(llvm::Function *F,
548 llvm::Constant *Addr) {
549 // Addresses stored in prologue data can't require run-time fixups and must
550 // be PC-relative. Run-time fixups are undesirable because they necessitate
551 // writable text segments, which are unsafe. And absolute addresses are
552 // undesirable because they break PIE mode.
553
554 // Add a layer of indirection through a private global. Taking its address
555 // won't result in a run-time fixup, even if Addr has linkonce_odr linkage.
556 auto *GV = new llvm::GlobalVariable(CGM.getModule(), Addr->getType(),
557 /*isConstant=*/true,
558 llvm::GlobalValue::PrivateLinkage, Addr);
559
560 // Create a PC-relative address.
561 auto *GOTAsInt = llvm::ConstantExpr::getPtrToInt(GV, IntPtrTy);
562 auto *FuncAsInt = llvm::ConstantExpr::getPtrToInt(F, IntPtrTy);
563 auto *PCRelAsInt = llvm::ConstantExpr::getSub(GOTAsInt, FuncAsInt);
564 return (IntPtrTy == Int32Ty)
565 ? PCRelAsInt
566 : llvm::ConstantExpr::getTrunc(PCRelAsInt, Int32Ty);
567 }
568
569 llvm::Value *
570 CodeGenFunction::DecodeAddrUsedInPrologue(llvm::Value *F,
571 llvm::Value *EncodedAddr) {
572 // Reconstruct the address of the global.
573 auto *PCRelAsInt = Builder.CreateSExt(EncodedAddr, IntPtrTy);
574 auto *FuncAsInt = Builder.CreatePtrToInt(F, IntPtrTy, "func_addr.int");
575 auto *GOTAsInt = Builder.CreateAdd(PCRelAsInt, FuncAsInt, "global_addr.int");
576 auto *GOTAddr = Builder.CreateIntToPtr(GOTAsInt, Int8PtrPtrTy, "global_addr");
577
578 // Load the original pointer through the global.
579 return Builder.CreateLoad(Address(GOTAddr, getPointerAlign()),
580 "decoded_addr");
581 }
582
583 void CodeGenFunction::EmitOpenCLKernelMetadata(const FunctionDecl *FD,
584 llvm::Function *Fn)
585 {
586 if (!FD->hasAttr<OpenCLKernelAttr>())
587 return;
588
589 llvm::LLVMContext &Context = getLLVMContext();
590
591 CGM.GenOpenCLArgMetadata(Fn, FD, this);
592
593 if (const VecTypeHintAttr *A = FD->getAttr<VecTypeHintAttr>()) {
594 QualType HintQTy = A->getTypeHint();
595 const ExtVectorType *HintEltQTy = HintQTy->getAs<ExtVectorType>();
596 bool IsSignedInteger =
597 HintQTy->isSignedIntegerType() ||
598 (HintEltQTy && HintEltQTy->getElementType()->isSignedIntegerType());
599 llvm::Metadata *AttrMDArgs[] = {
600 llvm::ConstantAsMetadata::get(llvm::UndefValue::get(
601 CGM.getTypes().ConvertType(A->getTypeHint()))),
602 llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
603 llvm::IntegerType::get(Context, 32),
604 llvm::APInt(32, (uint64_t)(IsSignedInteger ? 1 : 0))))};
605 Fn->setMetadata("vec_type_hint", llvm::MDNode::get(Context, AttrMDArgs));
606 }
607
608 if (const WorkGroupSizeHintAttr *A = FD->getAttr<WorkGroupSizeHintAttr>()) {
609 llvm::Metadata *AttrMDArgs[] = {
610 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
611 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
612 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
613 Fn->setMetadata("work_group_size_hint", llvm::MDNode::get(Context, AttrMDArgs));
614 }
615
616 if (const ReqdWorkGroupSizeAttr *A = FD->getAttr<ReqdWorkGroupSizeAttr>()) {
617 llvm::Metadata *AttrMDArgs[] = {
618 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getXDim())),
619 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getYDim())),
620 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getZDim()))};
621 Fn->setMetadata("reqd_work_group_size", llvm::MDNode::get(Context, AttrMDArgs));
622 }
623
624 if (const OpenCLIntelReqdSubGroupSizeAttr *A =
625 FD->getAttr<OpenCLIntelReqdSubGroupSizeAttr>()) {
626 llvm::Metadata *AttrMDArgs[] = {
627 llvm::ConstantAsMetadata::get(Builder.getInt32(A->getSubGroupSize()))};
628 Fn->setMetadata("intel_reqd_sub_group_size",
629 llvm::MDNode::get(Context, AttrMDArgs));
630 }
631 }
632
633 /// Determine whether the function F ends with a return stmt.
634 static bool endsWithReturn(const Decl* F) {
635 const Stmt *Body = nullptr;
636 if (auto *FD = dyn_cast_or_null<FunctionDecl>(F))
637 Body = FD->getBody();
638 else if (auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(F))
639 Body = OMD->getBody();
640
641 if (auto *CS = dyn_cast_or_null<CompoundStmt>(Body)) {
642 auto LastStmt = CS->body_rbegin();
643 if (LastStmt != CS->body_rend())
644 return isa<ReturnStmt>(*LastStmt);
645 }
646 return false;
647 }
648
649 void CodeGenFunction::markAsIgnoreThreadCheckingAtRuntime(llvm::Function *Fn) {
650 if (SanOpts.has(SanitizerKind::Thread)) {
651 Fn->addFnAttr("sanitize_thread_no_checking_at_run_time");
652 Fn->removeFnAttr(llvm::Attribute::SanitizeThread);
653 }
654 }
655
656 /// Check if the return value of this function requires sanitization.
657 bool CodeGenFunction::requiresReturnValueCheck() const {
658 return requiresReturnValueNullabilityCheck() ||
659 (SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) && CurCodeDecl &&
660 CurCodeDecl->getAttr<ReturnsNonNullAttr>());
661 }
662
663 static bool matchesStlAllocatorFn(const Decl *D, const ASTContext &Ctx) {
664 auto *MD = dyn_cast_or_null<CXXMethodDecl>(D);
665 if (!MD || !MD->getDeclName().getAsIdentifierInfo() ||
666 !MD->getDeclName().getAsIdentifierInfo()->isStr("allocate") ||
667 (MD->getNumParams() != 1 && MD->getNumParams() != 2))
668 return false;
669
670 if (MD->parameters()[0]->getType().getCanonicalType() != Ctx.getSizeType())
671 return false;
672
673 if (MD->getNumParams() == 2) {
674 auto *PT = MD->parameters()[1]->getType()->getAs<PointerType>();
675 if (!PT || !PT->isVoidPointerType() ||
676 !PT->getPointeeType().isConstQualified())
677 return false;
678 }
679
680 return true;
681 }
682
683 /// Return the UBSan prologue signature for \p FD if one is available.
684 static llvm::Constant *getPrologueSignature(CodeGenModule &CGM,
685 const FunctionDecl *FD) {
686 if (const auto *MD = dyn_cast<CXXMethodDecl>(FD))
687 if (!MD->isStatic())
688 return nullptr;
689 return CGM.getTargetCodeGenInfo().getUBSanFunctionSignature(CGM);
690 }
691
692 void CodeGenFunction::StartFunction(GlobalDecl GD, QualType RetTy,
693 llvm::Function *Fn,
694 const CGFunctionInfo &FnInfo,
695 const FunctionArgList &Args,
696 SourceLocation Loc,
697 SourceLocation StartLoc) {
698 assert(!CurFn &&
699 "Do not use a CodeGenFunction object for more than one function");
700
701 const Decl *D = GD.getDecl();
702
703 DidCallStackSave = false;
704 CurCodeDecl = D;
705 if (const auto *FD = dyn_cast_or_null<FunctionDecl>(D))
706 if (FD->usesSEHTry())
707 CurSEHParent = FD;
708 CurFuncDecl = (D ? D->getNonClosureContext() : nullptr);
709 FnRetTy = RetTy;
710 CurFn = Fn;
711 CurFnInfo = &FnInfo;
712 assert(CurFn->isDeclaration() && "Function already has body?");
713
714 // If this function has been blacklisted for any of the enabled sanitizers,
715 // disable the sanitizer for the function.
716 do {
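// Sanitizers.def expands SANITIZER once per known sanitizer; the enclosing
// do/while exists only so the expansion can break out early once SanOpts is
// empty.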
717 #define SANITIZER(NAME, ID) \
718 if (SanOpts.empty()) \
719 break; \
720 if (SanOpts.has(SanitizerKind::ID)) \
721 if (CGM.isInSanitizerBlacklist(SanitizerKind::ID, Fn, Loc)) \
722 SanOpts.set(SanitizerKind::ID, false);
723
724 #include "clang/Basic/Sanitizers.def"
725 #undef SANITIZER
726 } while (0);
727
728 if (D) {
729 // Apply the no_sanitize* attributes to SanOpts.
730 for (auto Attr : D->specific_attrs<NoSanitizeAttr>()) {
731 SanitizerMask mask = Attr->getMask();
732 SanOpts.Mask &= ~mask;
733 if (mask & SanitizerKind::Address)
734 SanOpts.set(SanitizerKind::KernelAddress, false);
735 if (mask & SanitizerKind::KernelAddress)
736 SanOpts.set(SanitizerKind::Address, false);
737 if (mask & SanitizerKind::HWAddress)
738 SanOpts.set(SanitizerKind::KernelHWAddress, false);
739 if (mask & SanitizerKind::KernelHWAddress)
740 SanOpts.set(SanitizerKind::HWAddress, false);
741 }
742 }
743
744 // Apply sanitizer attributes to the function.
745 if (SanOpts.hasOneOf(SanitizerKind::Address | SanitizerKind::KernelAddress))
746 Fn->addFnAttr(llvm::Attribute::SanitizeAddress);
747 if (SanOpts.hasOneOf(SanitizerKind::HWAddress | SanitizerKind::KernelHWAddress))
748 Fn->addFnAttr(llvm::Attribute::SanitizeHWAddress);
749 if (SanOpts.has(SanitizerKind::MemTag))
750 Fn->addFnAttr(llvm::Attribute::SanitizeMemTag);
751 if (SanOpts.has(SanitizerKind::Thread))
752 Fn->addFnAttr(llvm::Attribute::SanitizeThread);
753 if (SanOpts.hasOneOf(SanitizerKind::Memory | SanitizerKind::KernelMemory))
754 Fn->addFnAttr(llvm::Attribute::SanitizeMemory);
755 if (SanOpts.has(SanitizerKind::SafeStack))
756 Fn->addFnAttr(llvm::Attribute::SafeStack);
757 if (SanOpts.has(SanitizerKind::ShadowCallStack))
758 Fn->addFnAttr(llvm::Attribute::ShadowCallStack);
759
760 // Apply fuzzing attribute to the function.
761 if (SanOpts.hasOneOf(SanitizerKind::Fuzzer | SanitizerKind::FuzzerNoLink))
762 Fn->addFnAttr(llvm::Attribute::OptForFuzzing);
763
764 // Ignore TSan memory accesses from within ObjC/ObjC++ dealloc, initialize,
765 // .cxx_destruct, __destroy_helper_block_ and all of their callees at run time.
766 if (SanOpts.has(SanitizerKind::Thread)) {
767 if (const auto *OMD = dyn_cast_or_null<ObjCMethodDecl>(D)) {
768 IdentifierInfo *II = OMD->getSelector().getIdentifierInfoForSlot(0);
769 if (OMD->getMethodFamily() == OMF_dealloc ||
770 OMD->getMethodFamily() == OMF_initialize ||
771 (OMD->getSelector().isUnarySelector() && II->isStr(".cxx_destruct"))) {
772 markAsIgnoreThreadCheckingAtRuntime(Fn);
773 }
774 }
775 }
776
777 // Ignore unrelated casts in STL allocate() since the allocator must cast
778 // from void* to T* before object initialization completes. Don't match on the
779 // namespace because not all allocators are in std::
780 if (D && SanOpts.has(SanitizerKind::CFIUnrelatedCast)) {
781 if (matchesStlAllocatorFn(D, getContext()))
782 SanOpts.Mask &= ~SanitizerKind::CFIUnrelatedCast;
783 }
784
785 // Ignore null checks in coroutine functions since the coroutine passes
786 // are not aware of how to move the extra UBSan instructions across the split
787 // coroutine boundaries.
788 if (D && SanOpts.has(SanitizerKind::Null))
789 if (const auto *FD = dyn_cast<FunctionDecl>(D))
790 if (FD->getBody() &&
791 FD->getBody()->getStmtClass() == Stmt::CoroutineBodyStmtClass)
792 SanOpts.Mask &= ~SanitizerKind::Null;
793
794 // Apply xray attributes to the function (as a string, for now)
795 bool AlwaysXRayAttr = false;
796 if (const auto *XRayAttr = D ? D->getAttr<XRayInstrumentAttr>() : nullptr) {
797 if (CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
798 XRayInstrKind::FunctionEntry) ||
799 CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
800 XRayInstrKind::FunctionExit)) {
801 if (XRayAttr->alwaysXRayInstrument() && ShouldXRayInstrumentFunction()) {
802 Fn->addFnAttr("function-instrument", "xray-always");
803 AlwaysXRayAttr = true;
804 }
805 if (XRayAttr->neverXRayInstrument())
806 Fn->addFnAttr("function-instrument", "xray-never");
807 if (const auto *LogArgs = D->getAttr<XRayLogArgsAttr>())
808 if (ShouldXRayInstrumentFunction())
809 Fn->addFnAttr("xray-log-args",
810 llvm::utostr(LogArgs->getArgumentCount()));
811 }
812 } else {
813 if (ShouldXRayInstrumentFunction() && !CGM.imbueXRayAttrs(Fn, Loc))
814 Fn->addFnAttr(
815 "xray-instruction-threshold",
816 llvm::itostr(CGM.getCodeGenOpts().XRayInstructionThreshold));
817 }
818
819 if (ShouldXRayInstrumentFunction()) {
820 if (CGM.getCodeGenOpts().XRayIgnoreLoops)
821 Fn->addFnAttr("xray-ignore-loops");
822
823 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
824 XRayInstrKind::FunctionExit))
825 Fn->addFnAttr("xray-skip-exit");
826
827 if (!CGM.getCodeGenOpts().XRayInstrumentationBundle.has(
828 XRayInstrKind::FunctionEntry))
829 Fn->addFnAttr("xray-skip-entry");
830
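// With XRay function grouping, hash the function name and instrument only
// the selected group, unless an always-instrument attribute was already
// honored above.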
831 auto FuncGroups = CGM.getCodeGenOpts().XRayTotalFunctionGroups;
832 if (FuncGroups > 1) {
833 auto FuncName = llvm::makeArrayRef<uint8_t>(
834 CurFn->getName().bytes_begin(), CurFn->getName().bytes_end());
835 auto Group = crc32(FuncName) % FuncGroups;
836 if (Group != CGM.getCodeGenOpts().XRaySelectedFunctionGroup &&
837 !AlwaysXRayAttr)
838 Fn->addFnAttr("function-instrument", "xray-never");
839 }
840 }
841
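// Patchable function entries: an explicit attribute on the declaration takes
// precedence over the -fpatchable-function-entry command-line values.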
842 unsigned Count, Offset;
843 if (const auto *Attr =
844 D ? D->getAttr<PatchableFunctionEntryAttr>() : nullptr) {
845 Count = Attr->getCount();
846 Offset = Attr->getOffset();
847 } else {
848 Count = CGM.getCodeGenOpts().PatchableFunctionEntryCount;
849 Offset = CGM.getCodeGenOpts().PatchableFunctionEntryOffset;
850 }
851 if (Count && Offset <= Count) {
852 Fn->addFnAttr("patchable-function-entry", std::to_string(Count - Offset));
853 if (Offset)
854 Fn->addFnAttr("patchable-function-prefix", std::to_string(Offset));
855 }
856
857 // Add no-jump-tables value.
858 Fn->addFnAttr("no-jump-tables",
859 llvm::toStringRef(CGM.getCodeGenOpts().NoUseJumpTables));
860
861 // Add no-inline-line-tables value.
862 if (CGM.getCodeGenOpts().NoInlineLineTables)
863 Fn->addFnAttr("no-inline-line-tables");
864
865 // Add profile-sample-accurate value.
866 if (CGM.getCodeGenOpts().ProfileSampleAccurate)
867 Fn->addFnAttr("profile-sample-accurate");
868
869 if (!CGM.getCodeGenOpts().SampleProfileFile.empty())
870 Fn->addFnAttr("use-sample-profile");
871
872 if (D && D->hasAttr<CFICanonicalJumpTableAttr>())
873 Fn->addFnAttr("cfi-canonical-jump-table");
874
875 if (getLangOpts().OpenCL) {
876 // Add metadata for a kernel function.
877 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
878 EmitOpenCLKernelMetadata(FD, Fn);
879 }
880
881 // If we are checking function types, emit a function type signature as
882 // prologue data.
883 if (getLangOpts().CPlusPlus && SanOpts.has(SanitizerKind::Function)) {
884 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
885 if (llvm::Constant *PrologueSig = getPrologueSignature(CGM, FD)) {
886 // Remove any (C++17) exception specifications, to allow calling e.g. a
887 // noexcept function through a non-noexcept pointer.
888 auto ProtoTy =
889 getContext().getFunctionTypeWithExceptionSpec(FD->getType(),
890 EST_None);
891 llvm::Constant *FTRTTIConst =
892 CGM.GetAddrOfRTTIDescriptor(ProtoTy, /*ForEH=*/true);
893 llvm::Constant *FTRTTIConstEncoded =
894 EncodeAddrForUseInPrologue(Fn, FTRTTIConst);
895 llvm::Constant *PrologueStructElems[] = {PrologueSig,
896 FTRTTIConstEncoded};
897 llvm::Constant *PrologueStructConst =
898 llvm::ConstantStruct::getAnon(PrologueStructElems, /*Packed=*/true);
899 Fn->setPrologueData(PrologueStructConst);
900 }
901 }
902 }
903
904 // If we're checking nullability, we need to know whether we can check the
905 // return value. Initialize the flag to 'true' and refine it in EmitParmDecl.
906 if (SanOpts.has(SanitizerKind::NullabilityReturn)) {
907 auto Nullability = FnRetTy->getNullability(getContext());
908 if (Nullability && *Nullability == NullabilityKind::NonNull) {
909 if (!(SanOpts.has(SanitizerKind::ReturnsNonnullAttribute) &&
910 CurCodeDecl && CurCodeDecl->getAttr<ReturnsNonNullAttr>()))
911 RetValNullabilityPrecondition =
912 llvm::ConstantInt::getTrue(getLLVMContext());
913 }
914 }
915
916 // If we're in C++ mode and the function name is "main", it is guaranteed
917 // to be norecurse by the standard (3.6.1.3 "The function main shall not be
918 // used within a program").
919 //
920 // OpenCL C 2.0 v2.2-11 s6.9.i:
921 // Recursion is not supported.
922 //
923 // SYCL v1.2.1 s3.10:
924 // kernels cannot include RTTI information, exception classes,
925 // recursive code, virtual functions or make use of C++ libraries that
926 // are not compiled for the device.
927 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
928 if ((getLangOpts().CPlusPlus && FD->isMain()) || getLangOpts().OpenCL ||
929 getLangOpts().SYCLIsDevice ||
930 (getLangOpts().CUDA && FD->hasAttr<CUDAGlobalAttr>()))
931 Fn->addFnAttr(llvm::Attribute::NoRecurse);
932 }
933
934 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D)) {
935 Builder.setIsFPConstrained(FD->hasAttr<StrictFPAttr>());
936 if (FD->hasAttr<StrictFPAttr>())
937 Fn->addFnAttr(llvm::Attribute::StrictFP);
938 }
939
940 // If a custom alignment is used, force realigning to this alignment on
941 // any main function which certainly will need it.
942 if (const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(D))
943 if ((FD->isMain() || FD->isMSVCRTEntryPoint()) &&
944 CGM.getCodeGenOpts().StackAlignment)
945 Fn->addFnAttr("stackrealign");
946
947 llvm::BasicBlock *EntryBB = createBasicBlock("entry", CurFn);
948
949 // Create a marker to make it easy to insert allocas into the entry block
950 // later. Don't create this with the builder, because we don't want it
951 // folded.
952 llvm::Value *Undef = llvm::UndefValue::get(Int32Ty);
953 AllocaInsertPt = new llvm::BitCastInst(Undef, Int32Ty, "allocapt", EntryBB);
954
955 ReturnBlock = getJumpDestInCurrentScope("return");
956
957 Builder.SetInsertPoint(EntryBB);
958
959 // If we're checking the return value, allocate space for a pointer to a
960 // precise source location of the checked return statement.
961 if (requiresReturnValueCheck()) {
962 ReturnLocation = CreateDefaultAlignTempAlloca(Int8PtrTy, "return.sloc.ptr");
963 InitTempAlloca(ReturnLocation, llvm::ConstantPointerNull::get(Int8PtrTy));
964 }
965
966 // Emit subprogram debug descriptor.
967 if (CGDebugInfo *DI = getDebugInfo()) {
968 // Reconstruct the type from the argument list so that implicit parameters,
969 // such as 'this' and 'vtt', show up in the debug info. Preserve the calling
970 // convention.
971 CallingConv CC = CallingConv::CC_C;
972 if (auto *FD = dyn_cast_or_null<FunctionDecl>(D))
973 if (const auto *SrcFnTy = FD->getType()->getAs<FunctionType>())
974 CC = SrcFnTy->getCallConv();
975 SmallVector<QualType, 16> ArgTypes;
976 for (const VarDecl *VD : Args)
977 ArgTypes.push_back(VD->getType());
978 QualType FnType = getContext().getFunctionType(
979 RetTy, ArgTypes, FunctionProtoType::ExtProtoInfo(CC));
980 DI->EmitFunctionStart(GD, Loc, StartLoc, FnType, CurFn, CurFuncIsThunk,
981 Builder);
982 }
983
984 if (ShouldInstrumentFunction()) {
985 if (CGM.getCodeGenOpts().InstrumentFunctions)
986 CurFn->addFnAttr("instrument-function-entry", "__cyg_profile_func_enter");
987 if (CGM.getCodeGenOpts().InstrumentFunctionsAfterInlining)
988 CurFn->addFnAttr("instrument-function-entry-inlined",
989 "__cyg_profile_func_enter");
990 if (CGM.getCodeGenOpts().InstrumentFunctionEntryBare)
991 CurFn->addFnAttr("instrument-function-entry-inlined",
992 "__cyg_profile_func_enter_bare");
993 }
994
995 // Since emitting the mcount call here impacts optimizations such as function
996 // inlining, we just add an attribute to insert an mcount call in the backend.
997 // The attribute "counting-function" is set to the mcount function name, which is
998 // architecture dependent.
999 if (CGM.getCodeGenOpts().InstrumentForProfiling) {
1000 // Calls to fentry/mcount should not be generated if the function has
1001 // the no_instrument_function attribute.
1002 if (!CurFuncDecl || !CurFuncDecl->hasAttr<NoInstrumentFunctionAttr>()) {
1003 if (CGM.getCodeGenOpts().CallFEntry)
1004 Fn->addFnAttr("fentry-call", "true");
1005 else {
1006 Fn->addFnAttr("instrument-function-entry-inlined",
1007 getTarget().getMCountName());
1008 }
1009 if (CGM.getCodeGenOpts().MNopMCount) {
1010 if (!CGM.getCodeGenOpts().CallFEntry)
1011 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1012 << "-mnop-mcount" << "-mfentry";
1013 Fn->addFnAttr("mnop-mcount");
1014 }
1015
1016 if (CGM.getCodeGenOpts().RecordMCount) {
1017 if (!CGM.getCodeGenOpts().CallFEntry)
1018 CGM.getDiags().Report(diag::err_opt_not_valid_without_opt)
1019 << "-mrecord-mcount" << "-mfentry";
1020 Fn->addFnAttr("mrecord-mcount");
1021 }
1022 }
1023 }
1024
1025 if (CGM.getCodeGenOpts().PackedStack) {
1026 if (getContext().getTargetInfo().getTriple().getArch() !=
1027 llvm::Triple::systemz)
1028 CGM.getDiags().Report(diag::err_opt_not_valid_on_target)
1029 << "-mpacked-stack";
1030 Fn->addFnAttr("packed-stack");
1031 }
1032
1033 if (RetTy->isVoidType()) {
1034 // Void type; nothing to return.
1035 ReturnValue = Address::invalid();
1036
1037 // Count the implicit return.
1038 if (!endsWithReturn(D))
1039 ++NumReturnExprs;
1040 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::Indirect) {
1041 // Indirect return; emit returned value directly into sret slot.
1042 // This reduces code size, and affects correctness in C++.
1043 auto AI = CurFn->arg_begin();
1044 if (CurFnInfo->getReturnInfo().isSRetAfterThis())
1045 ++AI;
1046 ReturnValue = Address(&*AI, CurFnInfo->getReturnInfo().getIndirectAlign());
1047 if (!CurFnInfo->getReturnInfo().getIndirectByVal()) {
1048 ReturnValuePointer =
1049 CreateDefaultAlignTempAlloca(Int8PtrTy, "result.ptr");
1050 Builder.CreateStore(Builder.CreatePointerBitCastOrAddrSpaceCast(
1051 ReturnValue.getPointer(), Int8PtrTy),
1052 ReturnValuePointer);
1053 }
1054 } else if (CurFnInfo->getReturnInfo().getKind() == ABIArgInfo::InAlloca &&
1055 !hasScalarEvaluationKind(CurFnInfo->getReturnType())) {
1056 // Load the sret pointer from the argument struct and return into that.
1057 unsigned Idx = CurFnInfo->getReturnInfo().getInAllocaFieldIndex();
1058 llvm::Function::arg_iterator EI = CurFn->arg_end();
1059 --EI;
1060 llvm::Value *Addr = Builder.CreateStructGEP(nullptr, &*EI, Idx);
1061 ReturnValuePointer = Address(Addr, getPointerAlign());
1062 Addr = Builder.CreateAlignedLoad(Addr, getPointerAlign(), "agg.result");
1063 ReturnValue = Address(Addr, CGM.getNaturalTypeAlignment(RetTy));
1064 } else {
1065 ReturnValue = CreateIRTemp(RetTy, "retval");
1066
1067 // Tell the epilog emitter to autorelease the result. We do this
1068 // now so that various specialized functions can suppress it
1069 // during their IR-generation.
1070 if (getLangOpts().ObjCAutoRefCount &&
1071 !CurFnInfo->isReturnsRetained() &&
1072 RetTy->isObjCRetainableType())
1073 AutoreleaseResult = true;
1074 }
1075
1076 EmitStartEHSpec(CurCodeDecl);
1077
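// Remember how deep the cleanup stack is after the prologue; FinishFunction
// pops back to this depth before emitting the epilogue.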
1078 PrologueCleanupDepth = EHStack.stable_begin();
1079
1080 // Emit OpenMP specific initialization of the device functions.
1081 if (getLangOpts().OpenMP && CurCodeDecl)
1082 CGM.getOpenMPRuntime().emitFunctionProlog(*this, CurCodeDecl);
1083
1084 EmitFunctionProlog(*CurFnInfo, CurFn, Args);
1085
1086 if (D && isa<CXXMethodDecl>(D) && cast<CXXMethodDecl>(D)->isInstance()) {
1087 CGM.getCXXABI().EmitInstanceFunctionProlog(*this);
1088 const CXXMethodDecl *MD = cast<CXXMethodDecl>(D);
1089 if (MD->getParent()->isLambda() &&
1090 MD->getOverloadedOperator() == OO_Call) {
1091 // We're in a lambda; figure out the captures.
1092 MD->getParent()->getCaptureFields(LambdaCaptureFields,
1093 LambdaThisCaptureField);
1094 if (LambdaThisCaptureField) {
1095 // If the lambda captures the object referred to by '*this' - either by
1096 // value or by reference, make sure CXXThisValue points to the correct
1097 // object.
1098
1099 // Get the lvalue for the field (which is a copy of the enclosing object
1100 // or contains the address of the enclosing object).
1101 LValue ThisFieldLValue = EmitLValueForLambdaField(LambdaThisCaptureField);
1102 if (!LambdaThisCaptureField->getType()->isPointerType()) {
1103 // If the enclosing object was captured by value, just use its address.
1104 CXXThisValue = ThisFieldLValue.getAddress(*this).getPointer();
1105 } else {
1106 // Load the lvalue pointed to by the field, since '*this' was captured
1107 // by reference.
1108 CXXThisValue =
1109 EmitLoadOfLValue(ThisFieldLValue, SourceLocation()).getScalarVal();
1110 }
1111 }
1112 for (auto *FD : MD->getParent()->fields()) {
1113 if (FD->hasCapturedVLAType()) {
1114 auto *ExprArg = EmitLoadOfLValue(EmitLValueForLambdaField(FD),
1115 SourceLocation()).getScalarVal();
1116 auto VAT = FD->getCapturedVLAType();
1117 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
1118 }
1119 }
1120 } else {
1121 // Not in a lambda; just use 'this' from the method.
1122 // FIXME: Should we generate a new load for each use of 'this'? The
1123 // fast register allocator would be happier...
1124 CXXThisValue = CXXABIThisValue;
1125 }
1126
1127 // Check the 'this' pointer once per function, if it's available.
1128 if (CXXABIThisValue) {
1129 SanitizerSet SkippedChecks;
1130 SkippedChecks.set(SanitizerKind::ObjectSize, true);
1131 QualType ThisTy = MD->getThisType();
1132
1133 // If this is the call operator of a lambda with no capture-default, it
1134 // may have a static invoker function, which may call this operator with
1135 // a null 'this' pointer.
1136 if (isLambdaCallOperator(MD) &&
1137 MD->getParent()->getLambdaCaptureDefault() == LCD_None)
1138 SkippedChecks.set(SanitizerKind::Null, true);
1139
1140 EmitTypeCheck(isa<CXXConstructorDecl>(MD) ? TCK_ConstructorCall
1141 : TCK_MemberCall,
1142 Loc, CXXABIThisValue, ThisTy,
1143 getContext().getTypeAlignInChars(ThisTy->getPointeeType()),
1144 SkippedChecks);
1145 }
1146 }
1147
1148 // If any of the arguments have a variably modified type, make sure to
1149 // emit the type size.
1150 for (FunctionArgList::const_iterator i = Args.begin(), e = Args.end();
1151 i != e; ++i) {
1152 const VarDecl *VD = *i;
1153
1154 // Dig out the type as written from ParmVarDecls; it's unclear whether
1155 // the standard (C99 6.9.1p10) requires this, but we're following the
1156 // precedent set by gcc.
1157 QualType Ty;
1158 if (const ParmVarDecl *PVD = dyn_cast<ParmVarDecl>(VD))
1159 Ty = PVD->getOriginalType();
1160 else
1161 Ty = VD->getType();
1162
1163 if (Ty->isVariablyModifiedType())
1164 EmitVariablyModifiedType(Ty);
1165 }
1166 // Emit a location at the end of the prologue.
1167 if (CGDebugInfo *DI = getDebugInfo())
1168 DI->EmitLocation(Builder, StartLoc);
1169
1170 // TODO: Do we need to handle this in two places like we do with
1171 // target-features/target-cpu?
1172 if (CurFuncDecl)
1173 if (const auto *VecWidth = CurFuncDecl->getAttr<MinVectorWidthAttr>())
1174 LargestVectorWidth = VecWidth->getVectorWidth();
1175 }
1176
1177 void CodeGenFunction::EmitFunctionBody(const Stmt *Body) {
1178 incrementProfileCounter(Body);
1179 if (CPlusPlusWithProgress())
1180 FnIsMustProgress = true;
1181
1182 if (const CompoundStmt *S = dyn_cast<CompoundStmt>(Body))
1183 EmitCompoundStmtWithoutScope(*S);
1184 else
1185 EmitStmt(Body);
1186
1187 // This is checked after emitting the function body so we know if there
1188 // are any permitted infinite loops.
1189 if (FnIsMustProgress)
1190 CurFn->addFnAttr(llvm::Attribute::MustProgress);
1191 }
1192
1193 /// When instrumenting to collect profile data, the counts for some blocks
1194 /// such as switch cases need to not include the fall-through counts, so
1195 /// emit a branch around the instrumentation code. When not instrumenting,
1196 /// this just calls EmitBlock().
1197 void CodeGenFunction::EmitBlockWithFallThrough(llvm::BasicBlock *BB,
1198 const Stmt *S) {
1199 llvm::BasicBlock *SkipCountBB = nullptr;
1200 if (HaveInsertPoint() && CGM.getCodeGenOpts().hasProfileClangInstr()) {
1201 // When instrumenting for profiling, the fallthrough to certain
1202 // statements needs to skip over the instrumentation code so that we
1203 // get an accurate count.
1204 SkipCountBB = createBasicBlock("skipcount");
1205 EmitBranch(SkipCountBB);
1206 }
1207 EmitBlock(BB);
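// The running count becomes the counter attached to S plus whatever fell
// through into this block.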
1208 uint64_t CurrentCount = getCurrentProfileCount();
1209 incrementProfileCounter(S);
1210 setCurrentProfileCount(getCurrentProfileCount() + CurrentCount);
1211 if (SkipCountBB)
1212 EmitBlock(SkipCountBB);
1213 }
1214
1215 /// Tries to mark the given function nounwind based on the
1216 /// non-existence of any throwing calls within it. We believe this is
1217 /// lightweight enough to do at -O0.
1218 static void TryMarkNoThrow(llvm::Function *F) {
1219 // LLVM treats 'nounwind' on a function as part of the type, so we
1220 // can't do this on functions that can be overwritten.
1221 if (F->isInterposable()) return;
1222
1223 for (llvm::BasicBlock &BB : *F)
1224 for (llvm::Instruction &I : BB)
1225 if (I.mayThrow())
1226 return;
1227
1228 F->setDoesNotThrow();
1229 }
1230
1231 QualType CodeGenFunction::BuildFunctionArgList(GlobalDecl GD,
1232 FunctionArgList &Args) {
1233 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1234 QualType ResTy = FD->getReturnType();
1235
1236 const CXXMethodDecl *MD = dyn_cast<CXXMethodDecl>(FD);
1237 if (MD && MD->isInstance()) {
1238 if (CGM.getCXXABI().HasThisReturn(GD))
1239 ResTy = MD->getThisType();
1240 else if (CGM.getCXXABI().hasMostDerivedReturn(GD))
1241 ResTy = CGM.getContext().VoidPtrTy;
1242 CGM.getCXXABI().buildThisParam(*this, Args);
1243 }
1244
1245 // The base version of an inheriting constructor whose constructed base is a
1246 // virtual base is not passed any arguments (because it doesn't actually call
1247 // the inherited constructor).
1248 bool PassedParams = true;
1249 if (const CXXConstructorDecl *CD = dyn_cast<CXXConstructorDecl>(FD))
1250 if (auto Inherited = CD->getInheritedConstructor())
1251 PassedParams =
1252 getTypes().inheritingCtorHasParams(Inherited, GD.getCtorType());
1253
1254 if (PassedParams) {
1255 for (auto *Param : FD->parameters()) {
1256 Args.push_back(Param);
1257 if (!Param->hasAttr<PassObjectSizeAttr>())
1258 continue;
1259
1260 auto *Implicit = ImplicitParamDecl::Create(
1261 getContext(), Param->getDeclContext(), Param->getLocation(),
1262 /*Id=*/nullptr, getContext().getSizeType(), ImplicitParamDecl::Other);
1263 SizeArguments[Param] = Implicit;
1264 Args.push_back(Implicit);
1265 }
1266 }
1267
1268 if (MD && (isa<CXXConstructorDecl>(MD) || isa<CXXDestructorDecl>(MD)))
1269 CGM.getCXXABI().addImplicitStructorParams(*this, ResTy, Args);
1270
1271 return ResTy;
1272 }
1273
1274 static bool
1275 shouldUseUndefinedBehaviorReturnOptimization(const FunctionDecl *FD,
1276 const ASTContext &Context) {
1277 QualType T = FD->getReturnType();
1278 // Avoid the optimization for functions that return a record type with a
1279 // trivial destructor or another trivially copyable type.
1280 if (const RecordType *RT = T.getCanonicalType()->getAs<RecordType>()) {
1281 if (const auto *ClassDecl = dyn_cast<CXXRecordDecl>(RT->getDecl()))
1282 return !ClassDecl->hasTrivialDestructor();
1283 }
1284 return !T.isTriviallyCopyableType(Context);
1285 }
1286
1287 void CodeGenFunction::GenerateCode(GlobalDecl GD, llvm::Function *Fn,
1288 const CGFunctionInfo &FnInfo) {
1289 const FunctionDecl *FD = cast<FunctionDecl>(GD.getDecl());
1290 CurGD = GD;
1291
1292 FunctionArgList Args;
1293 QualType ResTy = BuildFunctionArgList(GD, Args);
1294
1295 // Check if we should generate debug info for this function.
1296 if (FD->hasAttr<NoDebugAttr>())
1297 DebugInfo = nullptr; // disable debug info indefinitely for this function
1298
1299 // The function might not have a body if we're generating thunks for a
1300 // function declaration.
1301 SourceRange BodyRange;
1302 if (Stmt *Body = FD->getBody())
1303 BodyRange = Body->getSourceRange();
1304 else
1305 BodyRange = FD->getLocation();
1306 CurEHLocation = BodyRange.getEnd();
1307
1308 // Use the location of the start of the function to determine where
1309 // the function definition is located. By default use the location
1310 // of the declaration as the location for the subprogram. A function
1311 // may lack a declaration in the source code if it is created by code
1312 // gen. (examples: _GLOBAL__I_a, __cxx_global_array_dtor, thunk).
1313 SourceLocation Loc = FD->getLocation();
1314
1315 // If this is a function specialization then use the pattern body
1316 // as the location for the function.
1317 if (const FunctionDecl *SpecDecl = FD->getTemplateInstantiationPattern())
1318 if (SpecDecl->hasBody(SpecDecl))
1319 Loc = SpecDecl->getLocation();
1320
1321 Stmt *Body = FD->getBody();
1322
1323 // Initialize helper which will detect jumps which can cause invalid lifetime
1324 // markers.
1325 if (Body && ShouldEmitLifetimeMarkers)
1326 Bypasses.Init(Body);
1327
1328 // Emit the standard function prologue.
1329 StartFunction(GD, ResTy, Fn, FnInfo, Args, Loc, BodyRange.getBegin());
1330
1331 // Generate the body of the function.
1332 PGO.assignRegionCounters(GD, CurFn);
1333 if (isa<CXXDestructorDecl>(FD))
1334 EmitDestructorBody(Args);
1335 else if (isa<CXXConstructorDecl>(FD))
1336 EmitConstructorBody(Args);
1337 else if (getLangOpts().CUDA &&
1338 !getLangOpts().CUDAIsDevice &&
1339 FD->hasAttr<CUDAGlobalAttr>())
1340 CGM.getCUDARuntime().emitDeviceStub(*this, Args);
1341 else if (isa<CXXMethodDecl>(FD) &&
1342 cast<CXXMethodDecl>(FD)->isLambdaStaticInvoker()) {
1343 // The lambda static invoker function is special, because it forwards or
1344 // clones the body of the function call operator (but is actually static).
1345 EmitLambdaStaticInvokeBody(cast<CXXMethodDecl>(FD));
1346 } else if (FD->isDefaulted() && isa<CXXMethodDecl>(FD) &&
1347 (cast<CXXMethodDecl>(FD)->isCopyAssignmentOperator() ||
1348 cast<CXXMethodDecl>(FD)->isMoveAssignmentOperator())) {
1349 // Implicit copy-assignment gets the same special treatment as implicit
1350 // copy-constructors.
1351 emitImplicitAssignmentOperatorBody(Args);
1352 } else if (Body) {
1353 EmitFunctionBody(Body);
1354 } else
1355 llvm_unreachable("no definition for emitted function");
1356
1357 // C++11 [stmt.return]p2:
1358 // Flowing off the end of a function [...] results in undefined behavior in
1359 // a value-returning function.
1360 // C11 6.9.1p12:
1361 // If the '}' that terminates a function is reached, and the value of the
1362 // function call is used by the caller, the behavior is undefined.
1363 if (getLangOpts().CPlusPlus && !FD->hasImplicitReturnZero() && !SawAsmBlock &&
1364 !FD->getReturnType()->isVoidType() && Builder.GetInsertBlock()) {
1365 bool ShouldEmitUnreachable =
1366 CGM.getCodeGenOpts().StrictReturn ||
1367 shouldUseUndefinedBehaviorReturnOptimization(FD, getContext());
1368 if (SanOpts.has(SanitizerKind::Return)) {
1369 SanitizerScope SanScope(this);
1370 llvm::Value *IsFalse = Builder.getFalse();
1371 EmitCheck(std::make_pair(IsFalse, SanitizerKind::Return),
1372 SanitizerHandler::MissingReturn,
1373 EmitCheckSourceLocation(FD->getLocation()), None);
1374 } else if (ShouldEmitUnreachable) {
1375 if (CGM.getCodeGenOpts().OptimizationLevel == 0)
1376 EmitTrapCall(llvm::Intrinsic::trap);
1377 }
1378 if (SanOpts.has(SanitizerKind::Return) || ShouldEmitUnreachable) {
1379 Builder.CreateUnreachable();
1380 Builder.ClearInsertionPoint();
1381 }
1382 }
1383
1384 // Emit the standard function epilogue.
1385 FinishFunction(BodyRange.getEnd());
1386
1387 // If we haven't marked the function nothrow through other means, do
1388 // a quick pass now to see if we can.
1389 if (!CurFn->doesNotThrow())
1390 TryMarkNoThrow(CurFn);
1391 }
1392
1393 /// ContainsLabel - Return true if the statement contains a label in it. If
1394 /// this statement is not executed normally, and it does not contain a label,
1395 /// we can safely remove the code.
1396 bool CodeGenFunction::ContainsLabel(const Stmt *S, bool IgnoreCaseStmts) {
1397 // Null statement, not a label!
1398 if (!S) return false;
1399
1400 // If this is a label, we have to emit the code, consider something like:
1401 // if (0) { ... foo: bar(); } goto foo;
1402 //
1403 // TODO: If anyone cared, we could track __label__'s, since we know that you
1404 // can't jump to one from outside their declared region.
1405 if (isa<LabelStmt>(S))
1406 return true;
1407
1408 // If this is a case/default statement, and we haven't seen a switch, we have
1409 // to emit the code.
1410 if (isa<SwitchCase>(S) && !IgnoreCaseStmts)
1411 return true;
1412
1413 // If this is a switch statement, we want to ignore cases below it.
1414 if (isa<SwitchStmt>(S))
1415 IgnoreCaseStmts = true;
1416
1417 // Scan subexpressions for verboten labels.
1418 for (const Stmt *SubStmt : S->children())
1419 if (ContainsLabel(SubStmt, IgnoreCaseStmts))
1420 return true;
1421
1422 return false;
1423 }
1424
1425 /// containsBreak - Return true if the statement contains a break that could
1426 /// escape it. A switch or loop nested within the statement may contain its
1427 /// own break; such a break cannot escape, so it does not count.
1428 bool CodeGenFunction::containsBreak(const Stmt *S) {
1429 // Null statement, no break to find.
1430 if (!S) return false;
1431
1432 // If this is a switch or loop that defines its own break scope, then we can
1433 // include it and anything inside of it.
1434 if (isa<SwitchStmt>(S) || isa<WhileStmt>(S) || isa<DoStmt>(S) ||
1435 isa<ForStmt>(S))
1436 return false;
1437
1438 if (isa<BreakStmt>(S))
1439 return true;
1440
1441 // Scan subexpressions for verboten breaks.
1442 for (const Stmt *SubStmt : S->children())
1443 if (containsBreak(SubStmt))
1444 return true;
1445
1446 return false;
1447 }
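// For illustration (a hedged reading of the checks above):
//   containsBreak( "while (c) { break; }" )  --> false: the while defines its
//     own break scope, so the nested break cannot escape the statement.
//   containsBreak( "if (c) break;" )         --> true: an if does not create a
//     break scope, so the break escapes to an enclosing loop or switch.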
1448
1449 bool CodeGenFunction::mightAddDeclToScope(const Stmt *S) {
1450 if (!S) return false;
1451
1452 // Some statement kinds add a scope and thus never add a decl to the current
1453 // scope. Note, this list is longer than the list of statements that might
1454 // have an unscoped decl nested within them, but this way is conservatively
1455 // correct even if more statement kinds are added.
1456 if (isa<IfStmt>(S) || isa<SwitchStmt>(S) || isa<WhileStmt>(S) ||
1457 isa<DoStmt>(S) || isa<ForStmt>(S) || isa<CompoundStmt>(S) ||
1458 isa<CXXForRangeStmt>(S) || isa<CXXTryStmt>(S) ||
1459 isa<ObjCForCollectionStmt>(S) || isa<ObjCAtTryStmt>(S))
1460 return false;
1461
1462 if (isa<DeclStmt>(S))
1463 return true;
1464
1465 for (const Stmt *SubStmt : S->children())
1466 if (mightAddDeclToScope(SubStmt))
1467 return true;
1468
1469 return false;
1470 }
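// For illustration (a hedged reading of the checks above):
//   mightAddDeclToScope( "{ int x; }" )    --> false: the CompoundStmt opens
//     its own scope, so the declaration cannot leak into the current one.
//   mightAddDeclToScope( "int x = f();" )  --> true: a bare DeclStmt adds 'x'
//     to the current scope, so the statement cannot simply be skipped.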
1471
1472 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1473 /// to a constant, or if it does but contains a label, return false. If it
1474 /// constant folds, return true and set the boolean result in ResultBool.
1475 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1476 bool &ResultBool,
1477 bool AllowLabels) {
1478 llvm::APSInt ResultInt;
1479 if (!ConstantFoldsToSimpleInteger(Cond, ResultInt, AllowLabels))
1480 return false;
1481
1482 ResultBool = ResultInt.getBoolValue();
1483 return true;
1484 }
1485
1486 /// ConstantFoldsToSimpleInteger - If the specified expression does not fold
1487 /// to a constant, or if it does but contains a label, return false. If it
1488 /// constant folds, return true and set the folded value in ResultInt.
1489 bool CodeGenFunction::ConstantFoldsToSimpleInteger(const Expr *Cond,
1490 llvm::APSInt &ResultInt,
1491 bool AllowLabels) {
1492 // FIXME: Rename and handle conversion of other evaluatable things
1493 // to bool.
1494 Expr::EvalResult Result;
1495 if (!Cond->EvaluateAsInt(Result, getContext()))
1496 return false; // Not foldable, not integer or not fully evaluatable.
1497
1498 llvm::APSInt Int = Result.Val.getInt();
1499 if (!AllowLabels && CodeGenFunction::ContainsLabel(Cond))
1500 return false; // Contains a label.
1501
1502 ResultInt = Int;
1503 return true;
1504 }
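// Hedged usage sketch (the local names here are illustrative only):
//   bool CondVal;
//   if (CGF.ConstantFoldsToSimpleInteger(IfCond, CondVal))
//     /* emit only the branch selected by CondVal */;
// Note that a condition whose discarded sub-expression contains a label still
// fails the fold unless AllowLabels is set, because ContainsLabel() vetoes it.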
1505
1506 /// EmitBranchOnBoolExpr - Emit a branch on a boolean condition (e.g. for an if
1507 /// statement) to the specified blocks. Based on the condition, this might try
1508 /// to simplify the codegen of the conditional based on the branch.
1509 /// \param LH The value of the likelihood attribute on the True branch.
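/// For illustration (a hedged sketch of the general shape; block names taken
/// from the code below): "if (a && b) T(); else F();" lowers roughly to
///   entry:          br a, label %land.lhs.true, label %F
///   land.lhs.true:  br b, label %T, label %F
/// so the RHS is evaluated only when the LHS is true, and all of TrueCount is
/// attributed to the RHS branch.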
1510 void CodeGenFunction::EmitBranchOnBoolExpr(const Expr *Cond,
1511 llvm::BasicBlock *TrueBlock,
1512 llvm::BasicBlock *FalseBlock,
1513 uint64_t TrueCount,
1514 Stmt::Likelihood LH) {
1515 Cond = Cond->IgnoreParens();
1516
1517 if (const BinaryOperator *CondBOp = dyn_cast<BinaryOperator>(Cond)) {
1518
1519 // Handle X && Y in a condition.
1520 if (CondBOp->getOpcode() == BO_LAnd) {
1521 // If we have "1 && X", simplify the code. "0 && X" would have constant
1522 // folded if the case was simple enough.
1523 bool ConstantBool = false;
1524 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1525 ConstantBool) {
1526 // br(1 && X) -> br(X).
1527 incrementProfileCounter(CondBOp);
1528 return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1529 TrueCount, LH);
1530 }
1531
1532 // If we have "X && 1", simplify the code to use an uncond branch.
1533 // "X && 0" would have been constant folded to 0.
1534 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1535 ConstantBool) {
1536 // br(X && 1) -> br(X).
1537 return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1538 TrueCount, LH);
1539 }
1540
1541 // Emit the LHS as a conditional. If the LHS conditional is false, we
1542 // want to jump to the FalseBlock.
1543 llvm::BasicBlock *LHSTrue = createBasicBlock("land.lhs.true");
1544 // The counter tells us how often we evaluate RHS, and all of TrueCount
1545 // can be propagated to that branch.
1546 uint64_t RHSCount = getProfileCount(CondBOp->getRHS());
1547
1548 ConditionalEvaluation eval(*this);
1549 {
1550 ApplyDebugLocation DL(*this, Cond);
1551 // Propagate the likelihood attribute like __builtin_expect
1552 // __builtin_expect(X && Y, 1) -> X and Y are likely
1553 // __builtin_expect(X && Y, 0) -> only Y is unlikely
1554 EmitBranchOnBoolExpr(CondBOp->getLHS(), LHSTrue, FalseBlock, RHSCount,
1555 LH == Stmt::LH_Unlikely ? Stmt::LH_None : LH);
1556 EmitBlock(LHSTrue);
1557 }
1558
1559 incrementProfileCounter(CondBOp);
1560 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1561
1562 // Any temporaries created here are conditional.
1563 eval.begin(*this);
1564 EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, TrueCount,
1565 LH);
1566 eval.end(*this);
1567
1568 return;
1569 }
1570
1571 if (CondBOp->getOpcode() == BO_LOr) {
1572 // If we have "0 || X", simplify the code. "1 || X" would have constant
1573 // folded if the case was simple enough.
1574 bool ConstantBool = false;
1575 if (ConstantFoldsToSimpleInteger(CondBOp->getLHS(), ConstantBool) &&
1576 !ConstantBool) {
1577 // br(0 || X) -> br(X).
1578 incrementProfileCounter(CondBOp);
1579 return EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock,
1580 TrueCount, LH);
1581 }
1582
1583 // If we have "X || 0", simplify the code to use an uncond branch.
1584 // "X || 1" would have been constant folded to 1.
1585 if (ConstantFoldsToSimpleInteger(CondBOp->getRHS(), ConstantBool) &&
1586 !ConstantBool) {
1587 // br(X || 0) -> br(X).
1588 return EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, FalseBlock,
1589 TrueCount, LH);
1590 }
1591
1592 // Emit the LHS as a conditional. If the LHS conditional is true, we
1593 // want to jump to the TrueBlock.
1594 llvm::BasicBlock *LHSFalse = createBasicBlock("lor.lhs.false");
1595 // We have the count for entry to the RHS and for the whole expression
1596 // being true, so we can divvy up the true count between the short circuit
1597 // and the RHS.
1598 uint64_t LHSCount =
1599 getCurrentProfileCount() - getProfileCount(CondBOp->getRHS());
1600 uint64_t RHSCount = TrueCount - LHSCount;
1601
1602 ConditionalEvaluation eval(*this);
1603 {
1604 // Propagate the likelihood attribute like __builtin_expect
1605 // __builtin_expect(X || Y, 1) -> only Y is likely
1606 // __builtin_expect(X || Y, 0) -> both X and Y are unlikely
1607 ApplyDebugLocation DL(*this, Cond);
1608 EmitBranchOnBoolExpr(CondBOp->getLHS(), TrueBlock, LHSFalse, LHSCount,
1609 LH == Stmt::LH_Likely ? Stmt::LH_None : LH);
1610 EmitBlock(LHSFalse);
1611 }
1612
1613 incrementProfileCounter(CondBOp);
1614 setCurrentProfileCount(getProfileCount(CondBOp->getRHS()));
1615
1616 // Any temporaries created here are conditional.
1617 eval.begin(*this);
1618 EmitBranchOnBoolExpr(CondBOp->getRHS(), TrueBlock, FalseBlock, RHSCount,
1619 LH);
1620
1621 eval.end(*this);
1622
1623 return;
1624 }
1625 }
1626
1627 if (const UnaryOperator *CondUOp = dyn_cast<UnaryOperator>(Cond)) {
1628 // br(!x, t, f) -> br(x, f, t)
1629 if (CondUOp->getOpcode() == UO_LNot) {
1630 // Negate the count.
1631 uint64_t FalseCount = getCurrentProfileCount() - TrueCount;
1632 // The values of the enum are chosen to make this negation possible.
1633 LH = static_cast<Stmt::Likelihood>(-LH);
1634 // Negate the condition and swap the destination blocks.
1635 return EmitBranchOnBoolExpr(CondUOp->getSubExpr(), FalseBlock, TrueBlock,
1636 FalseCount, LH);
1637 }
1638 }
1639
1640 if (const ConditionalOperator *CondOp = dyn_cast<ConditionalOperator>(Cond)) {
1641 // br(c ? x : y, t, f) -> br(c, br(x, t, f), br(y, t, f))
1642 llvm::BasicBlock *LHSBlock = createBasicBlock("cond.true");
1643 llvm::BasicBlock *RHSBlock = createBasicBlock("cond.false");
1644
1645 // The ConditionalOperator itself has no likelihood information for its
1646 // true and false branches. This matches the behavior of __builtin_expect.
1647 ConditionalEvaluation cond(*this);
1648 EmitBranchOnBoolExpr(CondOp->getCond(), LHSBlock, RHSBlock,
1649 getProfileCount(CondOp), Stmt::LH_None);
1650
1651 // When computing PGO branch weights, we only know the overall count for
1652 // the true block. This code is essentially doing tail duplication of the
1653 // naive code-gen, introducing new edges for which counts are not
1654 // available. Divide the counts proportionally between the LHS and RHS of
1655 // the conditional operator.
1656 uint64_t LHSScaledTrueCount = 0;
1657 if (TrueCount) {
1658 double LHSRatio =
1659 getProfileCount(CondOp) / (double)getCurrentProfileCount();
1660 LHSScaledTrueCount = TrueCount * LHSRatio;
1661 }
1662
1663 cond.begin(*this);
1664 EmitBlock(LHSBlock);
1665 incrementProfileCounter(CondOp);
1666 {
1667 ApplyDebugLocation DL(*this, Cond);
1668 EmitBranchOnBoolExpr(CondOp->getLHS(), TrueBlock, FalseBlock,
1669 LHSScaledTrueCount, LH);
1670 }
1671 cond.end(*this);
1672
1673 cond.begin(*this);
1674 EmitBlock(RHSBlock);
1675 EmitBranchOnBoolExpr(CondOp->getRHS(), TrueBlock, FalseBlock,
1676 TrueCount - LHSScaledTrueCount, LH);
1677 cond.end(*this);
1678
1679 return;
1680 }
1681
1682 if (const CXXThrowExpr *Throw = dyn_cast<CXXThrowExpr>(Cond)) {
1683 // Conditional operator handling can give us a throw expression as a
1684 // condition for a case like:
1685 // br(c ? throw x : y, t, f) -> br(c, br(throw x, t, f), br(y, t, f)
1686 // Fold this to:
1687 // br(c, throw x, br(y, t, f))
1688 EmitCXXThrowExpr(Throw, /*KeepInsertionPoint*/false);
1689 return;
1690 }
1691
1692 // If the branch has a condition wrapped by __builtin_unpredictable,
1693 // create metadata that specifies that the branch is unpredictable.
1694 // Don't bother if not optimizing because that metadata would not be used.
1695 llvm::MDNode *Unpredictable = nullptr;
1696 auto *Call = dyn_cast<CallExpr>(Cond->IgnoreImpCasts());
1697 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1698 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1699 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1700 llvm::MDBuilder MDHelper(getLLVMContext());
1701 Unpredictable = MDHelper.createUnpredictable();
1702 }
1703 }
1704
1705 llvm::MDNode *Weights = createBranchWeights(LH);
1706 if (!Weights) {
1707 uint64_t CurrentCount = std::max(getCurrentProfileCount(), TrueCount);
1708 Weights = createProfileWeights(TrueCount, CurrentCount - TrueCount);
1709 }
1710
1711 // Emit the code with the fully general case.
1712 llvm::Value *CondV;
1713 {
1714 ApplyDebugLocation DL(*this, Cond);
1715 CondV = EvaluateExprAsBool(Cond);
1716 }
1717 Builder.CreateCondBr(CondV, TrueBlock, FalseBlock, Weights, Unpredictable);
1718 }
1719
1720 /// ErrorUnsupported - Print out an error that codegen doesn't support the
1721 /// specified stmt yet.
1722 void CodeGenFunction::ErrorUnsupported(const Stmt *S, const char *Type) {
1723 CGM.ErrorUnsupported(S, Type);
1724 }
1725
1726 /// emitNonZeroVLAInit - Emit the "zero" initialization of a
1727 /// variable-length array whose elements have a non-zero bit-pattern.
1728 ///
1729 /// \param baseType the inner-most element type of the array
1730 /// \param src - a char* pointing to the bit-pattern for a single
1731 /// base element of the array
1732 /// \param sizeInChars - the total size of the VLA, in chars
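/// For illustration (hedged; the ABI detail is background, not asserted by
/// this file): under the Itanium C++ ABI a null pointer-to-data-member is
/// encoded as -1, so zero-initializing a VLA such as
///   struct S { int S::*p; };
///   void g(int n) { S a[n] = {}; }   // hypothetical, GNU VLA extension
/// cannot use a plain memset(0). Instead a single "null" S is materialized
/// and this loop memcpy's that bit-pattern over every VLA element.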
1733 static void emitNonZeroVLAInit(CodeGenFunction &CGF, QualType baseType,
1734 Address dest, Address src,
1735 llvm::Value *sizeInChars) {
1736 CGBuilderTy &Builder = CGF.Builder;
1737
1738 CharUnits baseSize = CGF.getContext().getTypeSizeInChars(baseType);
1739 llvm::Value *baseSizeInChars
1740 = llvm::ConstantInt::get(CGF.IntPtrTy, baseSize.getQuantity());
1741
1742 Address begin =
1743 Builder.CreateElementBitCast(dest, CGF.Int8Ty, "vla.begin");
1744 llvm::Value *end =
1745 Builder.CreateInBoundsGEP(begin.getPointer(), sizeInChars, "vla.end");
1746
1747 llvm::BasicBlock *originBB = CGF.Builder.GetInsertBlock();
1748 llvm::BasicBlock *loopBB = CGF.createBasicBlock("vla-init.loop");
1749 llvm::BasicBlock *contBB = CGF.createBasicBlock("vla-init.cont");
1750
1751 // Make a loop over the VLA. C99 guarantees that the VLA element
1752 // count must be nonzero.
1753 CGF.EmitBlock(loopBB);
1754
1755 llvm::PHINode *cur = Builder.CreatePHI(begin.getType(), 2, "vla.cur");
1756 cur->addIncoming(begin.getPointer(), originBB);
1757
1758 CharUnits curAlign =
1759 dest.getAlignment().alignmentOfArrayElement(baseSize);
1760
1761 // memcpy the individual element bit-pattern.
1762 Builder.CreateMemCpy(Address(cur, curAlign), src, baseSizeInChars,
1763 /*volatile*/ false);
1764
1765 // Go to the next element.
1766 llvm::Value *next =
1767 Builder.CreateInBoundsGEP(CGF.Int8Ty, cur, baseSizeInChars, "vla.next");
1768
1769 // Leave if that's the end of the VLA.
1770 llvm::Value *done = Builder.CreateICmpEQ(next, end, "vla-init.isdone");
1771 Builder.CreateCondBr(done, contBB, loopBB);
1772 cur->addIncoming(next, loopBB);
1773
1774 CGF.EmitBlock(contBB);
1775 }
1776
1777 void
1778 CodeGenFunction::EmitNullInitialization(Address DestPtr, QualType Ty) {
1779 // Ignore empty classes in C++.
1780 if (getLangOpts().CPlusPlus) {
1781 if (const RecordType *RT = Ty->getAs<RecordType>()) {
1782 if (cast<CXXRecordDecl>(RT->getDecl())->isEmpty())
1783 return;
1784 }
1785 }
1786
1787 // Cast the dest ptr to the appropriate i8 pointer type.
1788 if (DestPtr.getElementType() != Int8Ty)
1789 DestPtr = Builder.CreateElementBitCast(DestPtr, Int8Ty);
1790
1791 // Get size and alignment info for this aggregate.
1792 CharUnits size = getContext().getTypeSizeInChars(Ty);
1793
1794 llvm::Value *SizeVal;
1795 const VariableArrayType *vla;
1796
1797 // Don't bother emitting a zero-byte memset.
1798 if (size.isZero()) {
1799 // But note that getTypeInfo returns 0 for a VLA.
1800 if (const VariableArrayType *vlaType =
1801 dyn_cast_or_null<VariableArrayType>(
1802 getContext().getAsArrayType(Ty))) {
1803 auto VlaSize = getVLASize(vlaType);
1804 SizeVal = VlaSize.NumElts;
1805 CharUnits eltSize = getContext().getTypeSizeInChars(VlaSize.Type);
1806 if (!eltSize.isOne())
1807 SizeVal = Builder.CreateNUWMul(SizeVal, CGM.getSize(eltSize));
1808 vla = vlaType;
1809 } else {
1810 return;
1811 }
1812 } else {
1813 SizeVal = CGM.getSize(size);
1814 vla = nullptr;
1815 }
1816
1817 // If the type contains a pointer to data member we can't memset it to zero.
1818 // Instead, create a null constant and copy it to the destination.
1819 // TODO: there are other patterns besides zero that we can usefully memset,
1820 // like -1, which happens to be the pattern used by member-pointers.
1821 if (!CGM.getTypes().isZeroInitializable(Ty)) {
1822 // For a VLA, emit a single element, then splat that over the VLA.
1823 if (vla) Ty = getContext().getBaseElementType(vla);
1824
1825 llvm::Constant *NullConstant = CGM.EmitNullConstant(Ty);
1826
1827 llvm::GlobalVariable *NullVariable =
1828 new llvm::GlobalVariable(CGM.getModule(), NullConstant->getType(),
1829 /*isConstant=*/true,
1830 llvm::GlobalVariable::PrivateLinkage,
1831 NullConstant, Twine());
1832 CharUnits NullAlign = DestPtr.getAlignment();
1833 NullVariable->setAlignment(NullAlign.getAsAlign());
1834 Address SrcPtr(Builder.CreateBitCast(NullVariable, Builder.getInt8PtrTy()),
1835 NullAlign);
1836
1837 if (vla) return emitNonZeroVLAInit(*this, Ty, DestPtr, SrcPtr, SizeVal);
1838
1839 // Get and call the appropriate llvm.memcpy overload.
1840 Builder.CreateMemCpy(DestPtr, SrcPtr, SizeVal, false);
1841 return;
1842 }
1843
1844 // Otherwise, just memset the whole thing to zero. This is legal
1845 // because in LLVM, all default initializers (other than the ones we just
1846 // handled above) are guaranteed to have a bit pattern of all zeros.
1847 Builder.CreateMemSet(DestPtr, Builder.getInt8(0), SizeVal, false);
1848 }
1849
1850 llvm::BlockAddress *CodeGenFunction::GetAddrOfLabel(const LabelDecl *L) {
1851 // Make sure that there is a block for the indirect goto.
1852 if (!IndirectBranch)
1853 GetIndirectGotoBlock();
1854
1855 llvm::BasicBlock *BB = getJumpDestForLabel(L).getBlock();
1856
1857 // Make sure the indirect branch includes all of the address-taken blocks.
1858 IndirectBranch->addDestination(BB);
1859 return llvm::BlockAddress::get(CurFn, BB);
1860 }
1861
1862 llvm::BasicBlock *CodeGenFunction::GetIndirectGotoBlock() {
1863 // If we already made the indirect branch for indirect goto, return its block.
1864 if (IndirectBranch) return IndirectBranch->getParent();
1865
1866 CGBuilderTy TmpBuilder(*this, createBasicBlock("indirectgoto"));
1867
1868 // Create the PHI node that indirect gotos will add entries to.
1869 llvm::Value *DestVal = TmpBuilder.CreatePHI(Int8PtrTy, 0,
1870 "indirect.goto.dest");
1871
1872 // Create the indirect branch instruction.
1873 IndirectBranch = TmpBuilder.CreateIndirectBr(DestVal);
1874 return IndirectBranch->getParent();
1875 }
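// Illustrative trigger (hedged; GNU computed-goto extension):
//   void *p = &&out;   // GetAddrOfLabel -> blockaddress of the label's block
//   goto *p;           // dispatches through the shared "indirectgoto" block
// out:;
// Every label whose address is taken is added as a destination of the single
// indirectbr above, so the dispatch block can reach all of them.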
1876
1877 /// Computes the length of an array in elements, as well as the base
1878 /// element type and a properly-typed first element pointer.
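/// For illustration (hedged): for "int a[n][m][4]" the cached VLA sizes give
/// n*m elements, the constant-length tail contributes a factor of 4, baseType
/// becomes 'int', and addr is adjusted (by GEP or bitcast) to point at the
/// first 'int'; the returned length is the NUW product n*m*4.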
1879 llvm::Value *CodeGenFunction::emitArrayLength(const ArrayType *origArrayType,
1880 QualType &baseType,
1881 Address &addr) {
1882 const ArrayType *arrayType = origArrayType;
1883
1884 // If it's a VLA, we have to load the stored size. Note that
1885 // this is the size of the VLA in bytes, not its size in elements.
1886 llvm::Value *numVLAElements = nullptr;
1887 if (isa<VariableArrayType>(arrayType)) {
1888 numVLAElements = getVLASize(cast<VariableArrayType>(arrayType)).NumElts;
1889
1890 // Walk into all VLAs. This doesn't require changes to addr,
1891 // which has type T* where T is the first non-VLA element type.
1892 do {
1893 QualType elementType = arrayType->getElementType();
1894 arrayType = getContext().getAsArrayType(elementType);
1895
1896 // If we only have VLA components, 'addr' requires no adjustment.
1897 if (!arrayType) {
1898 baseType = elementType;
1899 return numVLAElements;
1900 }
1901 } while (isa<VariableArrayType>(arrayType));
1902
1903 // We get out here only if we find a constant array type
1904 // inside the VLA.
1905 }
1906
1907 // We have some number of constant-length arrays, so addr should
1908 // have LLVM type [M x [N x [...]]]*. Build a GEP that walks
1909 // down to the first element of addr.
1910 SmallVector<llvm::Value*, 8> gepIndices;
1911
1912 // GEP down to the array type.
1913 llvm::ConstantInt *zero = Builder.getInt32(0);
1914 gepIndices.push_back(zero);
1915
1916 uint64_t countFromCLAs = 1;
1917 QualType eltType;
1918
1919 llvm::ArrayType *llvmArrayType =
1920 dyn_cast<llvm::ArrayType>(addr.getElementType());
1921 while (llvmArrayType) {
1922 assert(isa<ConstantArrayType>(arrayType));
1923 assert(cast<ConstantArrayType>(arrayType)->getSize().getZExtValue()
1924 == llvmArrayType->getNumElements());
1925
1926 gepIndices.push_back(zero);
1927 countFromCLAs *= llvmArrayType->getNumElements();
1928 eltType = arrayType->getElementType();
1929
1930 llvmArrayType =
1931 dyn_cast<llvm::ArrayType>(llvmArrayType->getElementType());
1932 arrayType = getContext().getAsArrayType(arrayType->getElementType());
1933 assert((!llvmArrayType || arrayType) &&
1934 "LLVM and Clang types are out-of-synch");
1935 }
1936
1937 if (arrayType) {
1938 // From this point onwards, the Clang array type has been emitted
1939 // as some other type (probably a packed struct). Compute the array
1940 // size, and just emit the 'begin' expression as a bitcast.
1941 while (arrayType) {
1942 countFromCLAs *=
1943 cast<ConstantArrayType>(arrayType)->getSize().getZExtValue();
1944 eltType = arrayType->getElementType();
1945 arrayType = getContext().getAsArrayType(eltType);
1946 }
1947
1948 llvm::Type *baseType = ConvertType(eltType);
1949 addr = Builder.CreateElementBitCast(addr, baseType, "array.begin");
1950 } else {
1951 // Create the actual GEP.
1952 addr = Address(Builder.CreateInBoundsGEP(addr.getPointer(),
1953 gepIndices, "array.begin"),
1954 addr.getAlignment());
1955 }
1956
1957 baseType = eltType;
1958
1959 llvm::Value *numElements
1960 = llvm::ConstantInt::get(SizeTy, countFromCLAs);
1961
1962 // If we had any VLA dimensions, factor them in.
1963 if (numVLAElements)
1964 numElements = Builder.CreateNUWMul(numVLAElements, numElements);
1965
1966 return numElements;
1967 }
1968
1969 CodeGenFunction::VlaSizePair CodeGenFunction::getVLASize(QualType type) {
1970 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
1971 assert(vla && "type was not a variable array type!");
1972 return getVLASize(vla);
1973 }
1974
1975 CodeGenFunction::VlaSizePair
1976 CodeGenFunction::getVLASize(const VariableArrayType *type) {
1977 // The number of elements so far; always size_t.
1978 llvm::Value *numElements = nullptr;
1979
1980 QualType elementType;
1981 do {
1982 elementType = type->getElementType();
1983 llvm::Value *vlaSize = VLASizeMap[type->getSizeExpr()];
1984 assert(vlaSize && "no size for VLA!");
1985 assert(vlaSize->getType() == SizeTy);
1986
1987 if (!numElements) {
1988 numElements = vlaSize;
1989 } else {
1990 // It's undefined behavior if this wraps around, so mark it that way.
1991 // FIXME: Teach -fsanitize=undefined to trap this.
1992 numElements = Builder.CreateNUWMul(numElements, vlaSize);
1993 }
1994 } while ((type = getContext().getAsVariableArrayType(elementType)));
1995
1996 return { numElements, elementType };
1997 }
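// For illustration (hedged): after EmitVariablyModifiedType() has recorded
// the size expressions for "float f[n][m];", getVLASize(typeof(f)) returns
// { n*m as a size_t NUW product, 'float' }, i.e. the total element count and
// the innermost non-VLA element type.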
1998
1999 CodeGenFunction::VlaSizePair
2000 CodeGenFunction::getVLAElements1D(QualType type) {
2001 const VariableArrayType *vla = getContext().getAsVariableArrayType(type);
2002 assert(vla && "type was not a variable array type!");
2003 return getVLAElements1D(vla);
2004 }
2005
2006 CodeGenFunction::VlaSizePair
2007 CodeGenFunction::getVLAElements1D(const VariableArrayType *Vla) {
2008 llvm::Value *VlaSize = VLASizeMap[Vla->getSizeExpr()];
2009 assert(VlaSize && "no size for VLA!");
2010 assert(VlaSize->getType() == SizeTy);
2011 return { VlaSize, Vla->getElementType() };
2012 }
2013
2014 void CodeGenFunction::EmitVariablyModifiedType(QualType type) {
2015 assert(type->isVariablyModifiedType() &&
2016 "Must pass variably modified type to EmitVLASizes!");
2017
2018 EnsureInsertPoint();
2019
2020 // We're going to walk down into the type and look for VLA
2021 // expressions.
2022 do {
2023 assert(type->isVariablyModifiedType());
2024
2025 const Type *ty = type.getTypePtr();
2026 switch (ty->getTypeClass()) {
2027
2028 #define TYPE(Class, Base)
2029 #define ABSTRACT_TYPE(Class, Base)
2030 #define NON_CANONICAL_TYPE(Class, Base)
2031 #define DEPENDENT_TYPE(Class, Base) case Type::Class:
2032 #define NON_CANONICAL_UNLESS_DEPENDENT_TYPE(Class, Base)
2033 #include "clang/AST/TypeNodes.inc"
2034 llvm_unreachable("unexpected dependent type!");
2035
2036 // These types are never variably-modified.
2037 case Type::Builtin:
2038 case Type::Complex:
2039 case Type::Vector:
2040 case Type::ExtVector:
2041 case Type::ConstantMatrix:
2042 case Type::Record:
2043 case Type::Enum:
2044 case Type::Elaborated:
2045 case Type::TemplateSpecialization:
2046 case Type::ObjCTypeParam:
2047 case Type::ObjCObject:
2048 case Type::ObjCInterface:
2049 case Type::ObjCObjectPointer:
2050 case Type::ExtInt:
2051 llvm_unreachable("type class is never variably-modified!");
2052
2053 case Type::Adjusted:
2054 type = cast<AdjustedType>(ty)->getAdjustedType();
2055 break;
2056
2057 case Type::Decayed:
2058 type = cast<DecayedType>(ty)->getPointeeType();
2059 break;
2060
2061 case Type::Pointer:
2062 type = cast<PointerType>(ty)->getPointeeType();
2063 break;
2064
2065 case Type::BlockPointer:
2066 type = cast<BlockPointerType>(ty)->getPointeeType();
2067 break;
2068
2069 case Type::LValueReference:
2070 case Type::RValueReference:
2071 type = cast<ReferenceType>(ty)->getPointeeType();
2072 break;
2073
2074 case Type::MemberPointer:
2075 type = cast<MemberPointerType>(ty)->getPointeeType();
2076 break;
2077
2078 case Type::ConstantArray:
2079 case Type::IncompleteArray:
2080 // Losing element qualification here is fine.
2081 type = cast<ArrayType>(ty)->getElementType();
2082 break;
2083
2084 case Type::VariableArray: {
2085 // Losing element qualification here is fine.
2086 const VariableArrayType *vat = cast<VariableArrayType>(ty);
2087
2088 // Unknown size indication requires no size computation.
2089 // Otherwise, evaluate and record it.
2090 if (const Expr *size = vat->getSizeExpr()) {
2091 // It's possible that we might have emitted this already,
2092 // e.g. with a typedef and a pointer to it.
2093 llvm::Value *&entry = VLASizeMap[size];
2094 if (!entry) {
2095 llvm::Value *Size = EmitScalarExpr(size);
2096
2097 // C11 6.7.6.2p5:
2098 // If the size is an expression that is not an integer constant
2099 // expression [...] each time it is evaluated it shall have a value
2100 // greater than zero.
2101 if (SanOpts.has(SanitizerKind::VLABound) &&
2102 size->getType()->isSignedIntegerType()) {
2103 SanitizerScope SanScope(this);
2104 llvm::Value *Zero = llvm::Constant::getNullValue(Size->getType());
2105 llvm::Constant *StaticArgs[] = {
2106 EmitCheckSourceLocation(size->getBeginLoc()),
2107 EmitCheckTypeDescriptor(size->getType())};
2108 EmitCheck(std::make_pair(Builder.CreateICmpSGT(Size, Zero),
2109 SanitizerKind::VLABound),
2110 SanitizerHandler::VLABoundNotPositive, StaticArgs, Size);
2111 }
2112
2113 // Always zexting here would be wrong if it weren't
2114 // undefined behavior to have a negative bound.
2115 entry = Builder.CreateIntCast(Size, SizeTy, /*signed*/ false);
2116 }
2117 }
2118 type = vat->getElementType();
2119 break;
2120 }
2121
2122 case Type::FunctionProto:
2123 case Type::FunctionNoProto:
2124 type = cast<FunctionType>(ty)->getReturnType();
2125 break;
2126
2127 case Type::Paren:
2128 case Type::TypeOf:
2129 case Type::UnaryTransform:
2130 case Type::Attributed:
2131 case Type::SubstTemplateTypeParm:
2132 case Type::MacroQualified:
2133 // Keep walking after single level desugaring.
2134 type = type.getSingleStepDesugaredType(getContext());
2135 break;
2136
2137 case Type::Typedef:
2138 case Type::Decltype:
2139 case Type::Auto:
2140 case Type::DeducedTemplateSpecialization:
2141 // Stop walking: nothing to do.
2142 return;
2143
2144 case Type::TypeOfExpr:
2145 // Stop walking: emit typeof expression.
2146 EmitIgnoredExpr(cast<TypeOfExprType>(ty)->getUnderlyingExpr());
2147 return;
2148
2149 case Type::Atomic:
2150 type = cast<AtomicType>(ty)->getValueType();
2151 break;
2152
2153 case Type::Pipe:
2154 type = cast<PipeType>(ty)->getElementType();
2155 break;
2156 }
2157 } while (type->isVariablyModifiedType());
2158 }
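// For illustration (a hedged walk-through of the loop above): for the local
// declaration "char buf[n + 1];" the VariableArray case evaluates "n + 1"
// exactly once, optionally checks that it is positive under
// -fsanitize=vla-bound, zero-extends it to size_t, and caches the value in
// VLASizeMap keyed by the size expression, where getVLASize() later finds it.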
2159
2160 Address CodeGenFunction::EmitVAListRef(const Expr* E) {
2161 if (getContext().getBuiltinVaListType()->isArrayType())
2162 return EmitPointerWithAlignment(E);
2163 return EmitLValue(E).getAddress(*this);
2164 }
2165
2166 Address CodeGenFunction::EmitMSVAListRef(const Expr *E) {
2167 return EmitLValue(E).getAddress(*this);
2168 }
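// For illustration (hedged): on targets whose __builtin_va_list is an array
// type (e.g. the SysV x86-64 "__va_list_tag [1]"), an expression of va_list
// type decays, so EmitVAListRef() goes through EmitPointerWithAlignment;
// where va_list is a pointer or struct typedef, the lvalue address is used
// directly. EmitMSVAListRef() always takes the lvalue path, since the
// Microsoft x64 va_list is a plain char*.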
2169
2170 void CodeGenFunction::EmitDeclRefExprDbgValue(const DeclRefExpr *E,
2171 const APValue &Init) {
2172 assert(Init.hasValue() && "Invalid DeclRefExpr initializer!");
2173 if (CGDebugInfo *Dbg = getDebugInfo())
2174 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
2175 Dbg->EmitGlobalVariable(E->getDecl(), Init);
2176 }
2177
2178 CodeGenFunction::PeepholeProtection
2179 CodeGenFunction::protectFromPeepholes(RValue rvalue) {
2180 // At the moment, the only aggressive peephole we do in IR gen
2181 // is trunc(zext) folding, but if we add more, we can easily
2182 // extend this protection.
2183
2184 if (!rvalue.isScalar()) return PeepholeProtection();
2185 llvm::Value *value = rvalue.getScalarVal();
2186 if (!isa<llvm::ZExtInst>(value)) return PeepholeProtection();
2187
2188 // Just make an extra bitcast.
2189 assert(HaveInsertPoint());
2190 llvm::Instruction *inst = new llvm::BitCastInst(value, value->getType(), "",
2191 Builder.GetInsertBlock());
2192
2193 PeepholeProtection protection;
2194 protection.Inst = inst;
2195 return protection;
2196 }
2197
2198 void CodeGenFunction::unprotectFromPeepholes(PeepholeProtection protection) {
2199 if (!protection.Inst) return;
2200
2201 // In theory, we could try to duplicate the peepholes now, but whatever.
2202 protection.Inst->eraseFromParent();
2203 }
2204
2205 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2206 QualType Ty, SourceLocation Loc,
2207 SourceLocation AssumptionLoc,
2208 llvm::Value *Alignment,
2209 llvm::Value *OffsetValue) {
2210 if (Alignment->getType() != IntPtrTy)
2211 Alignment =
2212 Builder.CreateIntCast(Alignment, IntPtrTy, false, "casted.align");
2213 if (OffsetValue && OffsetValue->getType() != IntPtrTy)
2214 OffsetValue =
2215 Builder.CreateIntCast(OffsetValue, IntPtrTy, true, "casted.offset");
2216 llvm::Value *TheCheck = nullptr;
2217 if (SanOpts.has(SanitizerKind::Alignment)) {
2218 llvm::Value *PtrIntValue =
2219 Builder.CreatePtrToInt(PtrValue, IntPtrTy, "ptrint");
2220
2221 if (OffsetValue) {
2222 bool IsOffsetZero = false;
2223 if (const auto *CI = dyn_cast<llvm::ConstantInt>(OffsetValue))
2224 IsOffsetZero = CI->isZero();
2225
2226 if (!IsOffsetZero)
2227 PtrIntValue = Builder.CreateSub(PtrIntValue, OffsetValue, "offsetptr");
2228 }
2229
2230 llvm::Value *Zero = llvm::ConstantInt::get(IntPtrTy, 0);
2231 llvm::Value *Mask =
2232 Builder.CreateSub(Alignment, llvm::ConstantInt::get(IntPtrTy, 1));
2233 llvm::Value *MaskedPtr = Builder.CreateAnd(PtrIntValue, Mask, "maskedptr");
2234 TheCheck = Builder.CreateICmpEQ(MaskedPtr, Zero, "maskcond");
2235 }
2236 llvm::Instruction *Assumption = Builder.CreateAlignmentAssumption(
2237 CGM.getDataLayout(), PtrValue, Alignment, OffsetValue);
2238
2239 if (!SanOpts.has(SanitizerKind::Alignment))
2240 return;
2241 emitAlignmentAssumptionCheck(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2242 OffsetValue, TheCheck, Assumption);
2243 }
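// Illustrative trigger (hedged sketch, hypothetical source): for
//   void *q = __builtin_assume_aligned(p, 64, off);
// this helper emits an llvm.assume conveying that (p - off) is 64-byte
// aligned, and under -fsanitize=alignment it additionally computes
// ((ptrtoint(p) - off) & 63) == 0 as TheCheck so the assumption can be
// verified at run time by emitAlignmentAssumptionCheck().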
2244
2245 void CodeGenFunction::emitAlignmentAssumption(llvm::Value *PtrValue,
2246 const Expr *E,
2247 SourceLocation AssumptionLoc,
2248 llvm::Value *Alignment,
2249 llvm::Value *OffsetValue) {
2250 if (auto *CE = dyn_cast<CastExpr>(E))
2251 E = CE->getSubExprAsWritten();
2252 QualType Ty = E->getType();
2253 SourceLocation Loc = E->getExprLoc();
2254
2255 emitAlignmentAssumption(PtrValue, Ty, Loc, AssumptionLoc, Alignment,
2256 OffsetValue);
2257 }
2258
2259 llvm::Value *CodeGenFunction::EmitAnnotationCall(llvm::Function *AnnotationFn,
2260 llvm::Value *AnnotatedVal,
2261 StringRef AnnotationStr,
2262 SourceLocation Location,
2263 const AnnotateAttr *Attr) {
2264 SmallVector<llvm::Value *, 5> Args = {
2265 AnnotatedVal,
2266 Builder.CreateBitCast(CGM.EmitAnnotationString(AnnotationStr), Int8PtrTy),
2267 Builder.CreateBitCast(CGM.EmitAnnotationUnit(Location), Int8PtrTy),
2268 CGM.EmitAnnotationLineNo(Location),
2269 };
2270 if (Attr)
2271 Args.push_back(CGM.EmitAnnotationArgs(Attr));
2272 return Builder.CreateCall(AnnotationFn, Args);
2273 }
2274
2275 void CodeGenFunction::EmitVarAnnotations(const VarDecl *D, llvm::Value *V) {
2276 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2277 // FIXME We create a new bitcast for every annotation because that's what
2278 // llvm-gcc was doing.
2279 for (const auto *I : D->specific_attrs<AnnotateAttr>())
2280 EmitAnnotationCall(CGM.getIntrinsic(llvm::Intrinsic::var_annotation),
2281 Builder.CreateBitCast(V, CGM.Int8PtrTy, V->getName()),
2282 I->getAnnotation(), D->getLocation(), I);
2283 }
2284
2285 Address CodeGenFunction::EmitFieldAnnotations(const FieldDecl *D,
2286 Address Addr) {
2287 assert(D->hasAttr<AnnotateAttr>() && "no annotate attribute");
2288 llvm::Value *V = Addr.getPointer();
2289 llvm::Type *VTy = V->getType();
2290 llvm::Function *F = CGM.getIntrinsic(llvm::Intrinsic::ptr_annotation,
2291 CGM.Int8PtrTy);
2292
2293 for (const auto *I : D->specific_attrs<AnnotateAttr>()) {
2294 // FIXME Always emit the cast inst so we can differentiate between
2295 // annotation on the first field of a struct and annotation on the struct
2296 // itself.
2297 if (VTy != CGM.Int8PtrTy)
2298 V = Builder.CreateBitCast(V, CGM.Int8PtrTy);
2299 V = EmitAnnotationCall(F, V, I->getAnnotation(), D->getLocation(), I);
2300 V = Builder.CreateBitCast(V, VTy);
2301 }
2302
2303 return Address(V, Addr.getAlignment());
2304 }
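// Illustrative triggers (hedged, hypothetical source):
//   void h() { __attribute__((annotate("counter"))) int g = 0; }
//   struct S { __attribute__((annotate("ro"))) int f; };
// The local 'g' gets one llvm.var.annotation call per annotate attribute via
// EmitVarAnnotations(), while accesses to the field 'f' are routed through
// llvm.ptr.annotation by EmitFieldAnnotations() so later loads and stores use
// the annotated pointer value.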
2305
2306 CodeGenFunction::CGCapturedStmtInfo::~CGCapturedStmtInfo() { }
2307
2308 CodeGenFunction::SanitizerScope::SanitizerScope(CodeGenFunction *CGF)
2309 : CGF(CGF) {
2310 assert(!CGF->IsSanitizerScope);
2311 CGF->IsSanitizerScope = true;
2312 }
2313
2314 CodeGenFunction::SanitizerScope::~SanitizerScope() {
2315 CGF->IsSanitizerScope = false;
2316 }
2317
2318 void CodeGenFunction::InsertHelper(llvm::Instruction *I,
2319 const llvm::Twine &Name,
2320 llvm::BasicBlock *BB,
2321 llvm::BasicBlock::iterator InsertPt) const {
2322 LoopStack.InsertHelper(I);
2323 if (IsSanitizerScope)
2324 CGM.getSanitizerMetadata()->disableSanitizerForInstruction(I);
2325 }
2326
2327 void CGBuilderInserter::InsertHelper(
2328 llvm::Instruction *I, const llvm::Twine &Name, llvm::BasicBlock *BB,
2329 llvm::BasicBlock::iterator InsertPt) const {
2330 llvm::IRBuilderDefaultInserter::InsertHelper(I, Name, BB, InsertPt);
2331 if (CGF)
2332 CGF->InsertHelper(I, Name, BB, InsertPt);
2333 }
2334
2335 // Emits an error if we don't have a valid set of target features for the
2336 // called function.
2337 void CodeGenFunction::checkTargetFeatures(const CallExpr *E,
2338 const FunctionDecl *TargetDecl) {
2339 return checkTargetFeatures(E->getBeginLoc(), TargetDecl);
2340 }
2341
2342 // Emits an error if we don't have a valid set of target features for the
2343 // called function.
2344 void CodeGenFunction::checkTargetFeatures(SourceLocation Loc,
2345 const FunctionDecl *TargetDecl) {
2346 // Early exit if this is an indirect call.
2347 if (!TargetDecl)
2348 return;
2349
2350 // Get the current enclosing function if it exists. If it doesn't
2351 // we can't check the target features anyhow.
2352 const FunctionDecl *FD = dyn_cast_or_null<FunctionDecl>(CurCodeDecl);
2353 if (!FD)
2354 return;
2355
2356 // Grab the required features for the call. For a builtin this is listed in
2357 // the td file with the default cpu; for an always_inline function this is
2358 // any listed cpu and any listed features.
2359 unsigned BuiltinID = TargetDecl->getBuiltinID();
2360 std::string MissingFeature;
2361 llvm::StringMap<bool> CallerFeatureMap;
2362 CGM.getContext().getFunctionFeatureMap(CallerFeatureMap, FD);
2363 if (BuiltinID) {
2364 StringRef FeatureList(
2365 CGM.getContext().BuiltinInfo.getRequiredFeatures(BuiltinID));
2366 // Return if the builtin doesn't have any required features.
2367 if (FeatureList.empty())
2368 return;
2369 assert(FeatureList.find(' ') == StringRef::npos &&
2370 "Space in feature list");
2371 TargetFeatures TF(CallerFeatureMap);
2372 if (!TF.hasRequiredFeatures(FeatureList))
2373 CGM.getDiags().Report(Loc, diag::err_builtin_needs_feature)
2374 << TargetDecl->getDeclName() << FeatureList;
2375 } else if (!TargetDecl->isMultiVersion() &&
2376 TargetDecl->hasAttr<TargetAttr>()) {
2377 // Get the required features for the callee.
2378
2379 const TargetAttr *TD = TargetDecl->getAttr<TargetAttr>();
2380 ParsedTargetAttr ParsedAttr =
2381 CGM.getContext().filterFunctionTargetAttrs(TD);
2382
2383 SmallVector<StringRef, 1> ReqFeatures;
2384 llvm::StringMap<bool> CalleeFeatureMap;
2385 CGM.getContext().getFunctionFeatureMap(CalleeFeatureMap, TargetDecl);
2386
2387 for (const auto &F : ParsedAttr.Features) {
2388 if (F[0] == '+' && CalleeFeatureMap.lookup(F.substr(1)))
2389 ReqFeatures.push_back(StringRef(F).substr(1));
2390 }
2391
2392 for (const auto &F : CalleeFeatureMap) {
2393 // Only positive features are "required".
2394 if (F.getValue())
2395 ReqFeatures.push_back(F.getKey());
2396 }
2397 if (!llvm::all_of(ReqFeatures, [&](StringRef Feature) {
2398 if (!CallerFeatureMap.lookup(Feature)) {
2399 MissingFeature = Feature.str();
2400 return false;
2401 }
2402 return true;
2403 }))
2404 CGM.getDiags().Report(Loc, diag::err_function_needs_feature)
2405 << FD->getDeclName() << TargetDecl->getDeclName() << MissingFeature;
2406 }
2407 }
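// Illustrative diagnostics (hedged, hypothetical source): calling an AVX
// builtin such as __builtin_ia32_addpd256() from a function compiled without
// the "avx" feature reports err_builtin_needs_feature, and calling an
// always_inline function declared with __attribute__((target("avx2"))) from a
// caller lacking avx2 reports err_function_needs_feature naming the missing
// feature.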
2408
2409 void CodeGenFunction::EmitSanitizerStatReport(llvm::SanitizerStatKind SSK) {
2410 if (!CGM.getCodeGenOpts().SanitizeStats)
2411 return;
2412
2413 llvm::IRBuilder<> IRB(Builder.GetInsertBlock(), Builder.GetInsertPoint());
2414 IRB.SetCurrentDebugLocation(Builder.getCurrentDebugLocation());
2415 CGM.getSanStats().create(IRB, SSK);
2416 }
2417
2418 llvm::Value *
2419 CodeGenFunction::FormResolverCondition(const MultiVersionResolverOption &RO) {
2420 llvm::Value *Condition = nullptr;
2421
2422 if (!RO.Conditions.Architecture.empty())
2423 Condition = EmitX86CpuIs(RO.Conditions.Architecture);
2424
2425 if (!RO.Conditions.Features.empty()) {
2426 llvm::Value *FeatureCond = EmitX86CpuSupports(RO.Conditions.Features);
2427 Condition =
2428 Condition ? Builder.CreateAnd(Condition, FeatureCond) : FeatureCond;
2429 }
2430 return Condition;
2431 }
2432
2433 static void CreateMultiVersionResolverReturn(CodeGenModule &CGM,
2434 llvm::Function *Resolver,
2435 CGBuilderTy &Builder,
2436 llvm::Function *FuncToReturn,
2437 bool SupportsIFunc) {
2438 if (SupportsIFunc) {
2439 Builder.CreateRet(FuncToReturn);
2440 return;
2441 }
2442
2443 llvm::SmallVector<llvm::Value *, 10> Args;
2444 llvm::for_each(Resolver->args(),
2445 [&](llvm::Argument &Arg) { Args.push_back(&Arg); });
2446
2447 llvm::CallInst *Result = Builder.CreateCall(FuncToReturn, Args);
2448 Result->setTailCallKind(llvm::CallInst::TCK_MustTail);
2449
2450 if (Resolver->getReturnType()->isVoidTy())
2451 Builder.CreateRetVoid();
2452 else
2453 Builder.CreateRet(Result);
2454 }
2455
2456 void CodeGenFunction::EmitMultiVersionResolver(
2457 llvm::Function *Resolver, ArrayRef<MultiVersionResolverOption> Options) {
2458 assert(getContext().getTargetInfo().getTriple().isX86() &&
2459 "Only implemented for x86 targets");
2460
2461 bool SupportsIFunc = getContext().getTargetInfo().supportsIFunc();
2462
2463 // Main function's basic block.
2464 llvm::BasicBlock *CurBlock = createBasicBlock("resolver_entry", Resolver);
2465 Builder.SetInsertPoint(CurBlock);
2466 EmitX86CpuInit();
2467
2468 for (const MultiVersionResolverOption &RO : Options) {
2469 Builder.SetInsertPoint(CurBlock);
2470 llvm::Value *Condition = FormResolverCondition(RO);
2471
2472 // The 'default' or 'generic' case.
2473 if (!Condition) {
2474 assert(&RO == Options.end() - 1 &&
2475 "Default or Generic case must be last");
2476 CreateMultiVersionResolverReturn(CGM, Resolver, Builder, RO.Function,
2477 SupportsIFunc);
2478 return;
2479 }
2480
2481 llvm::BasicBlock *RetBlock = createBasicBlock("resolver_return", Resolver);
2482 CGBuilderTy RetBuilder(*this, RetBlock);
2483 CreateMultiVersionResolverReturn(CGM, Resolver, RetBuilder, RO.Function,
2484 SupportsIFunc);
2485 CurBlock = createBasicBlock("resolver_else", Resolver);
2486 Builder.CreateCondBr(Condition, RetBlock, CurBlock);
2487 }
2488
2489 // If no generic/default, emit an unreachable.
2490 Builder.SetInsertPoint(CurBlock);
2491 llvm::CallInst *TrapCall = EmitTrapCall(llvm::Intrinsic::trap);
2492 TrapCall->setDoesNotReturn();
2493 TrapCall->setDoesNotThrow();
2494 Builder.CreateUnreachable();
2495 Builder.ClearInsertionPoint();
2496 }
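// Illustrative shape of an emitted resolver (hedged; x86 target with ifunc
// support) for
//   __attribute__((target("avx2")))    int f();
//   __attribute__((target("default"))) int f();
//   resolver_entry:  cpu-init call, then
//                    br %cpu_supports_avx2, %resolver_return, %resolver_else
//   resolver_return: ret @f.avx2
//   resolver_else:   ret @f        ; the "default" case must come last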
2497
2498 // Loc - where the diagnostic will point, i.e. where in the source code the
2499 // alignment requirement has failed.
2500 // SecondaryLoc - if present (it will be when sufficiently different from
2501 // Loc), the diagnostic will additionally point a "Note:" to this location.
2502 // It should be, e.g., the location where the __attribute__((assume_aligned))
2503 // was written.
2504 void CodeGenFunction::emitAlignmentAssumptionCheck(
2505 llvm::Value *Ptr, QualType Ty, SourceLocation Loc,
2506 SourceLocation SecondaryLoc, llvm::Value *Alignment,
2507 llvm::Value *OffsetValue, llvm::Value *TheCheck,
2508 llvm::Instruction *Assumption) {
2509 assert(Assumption && isa<llvm::CallInst>(Assumption) &&
2510 cast<llvm::CallInst>(Assumption)->getCalledOperand() ==
2511 llvm::Intrinsic::getDeclaration(
2512 Builder.GetInsertBlock()->getParent()->getParent(),
2513 llvm::Intrinsic::assume) &&
2514 "Assumption should be a call to llvm.assume().");
2515 assert(&(Builder.GetInsertBlock()->back()) == Assumption &&
2516 "Assumption should be the last instruction of the basic block, "
2517 "since the basic block is still being generated.");
2518
2519 if (!SanOpts.has(SanitizerKind::Alignment))
2520 return;
2521
2522 // Don't check pointers to volatile data. The behavior here is implementation-
2523 // defined.
2524 if (Ty->getPointeeType().isVolatileQualified())
2525 return;
2526
2527 // We need to temporarily remove the assumption so we can insert the
2528 // sanitizer check before it; otherwise the check will be dropped by optimizations.
2529 Assumption->removeFromParent();
2530
2531 {
2532 SanitizerScope SanScope(this);
2533
2534 if (!OffsetValue)
2535 OffsetValue = Builder.getInt1(0); // no offset.
2536
2537 llvm::Constant *StaticData[] = {EmitCheckSourceLocation(Loc),
2538 EmitCheckSourceLocation(SecondaryLoc),
2539 EmitCheckTypeDescriptor(Ty)};
2540 llvm::Value *DynamicData[] = {EmitCheckValue(Ptr),
2541 EmitCheckValue(Alignment),
2542 EmitCheckValue(OffsetValue)};
2543 EmitCheck({std::make_pair(TheCheck, SanitizerKind::Alignment)},
2544 SanitizerHandler::AlignmentAssumption, StaticData, DynamicData);
2545 }
2546
2547 // We are now in the (new, empty) "cont" basic block.
2548 // Reintroduce the assumption.
2549 Builder.Insert(Assumption);
2550 // FIXME: Assumption still has its original basic block as its parent.
2551 }
2552
2553 llvm::DebugLoc CodeGenFunction::SourceLocToDebugLoc(SourceLocation Location) {
2554 if (CGDebugInfo *DI = getDebugInfo())
2555 return DI->SourceLocToDebugLoc(Location);
2556
2557 return llvm::DebugLoc();
2558 }
2559
2560 static Optional<std::pair<uint32_t, uint32_t>>
2561 getLikelihoodWeights(Stmt::Likelihood LH) {
2562 switch (LH) {
2563 case Stmt::LH_Unlikely:
2564 return std::pair<uint32_t, uint32_t>(llvm::UnlikelyBranchWeight,
2565 llvm::LikelyBranchWeight);
2566 case Stmt::LH_None:
2567 return None;
2568 case Stmt::LH_Likely:
2569 return std::pair<uint32_t, uint32_t>(llvm::LikelyBranchWeight,
2570 llvm::UnlikelyBranchWeight);
2571 }
2572 llvm_unreachable("Unknown Likelihood");
2573 }
2574
2575 llvm::MDNode *CodeGenFunction::createBranchWeights(Stmt::Likelihood LH) const {
2576 Optional<std::pair<uint32_t, uint32_t>> LHW = getLikelihoodWeights(LH);
2577 if (!LHW)
2578 return nullptr;
2579
2580 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2581 return MDHelper.createBranchWeights(LHW->first, LHW->second);
2582 }
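// For illustration (hedged): "if (x) [[likely]] ..." produces branch_weights
// metadata of {llvm::LikelyBranchWeight, llvm::UnlikelyBranchWeight} on the
// conditional branch, [[unlikely]] swaps the pair, and with no attribute this
// returns nullptr so PGO-derived weights (if any) are used instead.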
2583
2584 llvm::MDNode *CodeGenFunction::createProfileOrBranchWeightsForLoop(
2585 const Stmt *Cond, uint64_t LoopCount, const Stmt *Body) const {
2586 llvm::MDNode *Weights = createProfileWeightsForLoop(Cond, LoopCount);
2587 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
2588 Weights = createBranchWeights(Stmt::getLikelihood(Body));
2589
2590 return Weights;
2591 }
2592