//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be in
      // scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    llvm_unreachable("Interop directive not supported yet.");
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}
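
// An illustrative example of what the GetLast path above handles (hypothetical
// user code, not from this file): in a GNU statement expression, the value
// comes from the last sub-statement, which may itself be wrapped in labels:
//
//   int v = ({ int t = f(); goto join; join: t + 1; });
//
// EmitCompoundStmtWithoutScope peels off the LabelStmt wrappers (emitting each
// label) before evaluating the final expression into a temporary.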

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}
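
// For illustration (a sketch, not from the original source): this turns IR of
// the shape
//
//   somewhere:
//     br label %forward          ; only use of %forward
//   forward:                     ; preds = %somewhere
//     br label %real.target
//
// into a direct branch to %real.target, deleting the empty %forward block.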

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
    }
    if (A->getKind() == attr::MustTail) {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}
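
// Hypothetical inputs handled above (illustrative, not part of this file):
//
//   [[clang::nomerge]] bar();               // suppress merging of this call
//   [[clang::musttail]] return fib(n - 1);  // guaranteed tail call
//
// The flags are saved and restored around the sub-statement so that nested
// statements see the right state.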

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest;
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}
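
// Illustrative user code for this path (GNU computed goto, hypothetical):
//
//   static void *dispatch[] = { &&op_add, &&op_ret };
//   goto *dispatch[opcode];
//   op_add: ...
//
// Every "goto *expr" funnels into one shared indirect-goto block whose
// leading PHI collects the target address from each predecessor.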

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}
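
// Sketch of the constant-folding shortcut above (hypothetical user code):
//
//   if (sizeof(void *) == 8) { use64(); } else { use32(); }
//
// folds to emitting only the live arm. The dead arm is dropped unless it
// contains a label that could still be reached by a goto, e.g.
//
//   if (0) { recovery: handle(); }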

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // The LoopHeader typically is just a branch if we skipped emitting
  // a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}
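
// Note the while(1) shortcut above: for a hypothetical input like
//
//   while (1) { step(); if (done()) break; }
//
// no conditional branch is emitted; the header becomes an unconditional
// branch into the body (usually folded away by SimplifyForwardingBlocks),
// while break/continue still work through the BreakContinueStack.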

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // The DoCond block typically is just a branch if we skipped
  // emitting a branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}
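
// The do/while(0) shortcut above targets the common macro idiom
// (hypothetical user code):
//
//   #define SWAP(a, b) do { int t = (a); (a) = (b); (b) = t; } while (0)
//
// The constant-false condition means no backedge branch is emitted, and the
// do.cond forwarding block is erased afterwards.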

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
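
// Two shapes worth noting (hypothetical user code): a condition-less
//
//   for (;;) { poll(); }
//
// falls straight from for.cond into the body with no branch, while
//
//   for (int i = 0; i < n; ++i) work(i);
//
// gets the full for.cond / for.body / for.inc / for.end block structure,
// with 'continue' routed to for.inc.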

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}
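
// The pieces emitted above correspond to the standard desugaring of
//
//   for (auto x : r) body;
//
// which is roughly (sketch):
//
//   auto &&__range = r;          // S.getRangeStmt()
//   auto __begin = begin-expr;   // S.getBeginStmt()
//   auto __end = end-expr;       // S.getEndStmt()
//   for (; __begin != __end; ++__begin) { auto x = *__begin; body; }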

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}
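
// Sketch of the NRVO path above (hypothetical user code):
//
//   Widget make(bool flip) {
//     Widget w;              // constructed directly in the return slot
//     if (flip) return w;    // sets the NRVO flag; no copy, no destructor
//     w.tweak();
//     return w;
//   }
//
// Each 'return w' just flags the variable as already returned so the normal
// cleanup path skips its destructor.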

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
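
// The two strategies above, on hypothetical GNU case-range input:
//
//   switch (n) {
//   case 0 ... 3:    ...   // small: expanded into cases 0, 1, 2, 3
//   case 10 ... 999: ...   // large: one unsigned range check, roughly
//   }                      //   if ((unsigned)(n - 10) <= 989) goto body;
//
// Large ranges chain through sw.caserange blocks that become the switch's
// default once emission finishes.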
1392
EmitCaseStmt(const CaseStmt & S,ArrayRef<const Attr * > Attrs)1393 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1394 ArrayRef<const Attr *> Attrs) {
1395 // If there is no enclosing switch instance that we're aware of, then this
1396 // case statement and its block can be elided. This situation only happens
1397 // when we've constant-folded the switch, are emitting the constant case,
1398 // and part of the constant case includes another case statement. For
1399 // instance: switch (4) { case 4: do { case 5: } while (1); }
1400 if (!SwitchInsn) {
1401 EmitStmt(S.getSubStmt());
1402 return;
1403 }
1404
1405 // Handle case ranges.
1406 if (S.getRHS()) {
1407 EmitCaseStmtRange(S, Attrs);
1408 return;
1409 }
1410
1411 llvm::ConstantInt *CaseVal =
1412 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1413 if (SwitchLikelihood)
1414 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1415
1416 // If the body of the case is just a 'break', try to not emit an empty block.
1417 // If we're profiling or we're not optimizing, leave the block in for better
1418 // debug and coverage analysis.
1419 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1420 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1421 isa<BreakStmt>(S.getSubStmt())) {
1422 JumpDest Block = BreakContinueStack.back().BreakBlock;
1423
1424 // Only do this optimization if there are no cleanups that need emitting.
1425 if (isObviouslyBranchWithoutCleanups(Block)) {
1426 if (SwitchWeights)
1427 SwitchWeights->push_back(getProfileCount(&S));
1428 SwitchInsn->addCase(CaseVal, Block.getBlock());
1429
1430 // If there was a fallthrough into this case, make sure to redirect it to
1431 // the end of the switch as well.
1432 if (Builder.GetInsertBlock()) {
1433 Builder.CreateBr(Block.getBlock());
1434 Builder.ClearInsertionPoint();
1435 }
1436 return;
1437 }
1438 }
1439
1440 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1441 EmitBlockWithFallThrough(CaseDest, &S);
1442 if (SwitchWeights)
1443 SwitchWeights->push_back(getProfileCount(&S));
1444 SwitchInsn->addCase(CaseVal, CaseDest);
1445
1446 // Recursively emitting the statement is acceptable, but is not wonderful for
1447 // code where we have many case statements nested together, i.e.:
1448 // case 1:
1449 // case 2:
1450 // case 3: etc.
1451 // Handling this recursively will create a new block for each case statement
1452 // that falls through to the next case which is IR intensive. It also causes
1453 // deep recursion which can run into stack depth limitations. Handle
1454 // sequential non-range case statements specially.
1455 //
1456 // TODO When the next case has a likelihood attribute the code returns to the
1457 // recursive algorithm. Maybe improve this case if it becomes common practice
1458 // to use a lot of attributes.
1459 const CaseStmt *CurCase = &S;
1460 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1461
1462 // Otherwise, iteratively add consecutive cases to this switch stmt.
1463 while (NextCase && NextCase->getRHS() == nullptr) {
1464 CurCase = NextCase;
1465 llvm::ConstantInt *CaseVal =
1466 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1467
1468 if (SwitchWeights)
1469 SwitchWeights->push_back(getProfileCount(NextCase));
1470 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1471 CaseDest = createBasicBlock("sw.bb");
1472 EmitBlockWithFallThrough(CaseDest, CurCase);
1473 }
1474 // Since this loop is only executed when the CaseStmt has no attributes
1475 // use a hard-coded value.
1476 if (SwitchLikelihood)
1477 SwitchLikelihood->push_back(Stmt::LH_None);
1478
1479 SwitchInsn->addCase(CaseVal, CaseDest);
1480 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1481 }
1482
1483 // Normal default recursion for non-cases.
1484 EmitStmt(CurCase->getSubStmt());
1485 }
1486
EmitDefaultStmt(const DefaultStmt & S,ArrayRef<const Attr * > Attrs)1487 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1488 ArrayRef<const Attr *> Attrs) {
1489 // If there is no enclosing switch instance that we're aware of, then the
1490 // default label itself can be elided and we simply emit its substatement.
1491 // This situation only happens when we've constant-folded the switch.
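// For instance: switch (4) { case 4: do { default: } while (1); }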
1492 if (!SwitchInsn) {
1493 EmitStmt(S.getSubStmt());
1494 return;
1495 }
1496
1497 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1498 assert(DefaultBlock->empty() &&
1499 "EmitDefaultStmt: Default block already defined?");
1500
1501 if (SwitchLikelihood)
1502 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1503
1504 EmitBlockWithFallThrough(DefaultBlock, &S);
1505
1506 EmitStmt(S.getSubStmt());
1507 }
1508
1509 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1510 /// constant value that is being switched on, see if we can dead code eliminate
1511 /// the body of the switch to a simple series of statements to emit. Basically,
1512 /// on a switch (5) we want to find these statements:
1513 /// case 5:
1514 /// printf(...); <--
1515 /// ++i; <--
1516 /// break;
1517 ///
1518 /// and add them to the ResultStmts vector. If it is unsafe to do this
1519 /// transformation (for example, one of the elided statements contains a label
1520 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1521 /// should include statements after it (e.g. the printf() line is a substmt of
1522 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1523 /// statement, then return CSFC_Success.
1524 ///
1525 /// If Case is non-null, then we are looking for the specified case, checking
1526 /// that nothing we jump over contains labels. If Case is null, then we found
1527 /// the case and are looking for the break.
1528 ///
1529 /// If the recursive walk actually finds our Case, then we set FoundCase to
1530 /// true.
1531 ///
1532 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1533 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1534 const SwitchCase *Case,
1535 bool &FoundCase,
1536 SmallVectorImpl<const Stmt*> &ResultStmts) {
1537 // If this is a null statement, just succeed.
1538 if (!S)
1539 return Case ? CSFC_Success : CSFC_FallThrough;
1540
1541 // If this is the switch case (case 4: or default:) that we're looking for,
1542 // then we're in business. Just add the substatement.
1543 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1544 if (S == Case) {
1545 FoundCase = true;
1546 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1547 ResultStmts);
1548 }
1549
1550 // Otherwise, this is some other case or default statement, just ignore it.
1551 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1552 ResultStmts);
1553 }
1554
1555 // If we are in the live part of the code and we found our break statement,
1556 // return a success!
1557 if (!Case && isa<BreakStmt>(S))
1558 return CSFC_Success;
1559
1560 // If this is a compound statement, then it might contain the SwitchCase,
1561 // the break, or neither.
1562 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1563 // Handle this as two cases: we might still be looking for the SwitchCase
1564 // (in which case everything we skip must be elidable) or we might already have it.
1565 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1566 bool StartedInLiveCode = FoundCase;
1567 unsigned StartSize = ResultStmts.size();
1568
1569 // If we've not found the case yet, scan through looking for it.
1570 if (Case) {
1571 // Keep track of whether we see a skipped declaration. The code could be
1572 // using the declaration even if it is skipped, so we can't optimize out
1573 // the decl if the kept statements might refer to it.
1574 bool HadSkippedDecl = false;
1575
1576 // If we're looking for the case, just see if we can skip each of the
1577 // substatements.
1578 for (; Case && I != E; ++I) {
1579 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1580
1581 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1582 case CSFC_Failure: return CSFC_Failure;
1583 case CSFC_Success:
1584 // A successful result means that either 1) the statement doesn't have the
1585 // case and is skippable, or 2) it does contain the case value and also
1586 // contains the break to exit the switch. In the latter case, we just
1587 // verify that the rest of the statements are elidable.
1588 if (FoundCase) {
1589 // If we found the case and skipped declarations, we can't do the
1590 // optimization.
1591 if (HadSkippedDecl)
1592 return CSFC_Failure;
1593
1594 for (++I; I != E; ++I)
1595 if (CodeGenFunction::ContainsLabel(*I, true))
1596 return CSFC_Failure;
1597 return CSFC_Success;
1598 }
1599 break;
1600 case CSFC_FallThrough:
1601 // If we have a fallthrough result, then we found the case and have
1602 // started to include statements. Consider the rest of the statements
1603 // in the compound statement as candidates for inclusion.
1604 assert(FoundCase && "Didn't find case but returned fallthrough?");
1605 // We recursively found Case, so we're not looking for it anymore.
1606 Case = nullptr;
1607
1608 // If we found the case and skipped declarations, we can't do the
1609 // optimization.
1610 if (HadSkippedDecl)
1611 return CSFC_Failure;
1612 break;
1613 }
1614 }
1615
1616 if (!FoundCase)
1617 return CSFC_Success;
1618
1619 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1620 }
1621
1622 // If we have statements in our range, then we know that the statements are
1623 // live and need to be added to the set of statements we're tracking.
1624 bool AnyDecls = false;
1625 for (; I != E; ++I) {
1626 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1627
1628 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1629 case CSFC_Failure: return CSFC_Failure;
1630 case CSFC_FallThrough:
1631 // A fallthrough result means that the statement was simple and was just
1632 // added to ResultStmts; keep adding the statements that follow it.
1633 break;
1634 case CSFC_Success:
1635 // A successful result means that we found the break statement and
1636 // stopped statement inclusion. We just ensure that any leftover stmts
1637 // are skippable and return success ourselves.
1638 for (++I; I != E; ++I)
1639 if (CodeGenFunction::ContainsLabel(*I, true))
1640 return CSFC_Failure;
1641 return CSFC_Success;
1642 }
1643 }
1644
1645 // If we're about to fall out of a scope without hitting a 'break;', we
1646 // can't perform the optimization if there were any decls in that scope
1647 // (we'd lose their end-of-lifetime).
1648 if (AnyDecls) {
1649 // If the entire compound statement was live, there's one more thing we
1650 // can try before giving up: emit the whole thing as a single statement.
1651 // We can do that unless the statement contains a 'break;'.
1652 // FIXME: Such a break must be at the end of a construct within this one.
1653 // We could emit this by just ignoring the BreakStmts entirely.
1654 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1655 ResultStmts.resize(StartSize);
1656 ResultStmts.push_back(S);
1657 } else {
1658 return CSFC_Failure;
1659 }
1660 }
1661
1662 return CSFC_FallThrough;
1663 }
1664
1665 // Okay, this is some other statement that we don't handle explicitly, like a
1666 // for statement or increment etc. If we are skipping over this statement,
1667 // just verify it doesn't have labels, which would make it invalid to elide.
1668 if (Case) {
1669 if (CodeGenFunction::ContainsLabel(S, true))
1670 return CSFC_Failure;
1671 return CSFC_Success;
1672 }
1673
1674 // Otherwise, we want to include this statement. Everything is cool with that
1675 // so long as it doesn't contain a break out of the switch we're in.
1676 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1677
1678 // Otherwise, everything is great. Include the statement and tell the caller
1679 // that we fall through and include the next statement as well.
1680 ResultStmts.push_back(S);
1681 return CSFC_FallThrough;
1682 }
1683
1684 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1685 /// then invoke CollectStatementsForCase to find the list of statements to emit
1686 /// for a switch on constant. See the comment above CollectStatementsForCase
1687 /// for more details.
1688 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1689 const llvm::APSInt &ConstantCondValue,
1690 SmallVectorImpl<const Stmt*> &ResultStmts,
1691 ASTContext &C,
1692 const SwitchCase *&ResultCase) {
1693 // First step, find the switch case that is being branched to. We can do this
1694 // efficiently by scanning the SwitchCase list.
1695 const SwitchCase *Case = S.getSwitchCaseList();
1696 const DefaultStmt *DefaultCase = nullptr;
1697
1698 for (; Case; Case = Case->getNextSwitchCase()) {
1699 // It's either a default or case. Just remember the default statement in
1700 // case we're not jumping to any numbered cases.
1701 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1702 DefaultCase = DS;
1703 continue;
1704 }
1705
1706 // Check to see if this case is the one we're looking for.
1707 const CaseStmt *CS = cast<CaseStmt>(Case);
1708 // Don't handle case ranges yet.
1709 if (CS->getRHS()) return false;
1710
1711 // If we found our case, remember it as 'case'.
1712 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1713 break;
1714 }
1715
1716 // If we didn't find a matching case, we use a default if it exists, or we
1717 // elide the whole switch body!
1718 if (!Case) {
1719 // It is safe to elide the body of the switch if it doesn't contain labels
1720 // etc. If it is safe, return successfully with an empty ResultStmts list.
1721 if (!DefaultCase)
1722 return !CodeGenFunction::ContainsLabel(&S);
1723 Case = DefaultCase;
1724 }
1725
1726 // Ok, we know which case is being jumped to; try to collect all the
1727 // statements that follow it. This can fail for a variety of reasons. Also,
1728 // check to see that the recursive walk actually found our case statement.
1729 // Insane cases like this can fail to find it in the recursive walk since we
1730 // don't handle every stmt kind:
1731 // switch (4) {
1732 // while (1) {
1733 // case 4: ...
1734 bool FoundCase = false;
1735 ResultCase = Case;
1736 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1737 ResultStmts) != CSFC_Failure &&
1738 FoundCase;
1739 }
1740
1741 static Optional<SmallVector<uint64_t, 16>>
1742 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1743 // Are there enough branches to weight them?
1744 if (Likelihoods.size() <= 1)
1745 return None;
1746
1747 uint64_t NumUnlikely = 0;
1748 uint64_t NumNone = 0;
1749 uint64_t NumLikely = 0;
1750 for (const auto LH : Likelihoods) {
1751 switch (LH) {
1752 case Stmt::LH_Unlikely:
1753 ++NumUnlikely;
1754 break;
1755 case Stmt::LH_None:
1756 ++NumNone;
1757 break;
1758 case Stmt::LH_Likely:
1759 ++NumLikely;
1760 break;
1761 }
1762 }
1763
1764 // Is there a likelihood attribute used?
1765 if (NumUnlikely == 0 && NumLikely == 0)
1766 return None;
1767
1768 // When multiple cases share the same code they can be combined during
1769 // optimization. In that case the weights of the branch will be the sum of
1770 // the individual weights. Make sure the combined sum of all neutral cases
1771 // doesn't exceed the value of a single likely attribute.
1772 // The additions both avoid divisions by 0 and make sure the weights of None
1773 // don't exceed the weight of Likely.
1774 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1775 const uint64_t None = Likely / (NumNone + 1);
1776 const uint64_t Unlikely = 0;
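// For example, with one likely case and two unannotated cases:
//   Likely = INT32_MAX / 3, None = Likely / 3, Unlikely = 0.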
1777
1778 SmallVector<uint64_t, 16> Result;
1779 Result.reserve(Likelihoods.size());
1780 for (const auto LH : Likelihoods) {
1781 switch (LH) {
1782 case Stmt::LH_Unlikely:
1783 Result.push_back(Unlikely);
1784 break;
1785 case Stmt::LH_None:
1786 Result.push_back(None);
1787 break;
1788 case Stmt::LH_Likely:
1789 Result.push_back(Likely);
1790 break;
1791 }
1792 }
1793
1794 return Result;
1795 }
1796
1797 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1798 // Handle nested switch statements.
1799 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1800 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1801 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1802 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1803
1804 // See if we can constant fold the condition of the switch and therefore only
1805 // emit the live case statement (if any) of the switch.
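// For instance: switch (4) { case 4: foo(); break; default: bar(); }
// emits only the call to foo().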
1806 llvm::APSInt ConstantCondValue;
1807 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1808 SmallVector<const Stmt*, 4> CaseStmts;
1809 const SwitchCase *Case = nullptr;
1810 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1811 getContext(), Case)) {
1812 if (Case)
1813 incrementProfileCounter(Case);
1814 RunCleanupsScope ExecutedScope(*this);
1815
1816 if (S.getInit())
1817 EmitStmt(S.getInit());
1818
1819 // Emit the condition variable if needed. It lives inside the cleanup
1820 // scope used by this special case for constant-folded switches.
1821 if (S.getConditionVariable())
1822 EmitDecl(*S.getConditionVariable());
1823
1824 // At this point, we are no longer "within" a switch instance, so
1825 // temporarily clear SwitchInsn to ensure that any embedded case
1826 // statements are not emitted.
1827 SwitchInsn = nullptr;
1828
1829 // Okay, we can dead code eliminate everything except this case. Emit the
1830 // specified series of statements and we're good.
1831 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1832 EmitStmt(CaseStmts[i]);
1833 incrementProfileCounter(&S);
1834
1835 // Now we want to restore the saved switch instance so that nested
1836 // switches continue to function properly.
1837 SwitchInsn = SavedSwitchInsn;
1838
1839 return;
1840 }
1841 }
1842
1843 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1844
1845 RunCleanupsScope ConditionScope(*this);
1846
1847 if (S.getInit())
1848 EmitStmt(S.getInit());
1849
1850 if (S.getConditionVariable())
1851 EmitDecl(*S.getConditionVariable());
1852 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1853
1854 // Create a basic block to hold the code that comes after the switch
1855 // statement. We also need to create the default block now so that
1856 // explicit case range tests have a place to jump to on
1857 // failure.
1858 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1859 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1860 if (PGO.haveRegionCounts()) {
1861 // Walk the SwitchCase list to find how many there are.
1862 uint64_t DefaultCount = 0;
1863 unsigned NumCases = 0;
1864 for (const SwitchCase *Case = S.getSwitchCaseList();
1865 Case;
1866 Case = Case->getNextSwitchCase()) {
1867 if (isa<DefaultStmt>(Case))
1868 DefaultCount = getProfileCount(Case);
1869 NumCases += 1;
1870 }
1871 SwitchWeights = new SmallVector<uint64_t, 16>();
1872 SwitchWeights->reserve(NumCases);
1873 // The default needs to be first. We store the edge count, so we already
1874 // know the right weight.
1875 SwitchWeights->push_back(DefaultCount);
1876 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1877 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1878 // Initialize the default case.
1879 SwitchLikelihood->push_back(Stmt::LH_None);
1880 }
1881
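// Case ranges are lowered to chains of explicit tests that hang off the
// default destination, so start that chain at the default block.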
1882 CaseRangeBlock = DefaultBlock;
1883
1884 // Clear the insertion point to indicate we are in unreachable code.
1885 Builder.ClearInsertionPoint();
1886
1887 // All break statements jump to SwitchExit. If BreakContinueStack is
1888 // non-empty, reuse the enclosing ContinueBlock so 'continue' still works.
1889 JumpDest OuterContinue;
1890 if (!BreakContinueStack.empty())
1891 OuterContinue = BreakContinueStack.back().ContinueBlock;
1892
1893 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1894
1895 // Emit switch body.
1896 EmitStmt(S.getBody());
1897
1898 BreakContinueStack.pop_back();
1899
1900 // Update the default block in case explicit case range tests have
1901 // been chained on top.
1902 SwitchInsn->setDefaultDest(CaseRangeBlock);
1903
1904 // If a default was never emitted:
1905 if (!DefaultBlock->getParent()) {
1906 // If we have cleanups, emit the default block so that there's a
1907 // place to jump through the cleanups from.
1908 if (ConditionScope.requiresCleanups()) {
1909 EmitBlock(DefaultBlock);
1910
1911 // Otherwise, just forward the default block to the switch end.
1912 } else {
1913 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1914 delete DefaultBlock;
1915 }
1916 }
1917
1918 ConditionScope.ForceCleanup();
1919
1920 // Emit continuation.
1921 EmitBlock(SwitchExit.getBlock(), true);
1922 incrementProfileCounter(&S);
1923
1924 // If the switch has a condition wrapped by __builtin_unpredictable,
1925 // create metadata that specifies that the switch is unpredictable.
1926 // Don't bother if not optimizing because that metadata would not be used.
1927 auto *Call = dyn_cast<CallExpr>(S.getCond());
1928 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1929 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1930 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1931 llvm::MDBuilder MDHelper(getLLVMContext());
1932 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1933 MDHelper.createUnpredictable());
1934 }
1935 }
1936
1937 if (SwitchWeights) {
1938 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1939 "switch weights do not match switch cases");
1940 // If there's only one jump destination there's no sense weighting it.
1941 if (SwitchWeights->size() > 1)
1942 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1943 createProfileWeights(*SwitchWeights));
1944 delete SwitchWeights;
1945 } else if (SwitchLikelihood) {
1946 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
1947 "switch likelihoods do not match switch cases");
1948 Optional<SmallVector<uint64_t, 16>> LHW =
1949 getLikelihoodWeights(*SwitchLikelihood);
1950 if (LHW) {
1951 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
1952 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1953 createProfileWeights(*LHW));
1954 }
1955 delete SwitchLikelihood;
1956 }
1957 SwitchInsn = SavedSwitchInsn;
1958 SwitchWeights = SavedSwitchWeights;
1959 SwitchLikelihood = SavedSwitchLikelihood;
1960 CaseRangeBlock = SavedCRBlock;
1961 }
1962
1963 static std::string
1964 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
1965 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
1966 std::string Result;
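// Walk the constraint string, rewriting GCC constraint syntax into LLVM IR
// constraint syntax: ',' separating alternatives becomes '|', 'g' expands
// to "imr", and symbolic operand names are resolved to operand indices.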
1967
1968 while (*Constraint) {
1969 switch (*Constraint) {
1970 default:
1971 Result += Target.convertConstraint(Constraint);
1972 break;
1973 // Ignore these
1974 case '*':
1975 case '?':
1976 case '!':
1977 case '=': // We may see this and the following in multi-alternative constraints.
1978 case '+':
1979 break;
1980 case '#': // Ignore the rest of the constraint alternative.
1981 while (Constraint[1] && Constraint[1] != ',')
1982 Constraint++;
1983 break;
1984 case '&':
1985 case '%':
1986 Result += *Constraint;
1987 while (Constraint[1] && Constraint[1] == *Constraint)
1988 Constraint++;
1989 break;
1990 case ',':
1991 Result += "|";
1992 break;
1993 case 'g':
1994 Result += "imr";
1995 break;
1996 case '[': {
1997 assert(OutCons &&
1998 "Must pass output names to constraints with a symbolic name");
1999 unsigned Index;
2000 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2001 assert(result && "Could not resolve symbolic name"); (void)result;
2002 Result += llvm::utostr(Index);
2003 break;
2004 }
2005 }
2006
2007 Constraint++;
2008 }
2009
2010 return Result;
2011 }
2012
2013 /// AddVariableConstraints - Look at AsmExpr; if it is a variable declared
2014 /// as using a particular register, add that register as a constraint to be
2015 /// used in this asm stmt.
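/// For example, given 'register int X asm("eax");' used as an output, an "r"
/// constraint becomes "{eax}" ("&{eax}" if the output is early-clobber).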
2016 static std::string
2017 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2018 const TargetInfo &Target, CodeGenModule &CGM,
2019 const AsmStmt &Stmt, const bool EarlyClobber,
2020 std::string *GCCReg = nullptr) {
2021 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2022 if (!AsmDeclRef)
2023 return Constraint;
2024 const ValueDecl &Value = *AsmDeclRef->getDecl();
2025 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2026 if (!Variable)
2027 return Constraint;
2028 if (Variable->getStorageClass() != SC_Register)
2029 return Constraint;
2030 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2031 if (!Attr)
2032 return Constraint;
2033 StringRef Register = Attr->getLabel();
2034 assert(Target.isValidGCCRegisterName(Register));
2035 // We're using validateOutputConstraint here because we only care if
2036 // this is a register constraint.
2037 TargetInfo::ConstraintInfo Info(Constraint, "");
2038 if (Target.validateOutputConstraint(Info) &&
2039 !Info.allowsRegister()) {
2040 CGM.ErrorUnsupported(&Stmt, "__asm__");
2041 return Constraint;
2042 }
2043 // Canonicalize the register here before returning it.
2044 Register = Target.getNormalizedGCCRegisterName(Register);
2045 if (GCCReg != nullptr)
2046 *GCCReg = Register.str();
2047 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2048 }
2049
2050 llvm::Value*
2051 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2052 LValue InputValue, QualType InputType,
2053 std::string &ConstraintStr,
2054 SourceLocation Loc) {
2055 llvm::Value *Arg;
2056 if (Info.allowsRegister() || !Info.allowsMemory()) {
2057 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
2058 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
2059 } else {
2060 llvm::Type *Ty = ConvertType(InputType);
2061 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
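// Small aggregates with a power-of-2 size can be passed in a register by
// loading them as an integer of the same width; anything else is passed
// indirectly, marked with the '*' (indirect memory operand) modifier.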
2062 if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
2063 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2064 Ty = llvm::PointerType::getUnqual(Ty);
2065
2066 Arg = Builder.CreateLoad(
2067 Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
2068 } else {
2069 Arg = InputValue.getPointer(*this);
2070 ConstraintStr += '*';
2071 }
2072 }
2073 } else {
2074 Arg = InputValue.getPointer(*this);
2075 ConstraintStr += '*';
2076 }
2077
2078 return Arg;
2079 }
2080
2081 llvm::Value* CodeGenFunction::EmitAsmInput(
2082 const TargetInfo::ConstraintInfo &Info,
2083 const Expr *InputExpr,
2084 std::string &ConstraintStr) {
2085 // If this can't be a register or memory, i.e., has to be a constant
2086 // (immediate or symbolic), try to emit it as such.
2087 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2088 if (Info.requiresImmediateConstant()) {
2089 Expr::EvalResult EVResult;
2090 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2091
2092 llvm::APSInt IntResult;
2093 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2094 getContext()))
2095 return llvm::ConstantInt::get(getLLVMContext(), IntResult);
2096 }
2097
2098 Expr::EvalResult Result;
2099 if (InputExpr->EvaluateAsInt(Result, getContext()))
2100 return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
2101 }
2102
2103 if (Info.allowsRegister() || !Info.allowsMemory())
2104 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2105 return EmitScalarExpr(InputExpr);
2106 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2107 return EmitScalarExpr(InputExpr);
2108 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2109 LValue Dest = EmitLValue(InputExpr);
2110 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2111 InputExpr->getExprLoc());
2112 }
2113
2114 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2115 /// asm call instruction. The !srcloc MDNode contains a list of constant
2116 /// integers which are the source locations of the start of each line in the
2117 /// asm.
2118 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2119 CodeGenFunction &CGF) {
2120 SmallVector<llvm::Metadata *, 8> Locs;
2121 // Add the location of the first line to the MDNode.
2122 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2123 CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
2124 StringRef StrVal = Str->getString();
2125 if (!StrVal.empty()) {
2126 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2127 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2128 unsigned StartToken = 0;
2129 unsigned ByteOffset = 0;
2130
2131 // Add the location of the start of each subsequent line of the asm to the
2132 // MDNode.
2133 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2134 if (StrVal[i] != '\n') continue;
2135 SourceLocation LineLoc = Str->getLocationOfByte(
2136 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2137 Locs.push_back(llvm::ConstantAsMetadata::get(
2138 llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
2139 }
2140 }
2141
2142 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2143 }
2144
2145 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2146 bool HasUnwindClobber, bool ReadOnly,
2147 bool ReadNone, bool NoMerge, const AsmStmt &S,
2148 const std::vector<llvm::Type *> &ResultRegTypes,
2149 CodeGenFunction &CGF,
2150 std::vector<llvm::Value *> &RegResults) {
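// Inline asm is assumed not to unwind; only an explicit "unwind" clobber
// keeps us from marking the call NoUnwind.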
2151 if (!HasUnwindClobber)
2152 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2153 llvm::Attribute::NoUnwind);
2154
2155 if (NoMerge)
2156 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2157 llvm::Attribute::NoMerge);
2158 // Attach readnone and readonly attributes.
2159 if (!HasSideEffect) {
2160 if (ReadNone)
2161 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2162 llvm::Attribute::ReadNone);
2163 else if (ReadOnly)
2164 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2165 llvm::Attribute::ReadOnly);
2166 }
2167
2168 // Slap the source location of the inline asm into a !srcloc metadata on the
2169 // call.
2170 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2171 Result.setMetadata("srcloc",
2172 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2173 else {
2174 // At least put the line number on MS inline asm blobs.
2175 llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
2176 S.getAsmLoc().getRawEncoding());
2177 Result.setMetadata("srcloc",
2178 llvm::MDNode::get(CGF.getLLVMContext(),
2179 llvm::ConstantAsMetadata::get(Loc)));
2180 }
2181
2182 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2183 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2184 // convergent (meaning, they may call an intrinsically convergent op, such
2185 // as bar.sync, and so can't have certain optimizations applied around
2186 // them).
2187 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2188 llvm::Attribute::Convergent);
2189 // Extract all of the register value results from the asm.
2190 if (ResultRegTypes.size() == 1) {
2191 RegResults.push_back(&Result);
2192 } else {
2193 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2194 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2195 RegResults.push_back(Tmp);
2196 }
2197 }
2198 }
2199
2200 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2201 // Assemble the final asm string.
2202 std::string AsmString = S.generateAsmString(getContext());
2203
2204 // Get all the output and input constraints together.
2205 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2206 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2207
2208 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2209 StringRef Name;
2210 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2211 Name = GAS->getOutputName(i);
2212 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2213 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2214 assert(IsValid && "Failed to parse output constraint");
2215 OutputConstraintInfos.push_back(Info);
2216 }
2217
2218 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2219 StringRef Name;
2220 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2221 Name = GAS->getInputName(i);
2222 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2223 bool IsValid =
2224 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2225 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2226 InputConstraintInfos.push_back(Info);
2227 }
2228
2229 std::string Constraints;
2230
2231 std::vector<LValue> ResultRegDests;
2232 std::vector<QualType> ResultRegQualTys;
2233 std::vector<llvm::Type *> ResultRegTypes;
2234 std::vector<llvm::Type *> ResultTruncRegTypes;
2235 std::vector<llvm::Type *> ArgTypes;
2236 std::vector<llvm::Value*> Args;
2237 llvm::BitVector ResultTypeRequiresCast;
2238
2239 // Keep track of inout constraints.
2240 std::string InOutConstraints;
2241 std::vector<llvm::Value*> InOutArgs;
2242 std::vector<llvm::Type*> InOutArgTypes;
2243
2244 // Keep track of out constraints for tied input operand.
2245 std::vector<std::string> OutputConstraints;
2246
2247 // Keep track of defined physregs.
2248 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2249
2250 // An inline asm can be marked readonly if it meets the following conditions:
2251 // - it doesn't have any side effects
2252 // - it doesn't clobber memory
2253 // - it doesn't return a value by-reference
2254 // It can be marked readnone if it doesn't have any input memory constraints
2255 // in addition to meeting the conditions listed above.
2256 bool ReadOnly = true, ReadNone = true;
2257
2258 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2259 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2260
2261 // Simplify the output constraint.
2262 std::string OutputConstraint(S.getOutputConstraint(i));
2263 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2264 getTarget(), &OutputConstraintInfos);
2265
2266 const Expr *OutExpr = S.getOutputExpr(i);
2267 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2268
2269 std::string GCCReg;
2270 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2271 getTarget(), CGM, S,
2272 Info.earlyClobber(),
2273 &GCCReg);
2274 // Give an error on multiple outputs to same physreg.
2275 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2276 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2277
2278 OutputConstraints.push_back(OutputConstraint);
2279 LValue Dest = EmitLValue(OutExpr);
2280 if (!Constraints.empty())
2281 Constraints += ',';
2282
2283 // If this is a register output, then make the inline asm return it
2284 // by-value. If this is a memory result, return the value by-reference.
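// For example, "=r" on an int yields an i32 result from the asm call, while
// "=m" instead appends a pointer argument and an "=*m" constraint.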
2285 bool isScalarizableAggregate =
2286 hasAggregateEvaluationKind(OutExpr->getType());
2287 if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
2288 isScalarizableAggregate)) {
2289 Constraints += "=" + OutputConstraint;
2290 ResultRegQualTys.push_back(OutExpr->getType());
2291 ResultRegDests.push_back(Dest);
2292 ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
2293 if (Info.allowsRegister() && isScalarizableAggregate) {
2294 ResultTypeRequiresCast.push_back(true);
2295 unsigned Size = getContext().getTypeSize(OutExpr->getType());
2296 llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
2297 ResultRegTypes.push_back(ConvTy);
2298 } else {
2299 ResultTypeRequiresCast.push_back(false);
2300 ResultRegTypes.push_back(ResultTruncRegTypes.back());
2301 }
2302 // If this output is tied to an input, and if the input is larger, then
2303 // we need to set the actual result type of the inline asm node to be the
2304 // same as the input type.
2305 if (Info.hasMatchingInput()) {
2306 unsigned InputNo;
2307 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2308 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2309 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2310 break;
2311 }
2312 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2313
2314 QualType InputTy = S.getInputExpr(InputNo)->getType();
2315 QualType OutputType = OutExpr->getType();
2316
2317 uint64_t InputSize = getContext().getTypeSize(InputTy);
2318 if (getContext().getTypeSize(OutputType) < InputSize) {
2319 // Form the asm to return the value as a larger integer or fp type.
2320 ResultRegTypes.back() = ConvertType(InputTy);
2321 }
2322 }
2323 if (llvm::Type* AdjTy =
2324 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2325 ResultRegTypes.back()))
2326 ResultRegTypes.back() = AdjTy;
2327 else {
2328 CGM.getDiags().Report(S.getAsmLoc(),
2329 diag::err_asm_invalid_type_in_input)
2330 << OutExpr->getType() << OutputConstraint;
2331 }
2332
2333 // Update largest vector width for any vector types.
2334 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2335 LargestVectorWidth =
2336 std::max((uint64_t)LargestVectorWidth,
2337 VT->getPrimitiveSizeInBits().getKnownMinSize());
2338 } else {
2339 llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
2340 llvm::Value *DestPtr = Dest.getPointer(*this);
2341 // Matrix types in memory are represented by arrays, but accessed through
2342 // vector pointers, with the alignment specified on the access operation.
2343 // For inline assembly, update pointer arguments to use vector pointers.
2344 // Otherwise there would be a mismatch if the matrix is also an
2345 // input argument, which is represented as a vector.
2346 if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
2347 DestAddrTy = llvm::PointerType::get(
2348 ConvertType(OutExpr->getType()),
2349 cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
2350 DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
2351 }
2352 ArgTypes.push_back(DestAddrTy);
2353 Args.push_back(DestPtr);
2354 Constraints += "=*";
2355 Constraints += OutputConstraint;
2356 ReadOnly = ReadNone = false;
2357 }
2358
2359 if (Info.isReadWrite()) {
2360 InOutConstraints += ',';
2361
2362 const Expr *InputExpr = S.getOutputExpr(i);
2363 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2364 InOutConstraints,
2365 InputExpr->getExprLoc());
2366
2367 if (llvm::Type* AdjTy =
2368 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2369 Arg->getType()))
2370 Arg = Builder.CreateBitCast(Arg, AdjTy);
2371
2372 // Update largest vector width for any vector types.
2373 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2374 LargestVectorWidth =
2375 std::max((uint64_t)LargestVectorWidth,
2376 VT->getPrimitiveSizeInBits().getKnownMinSize());
2377 // Only tie earlyclobber physregs.
2378 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2379 InOutConstraints += llvm::utostr(i);
2380 else
2381 InOutConstraints += OutputConstraint;
2382
2383 InOutArgTypes.push_back(Arg->getType());
2384 InOutArgs.push_back(Arg);
2385 }
2386 }
2387
2388 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2389 // to the return value slot. Only do this when returning in registers.
2390 if (isa<MSAsmStmt>(&S)) {
2391 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2392 if (RetAI.isDirect() || RetAI.isExtend()) {
2393 // Make a fake lvalue for the return value slot.
2394 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2395 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2396 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2397 ResultRegDests, AsmString, S.getNumOutputs());
2398 SawAsmBlock = true;
2399 }
2400 }
2401
2402 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2403 const Expr *InputExpr = S.getInputExpr(i);
2404
2405 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2406
2407 if (Info.allowsMemory())
2408 ReadNone = false;
2409
2410 if (!Constraints.empty())
2411 Constraints += ',';
2412
2413 // Simplify the input constraint.
2414 std::string InputConstraint(S.getInputConstraint(i));
2415 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2416 &OutputConstraintInfos);
2417
2418 InputConstraint = AddVariableConstraints(
2419 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2420 getTarget(), CGM, S, false /* No EarlyClobber */);
2421
2422 std::string ReplaceConstraint (InputConstraint);
2423 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2424
2425 // If this input argument is tied to a larger output result, extend the
2426 // input to be the same size as the output. The LLVM backend wants to see
2427 // the input and output of a matching constraint be the same size. Note
2428 // that GCC does not define what the top bits are here. We use zext because
2429 // that is usually cheaper, but LLVM IR should really get an anyext someday.
2430 if (Info.hasTiedOperand()) {
2431 unsigned Output = Info.getTiedOperand();
2432 QualType OutputType = S.getOutputExpr(Output)->getType();
2433 QualType InputTy = InputExpr->getType();
2434
2435 if (getContext().getTypeSize(OutputType) >
2436 getContext().getTypeSize(InputTy)) {
2437 // Use ptrtoint as appropriate so that we can do our extension.
2438 if (isa<llvm::PointerType>(Arg->getType()))
2439 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2440 llvm::Type *OutputTy = ConvertType(OutputType);
2441 if (isa<llvm::IntegerType>(OutputTy))
2442 Arg = Builder.CreateZExt(Arg, OutputTy);
2443 else if (isa<llvm::PointerType>(OutputTy))
2444 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2445 else {
2446 assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2447 Arg = Builder.CreateFPExt(Arg, OutputTy);
2448 }
2449 }
2450 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2451 ReplaceConstraint = OutputConstraints[Output];
2452 }
2453 if (llvm::Type* AdjTy =
2454 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2455 Arg->getType()))
2456 Arg = Builder.CreateBitCast(Arg, AdjTy);
2457 else
2458 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2459 << InputExpr->getType() << InputConstraint;
2460
2461 // Update largest vector width for any vector types.
2462 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2463 LargestVectorWidth =
2464 std::max((uint64_t)LargestVectorWidth,
2465 VT->getPrimitiveSizeInBits().getKnownMinSize());
2466
2467 ArgTypes.push_back(Arg->getType());
2468 Args.push_back(Arg);
2469 Constraints += InputConstraint;
2470 }
2471
2472 // Labels
2473 SmallVector<llvm::BasicBlock *, 16> Transfer;
2474 llvm::BasicBlock *Fallthrough = nullptr;
2475 bool IsGCCAsmGoto = false;
2476 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2477 IsGCCAsmGoto = GS->isAsmGoto();
2478 if (IsGCCAsmGoto) {
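// For asm goto, each target label is passed to the asm as a block address
// with an 'X' constraint, and a synthetic fallthrough block receives the
// normal exit of the resulting callbr instruction.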
2479 for (const auto *E : GS->labels()) {
2480 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2481 Transfer.push_back(Dest.getBlock());
2482 llvm::BlockAddress *BA =
2483 llvm::BlockAddress::get(CurFn, Dest.getBlock());
2484 Args.push_back(BA);
2485 ArgTypes.push_back(BA->getType());
2486 if (!Constraints.empty())
2487 Constraints += ',';
2488 Constraints += 'X';
2489 }
2490 Fallthrough = createBasicBlock("asm.fallthrough");
2491 }
2492 }
2493
2494 // Append the "input" part of inout constraints last.
2495 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2496 ArgTypes.push_back(InOutArgTypes[i]);
2497 Args.push_back(InOutArgs[i]);
2498 }
2499 Constraints += InOutConstraints;
2500
2501 bool HasUnwindClobber = false;
2502
2503 // Clobbers
2504 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2505 StringRef Clobber = S.getClobber(i);
2506
2507 if (Clobber == "memory")
2508 ReadOnly = ReadNone = false;
2509 else if (Clobber == "unwind") {
2510 HasUnwindClobber = true;
2511 continue;
2512 } else if (Clobber != "cc") {
2513 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2514 if (CGM.getCodeGenOpts().StackClashProtector &&
2515 getTarget().isSPRegName(Clobber)) {
2516 CGM.getDiags().Report(S.getAsmLoc(),
2517 diag::warn_stack_clash_protection_inline_asm);
2518 }
2519 }
2520
2521 if (isa<MSAsmStmt>(&S)) {
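// If EAX or EDX is already used for the return value (see the return
// register handling above), mark that output early-clobber instead of
// emitting a plain clobber for the register.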
2522 if (Clobber == "eax" || Clobber == "edx") {
2523 if (Constraints.find("=&A") != std::string::npos)
2524 continue;
2525 std::string::size_type position1 =
2526 Constraints.find("={" + Clobber.str() + "}");
2527 if (position1 != std::string::npos) {
2528 Constraints.insert(position1 + 1, "&");
2529 continue;
2530 }
2531 std::string::size_type position2 = Constraints.find("=A");
2532 if (position2 != std::string::npos) {
2533 Constraints.insert(position2 + 1, "&");
2534 continue;
2535 }
2536 }
2537 }
2538 if (!Constraints.empty())
2539 Constraints += ',';
2540
2541 Constraints += "~{";
2542 Constraints += Clobber;
2543 Constraints += '}';
2544 }
2545
2546 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2547 "unwind clobber can't be used with asm goto");
2548
2549 // Add machine specific clobbers
2550 std::string MachineClobbers = getTarget().getClobbers();
2551 if (!MachineClobbers.empty()) {
2552 if (!Constraints.empty())
2553 Constraints += ',';
2554 Constraints += MachineClobbers;
2555 }
2556
2557 llvm::Type *ResultType;
2558 if (ResultRegTypes.empty())
2559 ResultType = VoidTy;
2560 else if (ResultRegTypes.size() == 1)
2561 ResultType = ResultRegTypes[0];
2562 else
2563 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2564
2565 llvm::FunctionType *FTy =
2566 llvm::FunctionType::get(ResultType, ArgTypes, false);
2567
2568 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2569 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2570 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2571 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2572 FTy, AsmString, Constraints, HasSideEffect,
2573 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2574 std::vector<llvm::Value*> RegResults;
2575 if (IsGCCAsmGoto) {
2576 llvm::CallBrInst *Result =
2577 Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2578 EmitBlock(Fallthrough);
2579 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2580 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2581 ResultRegTypes, *this, RegResults);
2582 } else if (HasUnwindClobber) {
2583 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2584 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2585 InNoMergeAttributedStmt, S, ResultRegTypes, *this,
2586 RegResults);
2587 } else {
2588 llvm::CallInst *Result =
2589 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2590 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2591 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2592 ResultRegTypes, *this, RegResults);
2593 }
2594
2595 assert(RegResults.size() == ResultRegTypes.size());
2596 assert(RegResults.size() == ResultTruncRegTypes.size());
2597 assert(RegResults.size() == ResultRegDests.size());
2598 // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
2599 // in which case its size may grow.
2600 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2601 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2602 llvm::Value *Tmp = RegResults[i];
2603
2604 // If the result type of the LLVM IR asm doesn't match the result type of
2605 // the expression, do the conversion.
2606 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2607 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2608
2609 // Truncate the integer result to the right size; note that TruncTy can be
2610 // a pointer.
2611 if (TruncTy->isFloatingPointTy())
2612 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2613 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2614 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2615 Tmp = Builder.CreateTrunc(Tmp,
2616 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2617 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2618 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2619 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2620 Tmp = Builder.CreatePtrToInt(Tmp,
2621 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2622 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2623 } else if (TruncTy->isIntegerTy()) {
2624 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2625 } else if (TruncTy->isVectorTy()) {
2626 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2627 }
2628 }
2629
2630 LValue Dest = ResultRegDests[i];
2631 // ResultTypeRequiresCast elements correspond to the first
2632 // ResultTypeRequiresCast.size() elements of RegResults.
2633 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2634 unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2635 Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2636 ResultRegTypes[i]->getPointerTo());
2637 QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2638 if (Ty.isNull()) {
2639 const Expr *OutExpr = S.getOutputExpr(i);
2640 CGM.Error(
2641 OutExpr->getExprLoc(),
2642 "impossible constraint in asm: can't store value into a register");
2643 return;
2644 }
2645 Dest = MakeAddrLValue(A, Ty);
2646 }
2647 EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2648 }
2649 }
2650
2651 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2652 const RecordDecl *RD = S.getCapturedRecordDecl();
2653 QualType RecordTy = getContext().getRecordType(RD);
2654
2655 // Initialize the captured struct.
2656 LValue SlotLV =
2657 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2658
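// Walk the capture initializers in parallel with the record's fields,
// emitting each capture into its field; captured VLA bounds are stored
// via EmitLambdaVLACapture rather than as ordinary field initializers.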
2659 RecordDecl::field_iterator CurField = RD->field_begin();
2660 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2661 E = S.capture_init_end();
2662 I != E; ++I, ++CurField) {
2663 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2664 if (CurField->hasCapturedVLAType()) {
2665 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2666 } else {
2667 EmitInitializerForField(*CurField, LV, *I);
2668 }
2669 }
2670
2671 return SlotLV;
2672 }
2673
2674 /// Generate an outlined function for the body of a CapturedStmt, store any
2675 /// captured variables into the captured struct, and call the outlined function.
2676 llvm::Function *
2677 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2678 LValue CapStruct = InitCapturedStruct(S);
2679
2680 // Emit the CapturedDecl
2681 CodeGenFunction CGF(CGM, true);
2682 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2683 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2684 delete CGF.CapturedStmtInfo;
2685
2686 // Emit call to the helper function.
2687 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2688
2689 return F;
2690 }
2691
2692 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2693 LValue CapStruct = InitCapturedStruct(S);
2694 return CapStruct.getAddress(*this);
2695 }
2696
2697 /// Creates the outlined function for a CapturedStmt.
2698 llvm::Function *
2699 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2700 assert(CapturedStmtInfo &&
2701 "CapturedStmtInfo should be set when generating the captured function");
2702 const CapturedDecl *CD = S.getCapturedDecl();
2703 const RecordDecl *RD = S.getCapturedRecordDecl();
2704 SourceLocation Loc = S.getBeginLoc();
2705 assert(CD->hasBody() && "missing CapturedDecl body");
2706
2707 // Build the argument list.
2708 ASTContext &Ctx = CGM.getContext();
2709 FunctionArgList Args;
2710 Args.append(CD->param_begin(), CD->param_end());
2711
2712 // Create the function declaration.
2713 const CGFunctionInfo &FuncInfo =
2714 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2715 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2716
2717 llvm::Function *F =
2718 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2719 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2720 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2721 if (CD->isNothrow())
2722 F->addFnAttr(llvm::Attribute::NoUnwind);
2723
2724 // Generate the function.
2725 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2726 CD->getBody()->getBeginLoc());
2727 // Set the context parameter in CapturedStmtInfo.
2728 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2729 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2730
2731 // Initialize variable-length arrays.
2732 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2733 Ctx.getTagDeclType(RD));
2734 for (auto *FD : RD->fields()) {
2735 if (FD->hasCapturedVLAType()) {
2736 auto *ExprArg =
2737 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2738 .getScalarVal();
2739 auto VAT = FD->getCapturedVLAType();
2740 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2741 }
2742 }
2743
2744 // If 'this' is captured, load it into CXXThisValue.
2745 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2746 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2747 LValue ThisLValue = EmitLValueForField(Base, FD);
2748 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2749 }
2750
2751 PGO.assignRegionCounters(GlobalDecl(CD), F);
2752 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2753 FinishFunction(CD->getBodyRBrace());
2754
2755 return F;
2756 }
2757