//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/Assumptions.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, since they
      // may be in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass:    EmitIfStmt(cast<IfStmt>(*S));              break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass:    EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
  case Stmt::ForStmtClass:   EmitForStmt(cast<ForStmt>(*S), Attrs);     break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass:  EmitAsmStmt(cast<AsmStmt>(*S));       break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPMetaDirectiveClass:
    EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    llvm_unreachable("Interop directive not supported yet.");
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
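/// For example, in the GNU statement expression
///   int x = ({ f(); g(); });
/// the result of the last sub-statement, g(), becomes the initializer of x.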
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

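  // Redirect everything that targeted this block to its unconditional
  // successor, then delete the now-dead branch and block.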
  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

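// Insert the given block after the block containing its first user
// instruction (e.g. a blockaddress reference), so it is laid out near its
// uses; if nothing uses it yet, append it to the end of the function.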
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
    }
    if (A->getKind() == attr::MustTail) {
      const Stmt *Sub = S.getSubStmt();
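      // Sema is expected to have verified that a musttail return's operand is
      // a plain call expression, so these casts should not fail.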
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // The else branch of a consteval if statement is always the only branch that
  // can be runtime evaluated.
  if (S.isConsteval()) {
    const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
    if (Executed) {
      RunCleanupsScope ExecutedScope(*this);
      EmitStmt(Executed);
    }
    return;
  }

  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the conditional branch, the LoopHeader is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the conditional branch, the DoCond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

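  // A for loop with no condition is equivalent to 'for (...; true; ...)', so
  // an absent condition counts as a known constant for the must-progress
  // check below.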
  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
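    // The call for the return operand was emitted immediately before this, so
    // it is the last instruction in the current block; mark it as a mandatory
    // tail call and return void.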
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big, add one
/// switch case per value in the range. If the range is too big, emit an
/// explicit "if" range check instead.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
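  // A value V lies in [LHS, RHS] exactly when the unsigned difference
  // V - LHS is <= RHS - LHS, so a single subtract plus one unsigned compare
  // tests both bounds.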
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
1433 (*SwitchWeights)[0] += ThisCount;
1434 } else if (SwitchLikelihood)
1435 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1436
1437 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1438
1439 // Restore the appropriate insertion point.
1440 if (RestoreBB)
1441 Builder.SetInsertPoint(RestoreBB);
1442 else
1443 Builder.ClearInsertionPoint();
1444 }

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
                                   ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S, Attrs);
    return;
  }

  llvm::ConstantInt *CaseVal =
      Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
  if (SwitchLikelihood)
    SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));

  // If the body of the case is just a 'break', try not to emit an empty
  // block. If we're profiling or we're not optimizing, leave the block in for
  // better debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it
      // to the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful
  // for code where we have many case statements nested together, i.e.:
  //   case 1:
  //   case 2:
  //   case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case, which is IR-intensive. It also
  // causes deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  //
  // TODO: When the next case has a likelihood attribute the code returns to
  // the recursive algorithm. Maybe improve this case if it becomes common
  // practice to use a lot of attributes.
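  //
  // Illustrative sketch of what the iterative path below produces: for
  //   case 1:
  //   case 2:
  //   case 3: body();
  // all three case values are added to the switch with the same "sw.bb"
  // destination block, instead of three chained fallthrough blocks.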
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    llvm::ConstantInt *CaseVal =
        Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, CurCase);
    }
    // Since this loop is only executed when the CaseStmt has no attributes,
    // use a hard-coded value.
    if (SwitchLikelihood)
      SwitchLikelihood->push_back(Stmt::LH_None);

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Generate a stop point for debug info if the case statement is
  // followed by a default statement. A fallthrough case before a
  // default case gets its own branch target.
  if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
    EmitStopPoint(CurCase);

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}

void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
                                      ArrayRef<const Attr *> Attrs) {
  // If there is no enclosing switch instance that we're aware of, then this
  // default statement can be elided. This situation only happens when we've
  // constant-folded the switch.
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
  assert(DefaultBlock->empty() &&
         "EmitDefaultStmt: Default block already defined?");

  if (SwitchLikelihood)
    SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);

  EmitBlockWithFallThrough(DefaultBlock, &S);

  EmitStmt(S.getSubStmt());
}

/// CollectStatementsForCase - Given the body of a 'switch' statement and a
/// constant value that is being switched on, see if we can dead code eliminate
/// the body of the switch to a simple series of statements to emit. Basically,
/// on a switch (5) we want to find these statements:
///   case 5:
///     printf(...);    <--
///     ++i;            <--
///     break;
///
/// and add them to the ResultStmts vector. If it is unsafe to do this
/// transformation (for example, one of the elided statements contains a label
/// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
/// should include statements after it (e.g. the printf() line is a substmt of
/// the case) then return CSFC_FallThrough. If we handled it and found a break
/// statement, then return CSFC_Success.
///
/// If Case is non-null, then we are looking for the specified case, checking
/// that nothing we jump over contains labels. If Case is null, then we found
/// the case and are looking for the break.
///
/// If the recursive walk actually finds our Case, then we set FoundCase to
/// true.
///
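/// Illustrative example (a sketch): for
///   switch (5) { case 1: f(); break; case 5: g(); h(); break; }
/// the walk skips over "case 1: f(); break;" (nothing being jumped over
/// contains a label), finds case 5, collects g() and h() into ResultStmts,
/// and returns CSFC_Success at the break.
///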
enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
static CSFC_Result CollectStatementsForCase(const Stmt *S,
                                            const SwitchCase *Case,
                                            bool &FoundCase,
                                            SmallVectorImpl<const Stmt *> &ResultStmts) {
  // If this is a null statement, just succeed.
  if (!S)
    return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switch case (case 4: or default) that we're looking for,
  // then we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase,
  // the break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the skipped statements must be skippable) or we might already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means either 1) that the statement doesn't
          // have the case and is skippable, or 2) that it does contain the
          // case value and also contains the break to exit the switch. In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmts; keep adding subsequent statements
        // afterwards.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
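///
/// Illustrative example (a sketch): for switch (3) over cases 1, 3 and a
/// default, the scan below stops at "case 3:" and collection starts there;
/// for switch (7) the default case is used instead.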
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt *> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to. We can do
  // this efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  // switch (4) {
  //   while (1) {
  //     case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

static Optional<SmallVector<uint64_t, 16>>
getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
  // Are there enough branches to weight them?
  if (Likelihoods.size() <= 1)
    return None;

  uint64_t NumUnlikely = 0;
  uint64_t NumNone = 0;
  uint64_t NumLikely = 0;
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      ++NumUnlikely;
      break;
    case Stmt::LH_None:
      ++NumNone;
      break;
    case Stmt::LH_Likely:
      ++NumLikely;
      break;
    }
  }

  // Is there a likelihood attribute used?
  if (NumUnlikely == 0 && NumLikely == 0)
    return None;

  // When multiple cases share the same code they can be combined during
  // optimization. In that case the weights of the branch will be the sum of
  // the individual weights. Make sure the combined sum of all neutral cases
  // doesn't exceed the value of a single likely attribute.
  // The additions both avoid divisions by 0 and make sure the weights of None
  // don't exceed the weight of Likely.
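  //
  // Illustrative arithmetic: with 1 likely, 3 neutral, and 1 unlikely case,
  // Likely = INT32_MAX / 3 and None = Likely / 4, so even if all three
  // neutral cases end up combined, their summed weight (3 * None) stays
  // below the weight of the single likely case.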
  const uint64_t Likely = INT32_MAX / (NumLikely + 2);
  const uint64_t None = Likely / (NumNone + 1);
  const uint64_t Unlikely = 0;

  SmallVector<uint64_t, 16> Result;
  Result.reserve(Likelihoods.size());
  for (const auto LH : Likelihoods) {
    switch (LH) {
    case Stmt::LH_Unlikely:
      Result.push_back(Unlikely);
      break;
    case Stmt::LH_None:
      Result.push_back(None);
      break;
    case Stmt::LH_Likely:
      Result.push_back(Likely);
      break;
    }
  }

  return Result;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore
  // only emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt *, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so
      // we can temporarily enforce this to ensure that any embedded case
      // statements are not emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case. Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());

  // Create basic block to hold stuff that comes after switch
  // statement. We also need to create a default block now so that
  // explicit case ranges tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first. We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  } else if (CGM.getCodeGenOpts().OptimizationLevel) {
    SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
    // Initialize the default case.
    SwitchLikelihood->push_back(Stmt::LH_None);
  }

  CaseRangeBlock = DefaultBlock;

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to SwitchExit. If BreakContinueStack is
  // non-empty then reuse the last ContinueBlock.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  // If the switch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the switch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  auto *Call = dyn_cast<CallExpr>(S.getCond());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
                              MDHelper.createUnpredictable());
    }
  }

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  } else if (SwitchLikelihood) {
    assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
           "switch likelihoods do not match switch cases");
    Optional<SmallVector<uint64_t, 16>> LHW =
        getLikelihoodWeights(*SwitchLikelihood);
    if (LHW) {
      llvm::MDBuilder MDHelper(CGM.getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*LHW));
    }
    delete SwitchLikelihood;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  SwitchLikelihood = SavedSwitchLikelihood;
  CaseRangeBlock = SavedCRBlock;
}

static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these.
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alt constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
/// as using a particular register add that as a constraint that will be used
/// in this asm stmt.
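///
/// For example (an illustrative sketch): given
///   register int rv asm("eax");
///   asm("..." : "=r"(rv));
/// the "r" constraint is replaced by "{eax}" (prefixed with '&' for an
/// early-clobber operand), so the final constraint string pins the operand
/// to that register.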
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber,
                       std::string *GCCReg = nullptr) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  if (GCCReg != nullptr)
    *GCCReg = Register.str();
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}

llvm::Value *
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                    LValue InputValue, QualType InputType,
                                    std::string &ConstraintStr,
                                    SourceLocation Loc) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
    } else {
      llvm::Type *Ty = ConvertType(InputType);
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
      if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
          getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
        Ty = llvm::PointerType::getUnqual(Ty);

        Arg = Builder.CreateLoad(
            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
      } else {
        Arg = InputValue.getPointer(*this);
        ConstraintStr += '*';
      }
    }
  } else {
    Arg = InputValue.getPointer(*this);
    ConstraintStr += '*';
  }

  return Arg;
}

llvm::Value *CodeGenFunction::EmitAsmInput(
    const TargetInfo::ConstraintInfo &Info, const Expr *InputExpr,
    std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
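  //
  // Illustrative example: for asm("..." :: "n"(42)), the "n" constraint only
  // accepts an immediate, so the operand is evaluated to an integer constant
  // here instead of being loaded into a register.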
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
  }

  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return EmitScalarExpr(InputExpr);
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return EmitScalarExpr(InputExpr);
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}

/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction. The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
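///
/// Illustrative sketch: for asm("nop\n\tnop"), the node holds two integers,
/// the raw source-location encodings of the start of each asm line, letting
/// backend diagnostics point at the offending line.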
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Metadata *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
    unsigned StartToken = 0;
    unsigned ByteOffset = 0;

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
      Locs.push_back(llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}

static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool HasUnwindClobber, bool ReadOnly,
                              bool ReadNone, bool NoMerge, const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  if (!HasUnwindClobber)
    Result.addFnAttr(llvm::Attribute::NoUnwind);

  if (NoMerge)
    Result.addFnAttr(llvm::Attribute::NoMerge);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.addFnAttr(llvm::Attribute::ReadNone);
    else if (ReadOnly)
      Result.addFnAttr(llvm::Attribute::ReadOnly);
  }

  // Attach OpenMP assumption attributes from the caller, if they exist.
  if (CGF.CGM.getLangOpts().OpenMP) {
    SmallVector<StringRef, 4> Attrs;

    for (const AssumptionAttr *AA :
         CGF.CurFuncDecl->specific_attrs<AssumptionAttr>())
      AA->getAssumption().split(Attrs, ",");

    if (!Attrs.empty())
      Result.addFnAttr(
          llvm::Attribute::get(CGF.getLLVMContext(), llvm::AssumptionAttrKey,
                               llvm::join(Attrs.begin(), Attrs.end(), ",")));
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result.setMetadata("srcloc",
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
  else {
    // At least put the line number on MS inline asm blobs.
    llvm::Constant *Loc =
        llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them).
    Result.addFnAttr(llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(&Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}

void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
        getTarget().validateInputConstraint(OutputConstraintInfos, Info);
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
    InputConstraintInfos.push_back(Info);
  }

  std::string Constraints;

  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Value *> Args;
  llvm::BitVector ResultTypeRequiresCast;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value *> InOutArgs;
  std::vector<llvm::Type *> InOutArgTypes;

  // Keep track of out constraints for tied input operand.
  std::vector<std::string> OutputConstraints;

  // Keep track of defined physregs.
  llvm::SmallSet<std::string, 8> PhysRegOutputs;

  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any side effects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if it doesn't have any input memory constraints
  // in addition to meeting the conditions listed above.
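  //
  // Illustrative example: asm("addl %1, %0" : "=r"(res) : "r"(x)) has no
  // memory operands, no "memory" clobber, and is not volatile, so the call
  // can be marked readnone; adding a "memory" clobber would clear both
  // flags.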
  bool ReadOnly = true, ReadNone = true;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget(), &OutputConstraintInfos);

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    std::string GCCReg;
    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S,
                                              Info.earlyClobber(),
                                              &GCCReg);
    // Give an error on multiple outputs to the same physreg.
    if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
      CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);

    OutputConstraints.push_back(OutputConstraint);
    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value. If this is a memory result, return the value by-reference.
    QualType QTy = OutExpr->getType();
    const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
                                     hasAggregateEvaluationKind(QTy);
    if (!Info.allowsMemory() && IsScalarOrAggregate) {

      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(QTy);
      ResultRegDests.push_back(Dest);

      llvm::Type *Ty = ConvertTypeForMem(QTy);
      const bool RequiresCast = Info.allowsRegister() &&
          (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
           Ty->isAggregateType());

      ResultTruncRegTypes.push_back(Ty);
      ResultTypeRequiresCast.push_back(RequiresCast);

      if (RequiresCast) {
        unsigned Size = getContext().getTypeSize(QTy);
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
      }
      ResultRegTypes.push_back(Ty);
      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
    } else {
      llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
      llvm::Value *DestPtr = Dest.getPointer(*this);
      // Matrix types in memory are represented by arrays, but accessed through
      // vector pointers, with the alignment specified on the access operation.
      // For inline assembly, update pointer arguments to use vector pointers.
      // Otherwise there will be a mismatch if the matrix is also an
      // input-argument which is represented as a vector.
      if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
        DestAddrTy = llvm::PointerType::get(
            ConvertType(OutExpr->getType()),
            cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
        DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
      }
      ArgTypes.push_back(DestAddrTy);
      Args.push_back(DestPtr);
      Constraints += "=*";
      Constraints += OutputConstraint;
      ReadOnly = ReadNone = false;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
                                            InOutConstraints,
                                            InputExpr->getExprLoc());

      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
      // Only tie earlyclobber physregs.
      if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgs.push_back(Arg);
    }
  }

  // If this is a Microsoft-style asm blob, store the return registers
  // (EAX:EDX) to the return value slot. Only do this when returning in
  // registers.
  if (isa<MSAsmStmt>(&S)) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    if (RetAI.isDirect() || RetAI.isExtend()) {
      // Make a fake lvalue for the return value slot.
      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
          ResultRegDests, AsmString, S.getNumOutputs());
      SawAsmBlock = true;
    }
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (Info.allowsMemory())
      ReadNone = false;

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint = AddVariableConstraints(
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
        getTarget(), CGM, S, false /* No EarlyClobber */);

    std::string ReplaceConstraint(InputConstraint);
    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output. The LLVM backend wants to see
    // the input and output of a matching constraint be the same size. Note
    // that GCC does not define what the top bits are here. We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
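    //
    // Illustrative example: in asm("..." : "=r"(out64) : "0"(in32)), the
    // 32-bit input is tied to the 64-bit output, so below it is
    // zero-extended to the output's type before being passed to the asm.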
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
      ReplaceConstraint = OutputConstraints[Output];
    }
    if (llvm::Type *AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
                                                 Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << InputExpr->getType() << InputConstraint;

    // Update largest vector width for any vector types.
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

    ArgTypes.push_back(Arg->getType());
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Labels
  SmallVector<llvm::BasicBlock *, 16> Transfer;
  llvm::BasicBlock *Fallthrough = nullptr;
  bool IsGCCAsmGoto = false;
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
    IsGCCAsmGoto = GS->isAsmGoto();
    if (IsGCCAsmGoto) {
      for (const auto *E : GS->labels()) {
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
        Transfer.push_back(Dest.getBlock());
        llvm::BlockAddress *BA =
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
        Args.push_back(BA);
        ArgTypes.push_back(BA->getType());
        if (!Constraints.empty())
          Constraints += ',';
        Constraints += 'X';
      }
      Fallthrough = createBasicBlock("asm.fallthrough");
    }
  }

  // Append the "input" part of inout constraints last.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  bool HasUnwindClobber = false;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber == "unwind") {
      HasUnwindClobber = true;
      continue;
    } else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (isa<MSAsmStmt>(&S)) {
      if (Clobber == "eax" || Clobber == "edx") {
        if (Constraints.find("=&A") != std::string::npos)
          continue;
        std::string::size_type position1 =
            Constraints.find("={" + Clobber.str() + "}");
        if (position1 != std::string::npos) {
          Constraints.insert(position1 + 1, "&");
          continue;
        }
        std::string::size_type position2 = Constraints.find("=A");
        if (position2 != std::string::npos) {
          Constraints.insert(position2 + 1, "&");
          continue;
        }
      }
    }
    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
         "unwind clobber can't be used with asm goto");

  // Add machine-specific clobbers.
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
      llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
  llvm::InlineAsm *IA = llvm::InlineAsm::get(
      FTy, AsmString, Constraints, HasSideEffect,
      /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
  std::vector<llvm::Value *> RegResults;
  if (IsGCCAsmGoto) {
    llvm::CallBrInst *Result =
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
                      ResultRegTypes, *this, RegResults);
  } else if (HasUnwindClobber) {
    llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
    UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
                      InNoMergeAttributedStmt, S, ResultRegTypes, *this,
                      RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
                      ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
                      ResultRegTypes, *this, RegResults);
  }

  assert(RegResults.size() == ResultRegTypes.size());
  assert(RegResults.size() == ResultTruncRegTypes.size());
  assert(RegResults.size() == ResultRegDests.size());
  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
  // in which case its size may grow.
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];
    llvm::Type *TruncTy = ResultTruncRegTypes[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {

      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
            llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
            llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    LValue Dest = ResultRegDests[i];
    // ResultTypeRequiresCast elements correspond to the first
    // ResultTypeRequiresCast.size() elements of RegResults.
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
      Address A = Builder.CreateBitCast(Dest.getAddress(*this),
                                        ResultRegTypes[i]->getPointerTo());
      if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
        Builder.CreateStore(Tmp, A);
        continue;
      }

      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
      if (Ty.isNull()) {
        const Expr *OutExpr = S.getOutputExpr(i);
        CGM.Error(
            OutExpr->getExprLoc(),
            "impossible constraint in asm: can't store value into a register");
        return;
      }
      Dest = MakeAddrLValue(A, Ty);
    }
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
  }
}

LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}

/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl.
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}

Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress(*this);
}

/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}