//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/Expr.h"
#include "clang/AST/Stmt.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/DiagnosticSema.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//

void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S, Attrs))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple, since they may
      // be in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPCanonicalLoopClass:
    EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPTileDirectiveClass:
    EmitOMPTileDirective(cast<OMPTileDirective>(*S));
    break;
  case Stmt::OMPUnrollDirectiveClass:
    EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPInteropDirectiveClass:
    llvm_unreachable("Interop directive not supported yet.");
    break;
  case Stmt::OMPDispatchDirectiveClass:
    llvm_unreachable("Dispatch directive not supported yet.");
    break;
  case Stmt::OMPMaskedDirectiveClass:
    EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
                                     ArrayRef<const Attr *> Attrs) {
  switch (S->getStmtClass()) {
  default:
    return false;
  case Stmt::NullStmtClass:
    break;
  case Stmt::CompoundStmtClass:
    EmitCompoundStmt(cast<CompoundStmt>(*S));
    break;
  case Stmt::DeclStmtClass:
    EmitDeclStmt(cast<DeclStmt>(*S));
    break;
  case Stmt::LabelStmtClass:
    EmitLabelStmt(cast<LabelStmt>(*S));
    break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S));
    break;
  case Stmt::GotoStmtClass:
    EmitGotoStmt(cast<GotoStmt>(*S));
    break;
  case Stmt::BreakStmtClass:
    EmitBreakStmt(cast<BreakStmt>(*S));
    break;
  case Stmt::ContinueStmtClass:
    EmitContinueStmt(cast<ContinueStmt>(*S));
    break;
  case Stmt::DefaultStmtClass:
    EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
    break;
  case Stmt::CaseStmtClass:
    EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
    break;
  case Stmt::SEHLeaveStmtClass:
    EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
    break;
  }
  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns it
/// (for use by the statement expression extension).
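/// For example, in the GNU statement expression "({ f(); x; })", the value of
/// the trailing "x" is what gets captured when GetLast is true.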
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl*>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());

  // IsEHa - emit eha.scope.begin if it's a side entry of a scope
  if (getLangOpts().EHAsynch && S.isSideEntry())
    EmitSehCppScopeBegin();

  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  const CallExpr *musttail = nullptr;

  for (const auto *A : S.getAttrs()) {
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
    }
    if (A->getKind() == attr::MustTail) {
      const Stmt *Sub = S.getSubStmt();
      const ReturnStmt *R = cast<ReturnStmt>(Sub);
      musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
    }
  }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  SaveAndRestore<const CallExpr *> save_musttail(MustTailCall, musttail);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
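  // For example, "if (0) f(); else g();" emits only the call to g(), unless
  // the skipped arm contains a label that could be jumped to.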
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  // Prefer the PGO based weights over the likelihood attribute.
  // When the build isn't optimized the metadata isn't used, so don't generate
  // it.
  Stmt::Likelihood LH = Stmt::LH_None;
  uint64_t Count = getProfileCount(S.getThen());
  if (!Count && CGM.getCodeGenOpts().OptimizationLevel)
    LH = Stmt::getLikelihood(S.getThen(), S.getElse());
  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, Count, LH);

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C != nullptr;
  bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));
    Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
    CGM.getDiags().Report(A->getLocation(),
                          diag::warn_attribute_has_no_effect_on_infinite_loop)
        << A << A->getRange();
    CGM.getDiags().Report(
        S.getWhileLoc(),
        diag::note_attribute_has_no_effect_on_infinite_loop_here)
        << SourceRange(S.getWhileLoc(), S.getRParenLoc());
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the branch on the condition, the LoopHeader is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the while header.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
  bool CondIsConstInt = C;
  bool EmitBoolCondBranch = !C || !C->isZero();

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the branch on the condition, the do.cond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = CondDest.getBlock();
  EmitBlock(CondBlock);

  Expr::EvalResult Result;
  bool CondIsConstInt =
      !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()),
                 checkIfLoopMustProgress(CondIsConstInt));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  // If the for loop doesn't have an increment we can just use the condition as
  // the continue block. Otherwise, if there is no condition variable, we can
  // form the continue block now. If there is a condition variable, we can't
  // form the continue block until after we've emitted the condition, because
  // the condition is in scope in the increment, but Sema's jump diagnostics
  // ensure that there are no continues from the condition variable that jump
  // to the loop increment.
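  // For example, in "for (; int x = next(); ++i)", 'x' is still in scope while
  // '++i' runs, so the increment block can only be formed after 'x' has been
  // emitted.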
  JumpDest Continue;
  if (!S.getInc())
    Continue = CondDest;
  else if (!S.getConditionVariable())
    Continue = getJumpDestInCurrentScope("for.inc");
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());

      // We have entered the condition variable's scope, so we're now able to
      // jump to the continue block.
      Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
      BreakContinueStack.back().ContinueBlock = Continue;
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    llvm::MDNode *Weights =
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
    if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
      BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
          BoolCondVal, Stmt::getLikelihood(S.getBody()));

    Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  llvm::MDNode *Weights =
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
  if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
    BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
        BoolCondVal, Stmt::getLikelihood(S.getBody()));
  Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// If we have 'return f(...);', where both caller and callee are SwiftAsync,
/// codegen it as 'tail call ...; ret void;'.
static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
                                     const CGFunctionInfo *CurFnInfo) {
  auto calleeQualType = CE->getCallee()->getType();
  const FunctionType *calleeType = nullptr;
  if (calleeQualType->isFunctionPointerType() ||
      calleeQualType->isFunctionReferenceType() ||
      calleeQualType->isBlockPointerType() ||
      calleeQualType->isMemberFunctionPointerType()) {
    calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
  } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
    calleeType = ty;
  } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
    if (auto methodDecl = CMCE->getMethodDecl()) {
      // getMethodDecl() doesn't handle member pointers at the moment.
      calleeType = methodDecl->getType()->castAs<FunctionType>();
    } else {
      return;
    }
  } else {
    return;
  }
  if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
      (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
    auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
    CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
    Builder.CreateRetVoid();
    Builder.ClearInsertionPoint();
  }
}

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
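/// For example, GCC accepts "return g();" in a function returning void (when
/// g() itself returns void), and a bare "return;" in a function returning
/// non-void.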
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV) {
      EmitAnyExpr(RV);
      if (auto *CE = dyn_cast<CallExpr>(RV))
        makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
    }
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If case statement range is not too big then
/// add multiple cases to switch instruction, one for each value within
/// the range. If range is too big then emit "if" condition check.
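/// For example, the GNU range "case 1 ... 4:" is added as four individual
/// switch cases, while a much wider range such as "case 1 ... 1000:" is
/// lowered to an unsigned "(Cond - LHS) <= (RHS - LHS)" check chained off the
/// default block.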
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
                                        ArrayRef<const Attr *> Attrs) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      else if (SwitchLikelihood)
        SwitchLikelihood->push_back(LH);

      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  } else if (SwitchLikelihood)
    Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);

  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}
1430
EmitCaseStmt(const CaseStmt & S,ArrayRef<const Attr * > Attrs)1431 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1432 ArrayRef<const Attr *> Attrs) {
1433 // If there is no enclosing switch instance that we're aware of, then this
1434 // case statement and its block can be elided. This situation only happens
1435 // when we've constant-folded the switch, are emitting the constant case,
1436 // and part of the constant case includes another case statement. For
1437 // instance: switch (4) { case 4: do { case 5: } while (1); }
1438 if (!SwitchInsn) {
1439 EmitStmt(S.getSubStmt());
1440 return;
1441 }
1442
1443 // Handle case ranges.
1444 if (S.getRHS()) {
1445 EmitCaseStmtRange(S, Attrs);
1446 return;
1447 }
1448
1449 llvm::ConstantInt *CaseVal =
1450 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1451 if (SwitchLikelihood)
1452 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1453
1454 // If the body of the case is just a 'break', try to not emit an empty block.
1455 // If we're profiling or we're not optimizing, leave the block in for better
1456 // debug and coverage analysis.
1457 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1458 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1459 isa<BreakStmt>(S.getSubStmt())) {
1460 JumpDest Block = BreakContinueStack.back().BreakBlock;
1461
1462 // Only do this optimization if there are no cleanups that need emitting.
1463 if (isObviouslyBranchWithoutCleanups(Block)) {
1464 if (SwitchWeights)
1465 SwitchWeights->push_back(getProfileCount(&S));
1466 SwitchInsn->addCase(CaseVal, Block.getBlock());
1467
1468 // If there was a fallthrough into this case, make sure to redirect it to
1469 // the end of the switch as well.
1470 if (Builder.GetInsertBlock()) {
1471 Builder.CreateBr(Block.getBlock());
1472 Builder.ClearInsertionPoint();
1473 }
1474 return;
1475 }
1476 }
1477
1478 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1479 EmitBlockWithFallThrough(CaseDest, &S);
1480 if (SwitchWeights)
1481 SwitchWeights->push_back(getProfileCount(&S));
1482 SwitchInsn->addCase(CaseVal, CaseDest);
1483
1484 // Recursively emitting the statement is acceptable, but is not wonderful for
1485 // code where we have many case statements nested together, i.e.:
1486 // case 1:
1487 // case 2:
1488 // case 3: etc.
1489 // Handling this recursively will create a new block for each case statement
1490 // that falls through to the next case which is IR intensive. It also causes
1491 // deep recursion which can run into stack depth limitations. Handle
1492 // sequential non-range case statements specially.
1493 //
1494 // TODO When the next case has a likelihood attribute the code returns to the
1495 // recursive algorithm. Maybe improve this case if it becomes common practice
1496 // to use a lot of attributes.
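  // For example, 'case 1: case 2: case 3: foo();' adds the values 1, 2, and 3
  // as separate cases of the same SwitchInst, all targeting one "sw.bb" block,
  // rather than recursing and emitting a chain of empty fallthrough blocks
  // (unless Clang instrumentation requires a distinct block per case).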
1497 const CaseStmt *CurCase = &S;
1498 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1499
1500 // Otherwise, iteratively add consecutive cases to this switch stmt.
1501 while (NextCase && NextCase->getRHS() == nullptr) {
1502 CurCase = NextCase;
1503 llvm::ConstantInt *CaseVal =
1504 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1505
1506 if (SwitchWeights)
1507 SwitchWeights->push_back(getProfileCount(NextCase));
1508 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1509 CaseDest = createBasicBlock("sw.bb");
1510 EmitBlockWithFallThrough(CaseDest, CurCase);
1511 }
1512     // Since this loop is only executed when the CaseStmt has no attributes,
1513 // use a hard-coded value.
1514 if (SwitchLikelihood)
1515 SwitchLikelihood->push_back(Stmt::LH_None);
1516
1517 SwitchInsn->addCase(CaseVal, CaseDest);
1518 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1519 }
1520
1521 // Normal default recursion for non-cases.
1522 EmitStmt(CurCase->getSubStmt());
1523 }
1524
1525 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1526 ArrayRef<const Attr *> Attrs) {
1527 // If there is no enclosing switch instance that we're aware of, then this
1528 // default statement can be elided. This situation only happens when we've
1529 // constant-folded the switch.
1530 if (!SwitchInsn) {
1531 EmitStmt(S.getSubStmt());
1532 return;
1533 }
1534
1535 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1536 assert(DefaultBlock->empty() &&
1537 "EmitDefaultStmt: Default block already defined?");
1538
1539 if (SwitchLikelihood)
1540 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1541
1542 EmitBlockWithFallThrough(DefaultBlock, &S);
1543
1544 EmitStmt(S.getSubStmt());
1545 }
1546
1547 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1548 /// constant value that is being switched on, see if we can dead code eliminate
1549 /// the body of the switch to a simple series of statements to emit. Basically,
1550 /// on a switch (5) we want to find these statements:
1551 /// case 5:
1552 /// printf(...); <--
1553 /// ++i; <--
1554 /// break;
1555 ///
1556 /// and add them to the ResultStmts vector. If it is unsafe to do this
1557 /// transformation (for example, one of the elided statements contains a label
1558 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1559 /// should include statements after it (e.g. the printf() line is a substmt of
1560 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1561 /// statement, then return CSFC_Success.
1562 ///
1563 /// If Case is non-null, then we are looking for the specified case, checking
1564 /// that nothing we jump over contains labels. If Case is null, then we found
1565 /// the case and are looking for the break.
1566 ///
1567 /// If the recursive walk actually finds our Case, then we set FoundCase to
1568 /// true.
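/// For example, the walk returns CSFC_Failure when a statement it would have
/// to skip contains a label, since other code could still jump into the
/// region we want to elide:
///   switch (5) { case 4: lbl: foo(); case 5: bar(); }  // with a 'goto lbl;'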
1569 ///
1570 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1571 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1572 const SwitchCase *Case,
1573 bool &FoundCase,
1574 SmallVectorImpl<const Stmt*> &ResultStmts) {
1575 // If this is a null statement, just succeed.
1576 if (!S)
1577 return Case ? CSFC_Success : CSFC_FallThrough;
1578
1579 // If this is the switchcase (case 4: or default) that we're looking for, then
1580 // we're in business. Just add the substatement.
1581 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1582 if (S == Case) {
1583 FoundCase = true;
1584 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1585 ResultStmts);
1586 }
1587
1588 // Otherwise, this is some other case or default statement, just ignore it.
1589 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1590 ResultStmts);
1591 }
1592
1593 // If we are in the live part of the code and we found our break statement,
1594 // return a success!
1595 if (!Case && isa<BreakStmt>(S))
1596 return CSFC_Success;
1597
1598   // If this is a compound statement, then it might contain the SwitchCase, the
1599   // break, or neither.
1600 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1601 // Handle this as two cases: we might be looking for the SwitchCase (if so
1602 // the skipped statements must be skippable) or we might already have it.
1603 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1604 bool StartedInLiveCode = FoundCase;
1605 unsigned StartSize = ResultStmts.size();
1606
1607 // If we've not found the case yet, scan through looking for it.
1608 if (Case) {
1609 // Keep track of whether we see a skipped declaration. The code could be
1610 // using the declaration even if it is skipped, so we can't optimize out
1611 // the decl if the kept statements might refer to it.
1612 bool HadSkippedDecl = false;
1613
1614 // If we're looking for the case, just see if we can skip each of the
1615 // substatements.
1616 for (; Case && I != E; ++I) {
1617 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1618
1619 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1620 case CSFC_Failure: return CSFC_Failure;
1621 case CSFC_Success:
1622         // A successful result means either that 1) the statement doesn't
1623         // have the case and is skippable, or 2) it does contain the case value
1624         // and also contains the break to exit the switch. In the latter case,
1625         // we just verify that the rest of the statements are elidable.
1626 if (FoundCase) {
1627 // If we found the case and skipped declarations, we can't do the
1628 // optimization.
1629 if (HadSkippedDecl)
1630 return CSFC_Failure;
1631
1632 for (++I; I != E; ++I)
1633 if (CodeGenFunction::ContainsLabel(*I, true))
1634 return CSFC_Failure;
1635 return CSFC_Success;
1636 }
1637 break;
1638 case CSFC_FallThrough:
1639         // If we have a fallthrough condition, then we must have found the
1640         // case and started to include statements. Consider the rest of the
1641 // statements in the compound statement as candidates for inclusion.
1642 assert(FoundCase && "Didn't find case but returned fallthrough?");
1643 // We recursively found Case, so we're not looking for it anymore.
1644 Case = nullptr;
1645
1646 // If we found the case and skipped declarations, we can't do the
1647 // optimization.
1648 if (HadSkippedDecl)
1649 return CSFC_Failure;
1650 break;
1651 }
1652 }
1653
1654 if (!FoundCase)
1655 return CSFC_Success;
1656
1657 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1658 }
1659
1660 // If we have statements in our range, then we know that the statements are
1661 // live and need to be added to the set of statements we're tracking.
1662 bool AnyDecls = false;
1663 for (; I != E; ++I) {
1664 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1665
1666 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1667 case CSFC_Failure: return CSFC_Failure;
1668 case CSFC_FallThrough:
1669       // A fallthrough result means that the statement was simple and was just
1670       // added to ResultStmts; keep adding the statements that follow.
1671 break;
1672 case CSFC_Success:
1673 // A successful result means that we found the break statement and
1674 // stopped statement inclusion. We just ensure that any leftover stmts
1675 // are skippable and return success ourselves.
1676 for (++I; I != E; ++I)
1677 if (CodeGenFunction::ContainsLabel(*I, true))
1678 return CSFC_Failure;
1679 return CSFC_Success;
1680 }
1681 }
1682
1683 // If we're about to fall out of a scope without hitting a 'break;', we
1684 // can't perform the optimization if there were any decls in that scope
1685 // (we'd lose their end-of-lifetime).
1686 if (AnyDecls) {
1687 // If the entire compound statement was live, there's one more thing we
1688 // can try before giving up: emit the whole thing as a single statement.
1689 // We can do that unless the statement contains a 'break;'.
1690 // FIXME: Such a break must be at the end of a construct within this one.
1691 // We could emit this by just ignoring the BreakStmts entirely.
1692 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1693 ResultStmts.resize(StartSize);
1694 ResultStmts.push_back(S);
1695 } else {
1696 return CSFC_Failure;
1697 }
1698 }
1699
1700 return CSFC_FallThrough;
1701 }
1702
1703 // Okay, this is some other statement that we don't handle explicitly, like a
1704 // for statement or increment etc. If we are skipping over this statement,
1705 // just verify it doesn't have labels, which would make it invalid to elide.
1706 if (Case) {
1707 if (CodeGenFunction::ContainsLabel(S, true))
1708 return CSFC_Failure;
1709 return CSFC_Success;
1710 }
1711
1712 // Otherwise, we want to include this statement. Everything is cool with that
1713 // so long as it doesn't contain a break out of the switch we're in.
1714 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1715
1716 // Otherwise, everything is great. Include the statement and tell the caller
1717 // that we fall through and include the next statement as well.
1718 ResultStmts.push_back(S);
1719 return CSFC_FallThrough;
1720 }
1721
1722 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1723 /// then invoke CollectStatementsForCase to find the list of statements to emit
1724 /// for a switch on constant. See the comment above CollectStatementsForCase
1725 /// for more details.
1726 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1727 const llvm::APSInt &ConstantCondValue,
1728 SmallVectorImpl<const Stmt*> &ResultStmts,
1729 ASTContext &C,
1730 const SwitchCase *&ResultCase) {
1731 // First step, find the switch case that is being branched to. We can do this
1732 // efficiently by scanning the SwitchCase list.
1733 const SwitchCase *Case = S.getSwitchCaseList();
1734 const DefaultStmt *DefaultCase = nullptr;
1735
1736 for (; Case; Case = Case->getNextSwitchCase()) {
1737 // It's either a default or case. Just remember the default statement in
1738 // case we're not jumping to any numbered cases.
1739 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1740 DefaultCase = DS;
1741 continue;
1742 }
1743
1744 // Check to see if this case is the one we're looking for.
1745 const CaseStmt *CS = cast<CaseStmt>(Case);
1746 // Don't handle case ranges yet.
1747 if (CS->getRHS()) return false;
1748
1749 // If we found our case, remember it as 'case'.
1750 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1751 break;
1752 }
1753
1754 // If we didn't find a matching case, we use a default if it exists, or we
1755 // elide the whole switch body!
1756 if (!Case) {
1757 // It is safe to elide the body of the switch if it doesn't contain labels
1758 // etc. If it is safe, return successfully with an empty ResultStmts list.
1759 if (!DefaultCase)
1760 return !CodeGenFunction::ContainsLabel(&S);
1761 Case = DefaultCase;
1762 }
1763
1764 // Ok, we know which case is being jumped to, try to collect all the
1765 // statements that follow it. This can fail for a variety of reasons. Also,
1766 // check to see that the recursive walk actually found our case statement.
1767 // Insane cases like this can fail to find it in the recursive walk since we
1768 // don't handle every stmt kind:
1769 // switch (4) {
1770 // while (1) {
1771 // case 4: ...
1772 bool FoundCase = false;
1773 ResultCase = Case;
1774 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1775 ResultStmts) != CSFC_Failure &&
1776 FoundCase;
1777 }
1778
1779 static Optional<SmallVector<uint64_t, 16>>
1780 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1781 // Are there enough branches to weight them?
1782 if (Likelihoods.size() <= 1)
1783 return None;
1784
1785 uint64_t NumUnlikely = 0;
1786 uint64_t NumNone = 0;
1787 uint64_t NumLikely = 0;
1788 for (const auto LH : Likelihoods) {
1789 switch (LH) {
1790 case Stmt::LH_Unlikely:
1791 ++NumUnlikely;
1792 break;
1793 case Stmt::LH_None:
1794 ++NumNone;
1795 break;
1796 case Stmt::LH_Likely:
1797 ++NumLikely;
1798 break;
1799 }
1800 }
1801
1802 // Is there a likelihood attribute used?
1803 if (NumUnlikely == 0 && NumLikely == 0)
1804 return None;
1805
1806 // When multiple cases share the same code they can be combined during
1807 // optimization. In that case the weights of the branch will be the sum of
1808 // the individual weights. Make sure the combined sum of all neutral cases
1809 // doesn't exceed the value of a single likely attribute.
1810 // The additions both avoid divisions by 0 and make sure the weights of None
1811 // don't exceed the weight of Likely.
1812 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1813 const uint64_t None = Likely / (NumNone + 1);
1814 const uint64_t Unlikely = 0;
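  // For example, with one [[likely]] case, two plain cases, and the default,
  // NumLikely == 1 and NumNone == 3, so Likely == INT32_MAX / 3 and
  // None == Likely / 4; even if all three neutral branches were later merged,
  // their combined weight (3 * None) stays below that of the likely branch.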
1815
1816 SmallVector<uint64_t, 16> Result;
1817 Result.reserve(Likelihoods.size());
1818 for (const auto LH : Likelihoods) {
1819 switch (LH) {
1820 case Stmt::LH_Unlikely:
1821 Result.push_back(Unlikely);
1822 break;
1823 case Stmt::LH_None:
1824 Result.push_back(None);
1825 break;
1826 case Stmt::LH_Likely:
1827 Result.push_back(Likely);
1828 break;
1829 }
1830 }
1831
1832 return Result;
1833 }
1834
1835 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1836 // Handle nested switch statements.
1837 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1838 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1839 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1840 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1841
1842 // See if we can constant fold the condition of the switch and therefore only
1843 // emit the live case statement (if any) of the switch.
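  // For example, 'switch (4) { case 4: foo(); break; default: bar(); }' is
  // lowered to just the call to foo(); no SwitchInst or dead case blocks are
  // emitted.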
1844 llvm::APSInt ConstantCondValue;
1845 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1846 SmallVector<const Stmt*, 4> CaseStmts;
1847 const SwitchCase *Case = nullptr;
1848 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1849 getContext(), Case)) {
1850 if (Case)
1851 incrementProfileCounter(Case);
1852 RunCleanupsScope ExecutedScope(*this);
1853
1854 if (S.getInit())
1855 EmitStmt(S.getInit());
1856
1857 // Emit the condition variable if needed inside the entire cleanup scope
1858 // used by this special case for constant folded switches.
1859 if (S.getConditionVariable())
1860 EmitDecl(*S.getConditionVariable());
1861
1862 // At this point, we are no longer "within" a switch instance, so
1863 // we can temporarily enforce this to ensure that any embedded case
1864 // statements are not emitted.
1865 SwitchInsn = nullptr;
1866
1867 // Okay, we can dead code eliminate everything except this case. Emit the
1868 // specified series of statements and we're good.
1869 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1870 EmitStmt(CaseStmts[i]);
1871 incrementProfileCounter(&S);
1872
1873 // Now we want to restore the saved switch instance so that nested
1874     // switches continue to function properly.
1875 SwitchInsn = SavedSwitchInsn;
1876
1877 return;
1878 }
1879 }
1880
1881 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1882
1883 RunCleanupsScope ConditionScope(*this);
1884
1885 if (S.getInit())
1886 EmitStmt(S.getInit());
1887
1888 if (S.getConditionVariable())
1889 EmitDecl(*S.getConditionVariable());
1890 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1891
1892 // Create basic block to hold stuff that comes after switch
1893 // statement. We also need to create a default block now so that
1894 // explicit case ranges tests can have a place to jump to on
1895 // failure.
1896 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1897 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1898 if (PGO.haveRegionCounts()) {
1899 // Walk the SwitchCase list to find how many there are.
1900 uint64_t DefaultCount = 0;
1901 unsigned NumCases = 0;
1902 for (const SwitchCase *Case = S.getSwitchCaseList();
1903 Case;
1904 Case = Case->getNextSwitchCase()) {
1905 if (isa<DefaultStmt>(Case))
1906 DefaultCount = getProfileCount(Case);
1907 NumCases += 1;
1908 }
1909 SwitchWeights = new SmallVector<uint64_t, 16>();
1910 SwitchWeights->reserve(NumCases);
1911 // The default needs to be first. We store the edge count, so we already
1912 // know the right weight.
1913 SwitchWeights->push_back(DefaultCount);
1914 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
1915 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
1916 // Initialize the default case.
1917 SwitchLikelihood->push_back(Stmt::LH_None);
1918 }
1919
1920 CaseRangeBlock = DefaultBlock;
1921
1922 // Clear the insertion point to indicate we are in unreachable code.
1923 Builder.ClearInsertionPoint();
1924
1925   // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
1926   // then reuse the last ContinueBlock.
1927 JumpDest OuterContinue;
1928 if (!BreakContinueStack.empty())
1929 OuterContinue = BreakContinueStack.back().ContinueBlock;
1930
1931 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
1932
1933 // Emit switch body.
1934 EmitStmt(S.getBody());
1935
1936 BreakContinueStack.pop_back();
1937
1938 // Update the default block in case explicit case range tests have
1939 // been chained on top.
1940 SwitchInsn->setDefaultDest(CaseRangeBlock);
1941
1942 // If a default was never emitted:
1943 if (!DefaultBlock->getParent()) {
1944 // If we have cleanups, emit the default block so that there's a
1945 // place to jump through the cleanups from.
1946 if (ConditionScope.requiresCleanups()) {
1947 EmitBlock(DefaultBlock);
1948
1949 // Otherwise, just forward the default block to the switch end.
1950 } else {
1951 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
1952 delete DefaultBlock;
1953 }
1954 }
1955
1956 ConditionScope.ForceCleanup();
1957
1958 // Emit continuation.
1959 EmitBlock(SwitchExit.getBlock(), true);
1960 incrementProfileCounter(&S);
1961
1962 // If the switch has a condition wrapped by __builtin_unpredictable,
1963 // create metadata that specifies that the switch is unpredictable.
1964 // Don't bother if not optimizing because that metadata would not be used.
1965 auto *Call = dyn_cast<CallExpr>(S.getCond());
1966 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
1967 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
1968 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
1969 llvm::MDBuilder MDHelper(getLLVMContext());
1970 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
1971 MDHelper.createUnpredictable());
1972 }
1973 }
1974
1975 if (SwitchWeights) {
1976 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
1977 "switch weights do not match switch cases");
1978 // If there's only one jump destination there's no sense weighting it.
1979 if (SwitchWeights->size() > 1)
1980 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1981 createProfileWeights(*SwitchWeights));
1982 delete SwitchWeights;
1983 } else if (SwitchLikelihood) {
1984 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
1985 "switch likelihoods do not match switch cases");
1986 Optional<SmallVector<uint64_t, 16>> LHW =
1987 getLikelihoodWeights(*SwitchLikelihood);
1988 if (LHW) {
1989 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
1990 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
1991 createProfileWeights(*LHW));
1992 }
1993 delete SwitchLikelihood;
1994 }
1995 SwitchInsn = SavedSwitchInsn;
1996 SwitchWeights = SavedSwitchWeights;
1997 SwitchLikelihood = SavedSwitchLikelihood;
1998 CaseRangeBlock = SavedCRBlock;
1999 }
2000
2001 static std::string
2002 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2003 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2004 std::string Result;
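  // As an illustration, a GCC constraint string such as "=&r,g" comes out as
  // "&r|imr": '=' is dropped, '&' is kept (duplicates skipped), ',' becomes
  // the LLVM alternative separator '|', and 'g' expands to "imr" (assuming
  // the target's convertConstraint leaves 'r' unchanged).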
2005
2006 while (*Constraint) {
2007 switch (*Constraint) {
2008 default:
2009 Result += Target.convertConstraint(Constraint);
2010 break;
2011 // Ignore these
2012 case '*':
2013 case '?':
2014 case '!':
2015     case '=': // Will see this and the following in multi-alternative constraints.
2016 case '+':
2017 break;
2018 case '#': // Ignore the rest of the constraint alternative.
2019 while (Constraint[1] && Constraint[1] != ',')
2020 Constraint++;
2021 break;
2022 case '&':
2023 case '%':
2024 Result += *Constraint;
2025 while (Constraint[1] && Constraint[1] == *Constraint)
2026 Constraint++;
2027 break;
2028 case ',':
2029 Result += "|";
2030 break;
2031 case 'g':
2032 Result += "imr";
2033 break;
2034 case '[': {
2035 assert(OutCons &&
2036 "Must pass output names to constraints with a symbolic name");
2037 unsigned Index;
2038 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2039 assert(result && "Could not resolve symbolic name"); (void)result;
2040 Result += llvm::utostr(Index);
2041 break;
2042 }
2043 }
2044
2045 Constraint++;
2046 }
2047
2048 return Result;
2049 }
2050
2051 /// AddVariableConstraints - Look at AsmExpr and if it is a variable declared
2052 /// as using a particular register add that as a constraint that will be used
2053 /// in this asm stmt.
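/// For example (illustrative), given
///   register int rv asm("eax");
///   __asm__("..." : "=r"(rv));
/// the "r" output constraint is rewritten as "{eax}" ("&{eax}" for an
/// early-clobber output) so the backend allocates exactly that register.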
2054 static std::string
2055 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2056 const TargetInfo &Target, CodeGenModule &CGM,
2057 const AsmStmt &Stmt, const bool EarlyClobber,
2058 std::string *GCCReg = nullptr) {
2059 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2060 if (!AsmDeclRef)
2061 return Constraint;
2062 const ValueDecl &Value = *AsmDeclRef->getDecl();
2063 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2064 if (!Variable)
2065 return Constraint;
2066 if (Variable->getStorageClass() != SC_Register)
2067 return Constraint;
2068 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2069 if (!Attr)
2070 return Constraint;
2071 StringRef Register = Attr->getLabel();
2072 assert(Target.isValidGCCRegisterName(Register));
2073 // We're using validateOutputConstraint here because we only care if
2074 // this is a register constraint.
2075 TargetInfo::ConstraintInfo Info(Constraint, "");
2076 if (Target.validateOutputConstraint(Info) &&
2077 !Info.allowsRegister()) {
2078 CGM.ErrorUnsupported(&Stmt, "__asm__");
2079 return Constraint;
2080 }
2081 // Canonicalize the register here before returning it.
2082 Register = Target.getNormalizedGCCRegisterName(Register);
2083 if (GCCReg != nullptr)
2084 *GCCReg = Register.str();
2085 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2086 }
2087
2088 llvm::Value*
2089 CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
2090 LValue InputValue, QualType InputType,
2091 std::string &ConstraintStr,
2092 SourceLocation Loc) {
2093 llvm::Value *Arg;
2094 if (Info.allowsRegister() || !Info.allowsMemory()) {
2095 if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
2096 Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
2097 } else {
2098 llvm::Type *Ty = ConvertType(InputType);
2099 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2100 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2101 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2102 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2103 Ty = llvm::PointerType::getUnqual(Ty);
2104
2105 Arg = Builder.CreateLoad(
2106 Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
2107 } else {
2108 Arg = InputValue.getPointer(*this);
2109 ConstraintStr += '*';
2110 }
2111 }
2112 } else {
2113 Arg = InputValue.getPointer(*this);
2114 ConstraintStr += '*';
2115 }
2116
2117 return Arg;
2118 }
2119
2120 llvm::Value* CodeGenFunction::EmitAsmInput(
2121 const TargetInfo::ConstraintInfo &Info,
2122 const Expr *InputExpr,
2123 std::string &ConstraintStr) {
2124 // If this can't be a register or memory, i.e., has to be a constant
2125 // (immediate or symbolic), try to emit it as such.
2126 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2127 if (Info.requiresImmediateConstant()) {
2128 Expr::EvalResult EVResult;
2129 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2130
2131 llvm::APSInt IntResult;
2132 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2133 getContext()))
2134 return llvm::ConstantInt::get(getLLVMContext(), IntResult);
2135 }
2136
2137 Expr::EvalResult Result;
2138 if (InputExpr->EvaluateAsInt(Result, getContext()))
2139 return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
2140 }
2141
2142 if (Info.allowsRegister() || !Info.allowsMemory())
2143 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2144 return EmitScalarExpr(InputExpr);
2145 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2146 return EmitScalarExpr(InputExpr);
2147 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2148 LValue Dest = EmitLValue(InputExpr);
2149 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2150 InputExpr->getExprLoc());
2151 }
2152
2153 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2154 /// asm call instruction. The !srcloc MDNode contains a list of constant
2155 /// integers which are the source locations of the start of each line in the
2156 /// asm.
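/// For example, a two-line asm string such as "mov %eax, %ebx\n mov %ebx, %ecx"
/// yields a !srcloc node with two entries: the raw encoding of the string's
/// start location and that of the byte following the '\n'.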
2157 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2158 CodeGenFunction &CGF) {
2159 SmallVector<llvm::Metadata *, 8> Locs;
2160 // Add the location of the first line to the MDNode.
2161 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2162 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2163 StringRef StrVal = Str->getString();
2164 if (!StrVal.empty()) {
2165 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2166 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2167 unsigned StartToken = 0;
2168 unsigned ByteOffset = 0;
2169
2170 // Add the location of the start of each subsequent line of the asm to the
2171 // MDNode.
2172 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2173 if (StrVal[i] != '\n') continue;
2174 SourceLocation LineLoc = Str->getLocationOfByte(
2175 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2176 Locs.push_back(llvm::ConstantAsMetadata::get(
2177 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2178 }
2179 }
2180
2181 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2182 }
2183
2184 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2185 bool HasUnwindClobber, bool ReadOnly,
2186 bool ReadNone, bool NoMerge, const AsmStmt &S,
2187 const std::vector<llvm::Type *> &ResultRegTypes,
2188 CodeGenFunction &CGF,
2189 std::vector<llvm::Value *> &RegResults) {
2190 if (!HasUnwindClobber)
2191 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2192 llvm::Attribute::NoUnwind);
2193
2194 if (NoMerge)
2195 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2196 llvm::Attribute::NoMerge);
2197 // Attach readnone and readonly attributes.
2198 if (!HasSideEffect) {
2199 if (ReadNone)
2200 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2201 llvm::Attribute::ReadNone);
2202 else if (ReadOnly)
2203 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2204 llvm::Attribute::ReadOnly);
2205 }
2206
2207 // Slap the source location of the inline asm into a !srcloc metadata on the
2208 // call.
2209 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2210 Result.setMetadata("srcloc",
2211 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2212 else {
2213 // At least put the line number on MS inline asm blobs.
2214 llvm::Constant *Loc =
2215 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2216 Result.setMetadata("srcloc",
2217 llvm::MDNode::get(CGF.getLLVMContext(),
2218 llvm::ConstantAsMetadata::get(Loc)));
2219 }
2220
2221 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2222 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2223 // convergent (meaning, they may call an intrinsically convergent op, such
2224 // as bar.sync, and so can't have certain optimizations applied around
2225 // them).
2226 Result.addAttribute(llvm::AttributeList::FunctionIndex,
2227 llvm::Attribute::Convergent);
2228 // Extract all of the register value results from the asm.
2229 if (ResultRegTypes.size() == 1) {
2230 RegResults.push_back(&Result);
2231 } else {
2232 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2233 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2234 RegResults.push_back(Tmp);
2235 }
2236 }
2237 }
2238
2239 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2240 // Assemble the final asm string.
2241 std::string AsmString = S.generateAsmString(getContext());
2242
2243 // Get all the output and input constraints together.
2244 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2245 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2246
2247 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2248 StringRef Name;
2249 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2250 Name = GAS->getOutputName(i);
2251 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2252 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2253 assert(IsValid && "Failed to parse output constraint");
2254 OutputConstraintInfos.push_back(Info);
2255 }
2256
2257 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2258 StringRef Name;
2259 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2260 Name = GAS->getInputName(i);
2261 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2262 bool IsValid =
2263 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2264 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2265 InputConstraintInfos.push_back(Info);
2266 }
2267
2268 std::string Constraints;
2269
2270 std::vector<LValue> ResultRegDests;
2271 std::vector<QualType> ResultRegQualTys;
2272 std::vector<llvm::Type *> ResultRegTypes;
2273 std::vector<llvm::Type *> ResultTruncRegTypes;
2274 std::vector<llvm::Type *> ArgTypes;
2275 std::vector<llvm::Value*> Args;
2276 llvm::BitVector ResultTypeRequiresCast;
2277
2278 // Keep track of inout constraints.
2279 std::string InOutConstraints;
2280 std::vector<llvm::Value*> InOutArgs;
2281 std::vector<llvm::Type*> InOutArgTypes;
2282
2283 // Keep track of out constraints for tied input operand.
2284 std::vector<std::string> OutputConstraints;
2285
2286 // Keep track of defined physregs.
2287 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2288
2289 // An inline asm can be marked readonly if it meets the following conditions:
2290   // - it doesn't have any side effects
2291 // - it doesn't clobber memory
2292 // - it doesn't return a value by-reference
2293 // It can be marked readnone if it doesn't have any input memory constraints
2294 // in addition to meeting the conditions listed above.
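  // For example (illustrative), 'asm("rdtsc" : "=a"(lo), "=d"(hi))' has only
  // register outputs, no memory inputs, and no clobbers, so it can be marked
  // readnone; adding a "memory" clobber, an "=m" output, or a memory input
  // drops these attributes.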
2295 bool ReadOnly = true, ReadNone = true;
2296
2297 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2298 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2299
2300 // Simplify the output constraint.
2301 std::string OutputConstraint(S.getOutputConstraint(i));
2302 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2303 getTarget(), &OutputConstraintInfos);
2304
2305 const Expr *OutExpr = S.getOutputExpr(i);
2306 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2307
2308 std::string GCCReg;
2309 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2310 getTarget(), CGM, S,
2311 Info.earlyClobber(),
2312 &GCCReg);
2313 // Give an error on multiple outputs to same physreg.
2314 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2315 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2316
2317 OutputConstraints.push_back(OutputConstraint);
2318 LValue Dest = EmitLValue(OutExpr);
2319 if (!Constraints.empty())
2320 Constraints += ',';
2321
2322 // If this is a register output, then make the inline asm return it
2323 // by-value. If this is a memory result, return the value by-reference.
2324 QualType QTy = OutExpr->getType();
2325 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2326 hasAggregateEvaluationKind(QTy);
2327 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2328
2329 Constraints += "=" + OutputConstraint;
2330 ResultRegQualTys.push_back(QTy);
2331 ResultRegDests.push_back(Dest);
2332
2333 llvm::Type *Ty = ConvertTypeForMem(QTy);
2334 const bool RequiresCast = Info.allowsRegister() &&
2335 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2336 Ty->isAggregateType());
2337
2338 ResultTruncRegTypes.push_back(Ty);
2339 ResultTypeRequiresCast.push_back(RequiresCast);
2340
2341 if (RequiresCast) {
2342 unsigned Size = getContext().getTypeSize(QTy);
2343 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2344 }
2345 ResultRegTypes.push_back(Ty);
2346 // If this output is tied to an input, and if the input is larger, then
2347 // we need to set the actual result type of the inline asm node to be the
2348 // same as the input type.
2349 if (Info.hasMatchingInput()) {
2350 unsigned InputNo;
2351 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2352 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2353 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2354 break;
2355 }
2356 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2357
2358 QualType InputTy = S.getInputExpr(InputNo)->getType();
2359 QualType OutputType = OutExpr->getType();
2360
2361 uint64_t InputSize = getContext().getTypeSize(InputTy);
2362 if (getContext().getTypeSize(OutputType) < InputSize) {
2363 // Form the asm to return the value as a larger integer or fp type.
2364 ResultRegTypes.back() = ConvertType(InputTy);
2365 }
2366 }
2367 if (llvm::Type* AdjTy =
2368 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2369 ResultRegTypes.back()))
2370 ResultRegTypes.back() = AdjTy;
2371 else {
2372 CGM.getDiags().Report(S.getAsmLoc(),
2373 diag::err_asm_invalid_type_in_input)
2374 << OutExpr->getType() << OutputConstraint;
2375 }
2376
2377 // Update largest vector width for any vector types.
2378 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2379 LargestVectorWidth =
2380 std::max((uint64_t)LargestVectorWidth,
2381 VT->getPrimitiveSizeInBits().getKnownMinSize());
2382 } else {
2383 llvm::Type *DestAddrTy = Dest.getAddress(*this).getType();
2384 llvm::Value *DestPtr = Dest.getPointer(*this);
2385 // Matrix types in memory are represented by arrays, but accessed through
2386 // vector pointers, with the alignment specified on the access operation.
2387 // For inline assembly, update pointer arguments to use vector pointers.
2388       // Otherwise there will be a mismatch if the matrix is also an
2389       // input argument, which is represented as a vector.
2390 if (isa<MatrixType>(OutExpr->getType().getCanonicalType())) {
2391 DestAddrTy = llvm::PointerType::get(
2392 ConvertType(OutExpr->getType()),
2393 cast<llvm::PointerType>(DestAddrTy)->getAddressSpace());
2394 DestPtr = Builder.CreateBitCast(DestPtr, DestAddrTy);
2395 }
2396 ArgTypes.push_back(DestAddrTy);
2397 Args.push_back(DestPtr);
2398 Constraints += "=*";
2399 Constraints += OutputConstraint;
2400 ReadOnly = ReadNone = false;
2401 }
2402
2403 if (Info.isReadWrite()) {
2404 InOutConstraints += ',';
2405
2406 const Expr *InputExpr = S.getOutputExpr(i);
2407 llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
2408 InOutConstraints,
2409 InputExpr->getExprLoc());
2410
2411 if (llvm::Type* AdjTy =
2412 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2413 Arg->getType()))
2414 Arg = Builder.CreateBitCast(Arg, AdjTy);
2415
2416 // Update largest vector width for any vector types.
2417 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2418 LargestVectorWidth =
2419 std::max((uint64_t)LargestVectorWidth,
2420 VT->getPrimitiveSizeInBits().getKnownMinSize());
2421 // Only tie earlyclobber physregs.
2422 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2423 InOutConstraints += llvm::utostr(i);
2424 else
2425 InOutConstraints += OutputConstraint;
2426
2427 InOutArgTypes.push_back(Arg->getType());
2428 InOutArgs.push_back(Arg);
2429 }
2430 }
2431
2432 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2433 // to the return value slot. Only do this when returning in registers.
2434 if (isa<MSAsmStmt>(&S)) {
2435 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2436 if (RetAI.isDirect() || RetAI.isExtend()) {
2437 // Make a fake lvalue for the return value slot.
2438 LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
2439 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2440 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2441 ResultRegDests, AsmString, S.getNumOutputs());
2442 SawAsmBlock = true;
2443 }
2444 }
2445
2446 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2447 const Expr *InputExpr = S.getInputExpr(i);
2448
2449 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2450
2451 if (Info.allowsMemory())
2452 ReadNone = false;
2453
2454 if (!Constraints.empty())
2455 Constraints += ',';
2456
2457 // Simplify the input constraint.
2458 std::string InputConstraint(S.getInputConstraint(i));
2459 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2460 &OutputConstraintInfos);
2461
2462 InputConstraint = AddVariableConstraints(
2463 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2464 getTarget(), CGM, S, false /* No EarlyClobber */);
2465
2466 std::string ReplaceConstraint (InputConstraint);
2467 llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);
2468
2469 // If this input argument is tied to a larger output result, extend the
2470 // input to be the same size as the output. The LLVM backend wants to see
2471 // the input and output of a matching constraint be the same size. Note
2472 // that GCC does not define what the top bits are here. We use zext because
2473 // that is usually cheaper, but LLVM IR should really get an anyext someday.
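    // For example (illustrative), in
    //   long long out; int in;
    //   asm("..." : "=r"(out) : "0"(in));
    // the 32-bit input tied to the 64-bit output is zero-extended to i64 so
    // both sides of the matching constraint have the same type.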
2474 if (Info.hasTiedOperand()) {
2475 unsigned Output = Info.getTiedOperand();
2476 QualType OutputType = S.getOutputExpr(Output)->getType();
2477 QualType InputTy = InputExpr->getType();
2478
2479 if (getContext().getTypeSize(OutputType) >
2480 getContext().getTypeSize(InputTy)) {
2481 // Use ptrtoint as appropriate so that we can do our extension.
2482 if (isa<llvm::PointerType>(Arg->getType()))
2483 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2484 llvm::Type *OutputTy = ConvertType(OutputType);
2485 if (isa<llvm::IntegerType>(OutputTy))
2486 Arg = Builder.CreateZExt(Arg, OutputTy);
2487 else if (isa<llvm::PointerType>(OutputTy))
2488 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2489 else {
2490 assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
2491 Arg = Builder.CreateFPExt(Arg, OutputTy);
2492 }
2493 }
2494 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2495 ReplaceConstraint = OutputConstraints[Output];
2496 }
2497 if (llvm::Type* AdjTy =
2498 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2499 Arg->getType()))
2500 Arg = Builder.CreateBitCast(Arg, AdjTy);
2501 else
2502 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2503 << InputExpr->getType() << InputConstraint;
2504
2505 // Update largest vector width for any vector types.
2506 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2507 LargestVectorWidth =
2508 std::max((uint64_t)LargestVectorWidth,
2509 VT->getPrimitiveSizeInBits().getKnownMinSize());
2510
2511 ArgTypes.push_back(Arg->getType());
2512 Args.push_back(Arg);
2513 Constraints += InputConstraint;
2514 }
2515
2516 // Labels
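  // For an asm goto such as 'asm goto("..." :::: err)', each label's block
  // address is passed as an extra "X" input operand and the statement is
  // later emitted as a callbr whose indirect destinations are those labels.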
2517 SmallVector<llvm::BasicBlock *, 16> Transfer;
2518 llvm::BasicBlock *Fallthrough = nullptr;
2519 bool IsGCCAsmGoto = false;
2520 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2521 IsGCCAsmGoto = GS->isAsmGoto();
2522 if (IsGCCAsmGoto) {
2523 for (const auto *E : GS->labels()) {
2524 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2525 Transfer.push_back(Dest.getBlock());
2526 llvm::BlockAddress *BA =
2527 llvm::BlockAddress::get(CurFn, Dest.getBlock());
2528 Args.push_back(BA);
2529 ArgTypes.push_back(BA->getType());
2530 if (!Constraints.empty())
2531 Constraints += ',';
2532 Constraints += 'X';
2533 }
2534 Fallthrough = createBasicBlock("asm.fallthrough");
2535 }
2536 }
2537
2538 // Append the "input" part of inout constraints last.
2539 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2540 ArgTypes.push_back(InOutArgTypes[i]);
2541 Args.push_back(InOutArgs[i]);
2542 }
2543 Constraints += InOutConstraints;
2544
2545 bool HasUnwindClobber = false;
2546
2547 // Clobbers
2548 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2549 StringRef Clobber = S.getClobber(i);
2550
2551 if (Clobber == "memory")
2552 ReadOnly = ReadNone = false;
2553 else if (Clobber == "unwind") {
2554 HasUnwindClobber = true;
2555 continue;
2556 } else if (Clobber != "cc") {
2557 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2558 if (CGM.getCodeGenOpts().StackClashProtector &&
2559 getTarget().isSPRegName(Clobber)) {
2560 CGM.getDiags().Report(S.getAsmLoc(),
2561 diag::warn_stack_clash_protection_inline_asm);
2562 }
2563 }
2564
2565 if (isa<MSAsmStmt>(&S)) {
2566 if (Clobber == "eax" || Clobber == "edx") {
2567 if (Constraints.find("=&A") != std::string::npos)
2568 continue;
2569 std::string::size_type position1 =
2570 Constraints.find("={" + Clobber.str() + "}");
2571 if (position1 != std::string::npos) {
2572 Constraints.insert(position1 + 1, "&");
2573 continue;
2574 }
2575 std::string::size_type position2 = Constraints.find("=A");
2576 if (position2 != std::string::npos) {
2577 Constraints.insert(position2 + 1, "&");
2578 continue;
2579 }
2580 }
2581 }
2582 if (!Constraints.empty())
2583 Constraints += ',';
2584
2585 Constraints += "~{";
2586 Constraints += Clobber;
2587 Constraints += '}';
2588 }
2589
2590 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2591 "unwind clobber can't be used with asm goto");
2592
2593 // Add machine specific clobbers
2594 std::string MachineClobbers = getTarget().getClobbers();
2595 if (!MachineClobbers.empty()) {
2596 if (!Constraints.empty())
2597 Constraints += ',';
2598 Constraints += MachineClobbers;
2599 }
2600
2601 llvm::Type *ResultType;
2602 if (ResultRegTypes.empty())
2603 ResultType = VoidTy;
2604 else if (ResultRegTypes.size() == 1)
2605 ResultType = ResultRegTypes[0];
2606 else
2607 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2608
2609 llvm::FunctionType *FTy =
2610 llvm::FunctionType::get(ResultType, ArgTypes, false);
2611
2612 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2613 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2614 llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
2615 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2616 FTy, AsmString, Constraints, HasSideEffect,
2617 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2618 std::vector<llvm::Value*> RegResults;
2619 if (IsGCCAsmGoto) {
2620 llvm::CallBrInst *Result =
2621 Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2622 EmitBlock(Fallthrough);
2623 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2624 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2625 ResultRegTypes, *this, RegResults);
2626 } else if (HasUnwindClobber) {
2627 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2628 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2629 InNoMergeAttributedStmt, S, ResultRegTypes, *this,
2630 RegResults);
2631 } else {
2632 llvm::CallInst *Result =
2633 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2634 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2635 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2636 ResultRegTypes, *this, RegResults);
2637 }
2638
2639 assert(RegResults.size() == ResultRegTypes.size());
2640 assert(RegResults.size() == ResultTruncRegTypes.size());
2641 assert(RegResults.size() == ResultRegDests.size());
2642   // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2643 // in which case its size may grow.
2644 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2645 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2646 llvm::Value *Tmp = RegResults[i];
2647 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2648
2649 // If the result type of the LLVM IR asm doesn't match the result type of
2650 // the expression, do the conversion.
2651 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2652
2653       // Truncate the integer result to the right size; note that TruncTy can be
2654 // a pointer.
2655 if (TruncTy->isFloatingPointTy())
2656 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2657 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2658 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2659 Tmp = Builder.CreateTrunc(Tmp,
2660 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2661 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2662 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2663 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2664 Tmp = Builder.CreatePtrToInt(Tmp,
2665 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2666 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2667 } else if (TruncTy->isIntegerTy()) {
2668 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2669 } else if (TruncTy->isVectorTy()) {
2670 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2671 }
2672 }
2673
2674 LValue Dest = ResultRegDests[i];
2675 // ResultTypeRequiresCast elements correspond to the first
2676 // ResultTypeRequiresCast.size() elements of RegResults.
2677 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2678 unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2679 Address A = Builder.CreateBitCast(Dest.getAddress(*this),
2680 ResultRegTypes[i]->getPointerTo());
2681 if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
2682 Builder.CreateStore(Tmp, A);
2683 continue;
2684 }
2685
2686 QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2687 if (Ty.isNull()) {
2688 const Expr *OutExpr = S.getOutputExpr(i);
2689 CGM.Error(
2690 OutExpr->getExprLoc(),
2691 "impossible constraint in asm: can't store value into a register");
2692 return;
2693 }
2694 Dest = MakeAddrLValue(A, Ty);
2695 }
2696 EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2697 }
2698 }
2699
2700 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2701 const RecordDecl *RD = S.getCapturedRecordDecl();
2702 QualType RecordTy = getContext().getRecordType(RD);
2703
2704 // Initialize the captured struct.
2705 LValue SlotLV =
2706 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2707
2708 RecordDecl::field_iterator CurField = RD->field_begin();
2709 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2710 E = S.capture_init_end();
2711 I != E; ++I, ++CurField) {
2712 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2713 if (CurField->hasCapturedVLAType()) {
2714 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2715 } else {
2716 EmitInitializerForField(*CurField, LV, *I);
2717 }
2718 }
2719
2720 return SlotLV;
2721 }
2722
2723 /// Generate an outlined function for the body of a CapturedStmt, store any
2724 /// captured variables into the captured struct, and call the outlined function.
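/// For example (illustrative), for a captured region such as the body of an
/// OpenMP construct, the captures are copied into an "agg.captured" temporary
/// and the generated helper function is called with that temporary's address.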
2725 llvm::Function *
2726 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2727 LValue CapStruct = InitCapturedStruct(S);
2728
2729 // Emit the CapturedDecl
2730 CodeGenFunction CGF(CGM, true);
2731 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2732 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2733 delete CGF.CapturedStmtInfo;
2734
2735 // Emit call to the helper function.
2736 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2737
2738 return F;
2739 }
2740
2741 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2742 LValue CapStruct = InitCapturedStruct(S);
2743 return CapStruct.getAddress(*this);
2744 }
2745
2746 /// Creates the outlined function for a CapturedStmt.
2747 llvm::Function *
2748 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2749 assert(CapturedStmtInfo &&
2750 "CapturedStmtInfo should be set when generating the captured function");
2751 const CapturedDecl *CD = S.getCapturedDecl();
2752 const RecordDecl *RD = S.getCapturedRecordDecl();
2753 SourceLocation Loc = S.getBeginLoc();
2754 assert(CD->hasBody() && "missing CapturedDecl body");
2755
2756 // Build the argument list.
2757 ASTContext &Ctx = CGM.getContext();
2758 FunctionArgList Args;
2759 Args.append(CD->param_begin(), CD->param_end());
2760
2761 // Create the function declaration.
2762 const CGFunctionInfo &FuncInfo =
2763 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2764 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2765
2766 llvm::Function *F =
2767 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2768 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2769 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2770 if (CD->isNothrow())
2771 F->addFnAttr(llvm::Attribute::NoUnwind);
2772
2773 // Generate the function.
2774 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2775 CD->getBody()->getBeginLoc());
2776 // Set the context parameter in CapturedStmtInfo.
2777 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2778 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2779
2780 // Initialize variable-length arrays.
2781 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2782 Ctx.getTagDeclType(RD));
2783 for (auto *FD : RD->fields()) {
2784 if (FD->hasCapturedVLAType()) {
2785 auto *ExprArg =
2786 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2787 .getScalarVal();
2788 auto VAT = FD->getCapturedVLAType();
2789 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2790 }
2791 }
2792
2793 // If 'this' is captured, load it into CXXThisValue.
2794 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2795 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2796 LValue ThisLValue = EmitLValueForField(Base, FD);
2797 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2798 }
2799
2800 PGO.assignRegionCounters(GlobalDecl(CD), F);
2801 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2802 FinishFunction(CD->getBodyRBrace());
2803
2804 return F;
2805 }
2806