//===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This contains code to emit Stmt nodes as LLVM code.
//
//===----------------------------------------------------------------------===//

#include "CGDebugInfo.h"
#include "CGOpenMPRuntime.h"
#include "CodeGenFunction.h"
#include "CodeGenModule.h"
#include "TargetInfo.h"
#include "clang/AST/Attr.h"
#include "clang/AST/StmtVisitor.h"
#include "clang/Basic/Builtins.h"
#include "clang/Basic/PrettyStackTrace.h"
#include "clang/Basic/SourceManager.h"
#include "clang/Basic/TargetInfo.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/Support/SaveAndRestore.h"

using namespace clang;
using namespace CodeGen;

//===----------------------------------------------------------------------===//
//                              Statement Emission
//===----------------------------------------------------------------------===//
void CodeGenFunction::EmitStopPoint(const Stmt *S) {
  if (CGDebugInfo *DI = getDebugInfo()) {
    SourceLocation Loc;
    Loc = S->getBeginLoc();
    DI->EmitLocation(Builder, Loc);

    LastStopPoint = Loc;
  }
}

void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
  assert(S && "Null statement?");
  PGO.setCurrentStmt(S);

  // These statements have their own debug info handling.
  if (EmitSimpleStmt(S))
    return;

  // Check if we are generating unreachable code.
  if (!HaveInsertPoint()) {
    // If so, and the statement doesn't contain a label, then we do not need to
    // generate actual code. This is safe because (1) the current point is
    // unreachable, so we don't need to execute the code, and (2) we've already
    // handled the statements which update internal data structures (like the
    // local variable map) which could be used by subsequent statements.
    if (!ContainsLabel(S)) {
      // Verify that any decl statements were handled as simple; they may be
      // in scope of subsequent reachable statements.
      assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
      return;
    }

    // Otherwise, make a new block to hold the code.
    EnsureInsertPoint();
  }

  // Generate a stoppoint if we are emitting debug info.
  EmitStopPoint(S);

  // Ignore all OpenMP directives except for simd if OpenMP with Simd is
  // enabled.
  if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
    if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
      EmitSimpleOMPExecutableDirective(*D);
      return;
    }
  }

  switch (S->getStmtClass()) {
  case Stmt::NoStmtClass:
  case Stmt::CXXCatchStmtClass:
  case Stmt::SEHExceptStmtClass:
  case Stmt::SEHFinallyStmtClass:
  case Stmt::MSDependentExistsStmtClass:
    llvm_unreachable("invalid statement class to emit generically");
  case Stmt::NullStmtClass:
  case Stmt::CompoundStmtClass:
  case Stmt::DeclStmtClass:
  case Stmt::LabelStmtClass:
  case Stmt::AttributedStmtClass:
  case Stmt::GotoStmtClass:
  case Stmt::BreakStmtClass:
  case Stmt::ContinueStmtClass:
  case Stmt::DefaultStmtClass:
  case Stmt::CaseStmtClass:
  case Stmt::SEHLeaveStmtClass:
    llvm_unreachable("should have emitted these statements as simple");

#define STMT(Type, Base)
#define ABSTRACT_STMT(Op)
#define EXPR(Type, Base) \
  case Stmt::Type##Class:
#include "clang/AST/StmtNodes.inc"
  {
    // Remember the block we came in on.
    llvm::BasicBlock *incoming = Builder.GetInsertBlock();
    assert(incoming && "expression emission must have an insertion point");

    EmitIgnoredExpr(cast<Expr>(S));

    llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
    assert(outgoing && "expression emission cleared block!");

    // The expression emitters assume (reasonably!) that the insertion
    // point is always set. To maintain that, the call-emission code
    // for noreturn functions has to enter a new block with no
    // predecessors. We want to kill that block and mark the current
    // insertion point unreachable in the common case of a call like
    // "exit();". Since expression emission doesn't otherwise create
    // blocks with no predecessors, we can just test for that.
    // However, we must be careful not to do this to our incoming
    // block, because *statement* emission does sometimes create
    // reachable blocks which will have no predecessors until later in
    // the function. This occurs with, e.g., labels that are not
    // reachable by fallthrough.
    if (incoming != outgoing && outgoing->use_empty()) {
      outgoing->eraseFromParent();
      Builder.ClearInsertionPoint();
    }
    break;
  }

  case Stmt::IndirectGotoStmtClass:
    EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;

  case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
  case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
  case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
  case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;

  case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;

  case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
  case Stmt::GCCAsmStmtClass: // Intentional fall-through.
  case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
  case Stmt::CoroutineBodyStmtClass:
    EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
    break;
  case Stmt::CoreturnStmtClass:
    EmitCoreturnStmt(cast<CoreturnStmt>(*S));
    break;
  case Stmt::CapturedStmtClass: {
    const CapturedStmt *CS = cast<CapturedStmt>(S);
    EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
  }
  break;
  case Stmt::ObjCAtTryStmtClass:
    EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
    break;
  case Stmt::ObjCAtCatchStmtClass:
    llvm_unreachable(
        "@catch statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtFinallyStmtClass:
    llvm_unreachable(
        "@finally statements should be handled by EmitObjCAtTryStmt");
  case Stmt::ObjCAtThrowStmtClass:
    EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
    break;
  case Stmt::ObjCAtSynchronizedStmtClass:
    EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
    break;
  case Stmt::ObjCForCollectionStmtClass:
    EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
    break;
  case Stmt::ObjCAutoreleasePoolStmtClass:
    EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
    break;

  case Stmt::CXXTryStmtClass:
    EmitCXXTryStmt(cast<CXXTryStmt>(*S));
    break;
  case Stmt::CXXForRangeStmtClass:
    EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
    break;
  case Stmt::SEHTryStmtClass:
    EmitSEHTryStmt(cast<SEHTryStmt>(*S));
    break;
  case Stmt::OMPParallelDirectiveClass:
    EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
    break;
  case Stmt::OMPSimdDirectiveClass:
    EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
    break;
  case Stmt::OMPForDirectiveClass:
    EmitOMPForDirective(cast<OMPForDirective>(*S));
    break;
  case Stmt::OMPForSimdDirectiveClass:
    EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
    break;
  case Stmt::OMPSectionsDirectiveClass:
    EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
    break;
  case Stmt::OMPSectionDirectiveClass:
    EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
    break;
  case Stmt::OMPSingleDirectiveClass:
    EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
    break;
  case Stmt::OMPMasterDirectiveClass:
    EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
    break;
  case Stmt::OMPCriticalDirectiveClass:
    EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
    break;
  case Stmt::OMPParallelForDirectiveClass:
    EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
    break;
  case Stmt::OMPParallelForSimdDirectiveClass:
    EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterDirectiveClass:
    EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
    break;
  case Stmt::OMPParallelSectionsDirectiveClass:
    EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
    break;
  case Stmt::OMPTaskDirectiveClass:
    EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
    break;
  case Stmt::OMPTaskyieldDirectiveClass:
    EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
    break;
  case Stmt::OMPBarrierDirectiveClass:
    EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
    break;
  case Stmt::OMPTaskwaitDirectiveClass:
    EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
    break;
  case Stmt::OMPTaskgroupDirectiveClass:
    EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
    break;
  case Stmt::OMPFlushDirectiveClass:
    EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
    break;
  case Stmt::OMPDepobjDirectiveClass:
    EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
    break;
  case Stmt::OMPScanDirectiveClass:
    EmitOMPScanDirective(cast<OMPScanDirective>(*S));
    break;
  case Stmt::OMPOrderedDirectiveClass:
    EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
    break;
  case Stmt::OMPAtomicDirectiveClass:
    EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
    break;
  case Stmt::OMPTargetDirectiveClass:
    EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
    break;
  case Stmt::OMPTeamsDirectiveClass:
    EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
    break;
  case Stmt::OMPCancellationPointDirectiveClass:
    EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
    break;
  case Stmt::OMPCancelDirectiveClass:
    EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
    break;
  case Stmt::OMPTargetDataDirectiveClass:
    EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
    break;
  case Stmt::OMPTargetEnterDataDirectiveClass:
    EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
    break;
  case Stmt::OMPTargetExitDataDirectiveClass:
    EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
    break;
  case Stmt::OMPTargetParallelDirectiveClass:
    EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForDirectiveClass:
    EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
    break;
  case Stmt::OMPTaskLoopDirectiveClass:
    EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
    break;
  case Stmt::OMPTaskLoopSimdDirectiveClass:
    EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopDirectiveClass:
    EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
    EmitOMPMasterTaskLoopSimdDirective(
        cast<OMPMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
    EmitOMPParallelMasterTaskLoopDirective(
        cast<OMPParallelMasterTaskLoopDirective>(*S));
    break;
  case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
    EmitOMPParallelMasterTaskLoopSimdDirective(
        cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeDirectiveClass:
    EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetUpdateDirectiveClass:
    EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForDirectiveClass:
    EmitOMPDistributeParallelForDirective(
        cast<OMPDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPDistributeParallelForSimdDirectiveClass:
    EmitOMPDistributeParallelForSimdDirective(
        cast<OMPDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPDistributeSimdDirectiveClass:
    EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTargetParallelForSimdDirectiveClass:
    EmitOMPTargetParallelForSimdDirective(
        cast<OMPTargetParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetSimdDirectiveClass:
    EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeDirectiveClass:
    EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeSimdDirectiveClass:
    EmitOMPTeamsDistributeSimdDirective(
        cast<OMPTeamsDistributeSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTeamsDistributeParallelForSimdDirective(
        cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
    EmitOMPTeamsDistributeParallelForDirective(
        cast<OMPTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDirectiveClass:
    EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeDirectiveClass:
    EmitOMPTargetTeamsDistributeDirective(
        cast<OMPTargetTeamsDistributeDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForDirective(
        cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeParallelForSimdDirective(
        cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
    break;
  case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
    EmitOMPTargetTeamsDistributeSimdDirective(
        cast<OMPTargetTeamsDistributeSimdDirective>(*S));
    break;
  }
}

bool CodeGenFunction::EmitSimpleStmt(const Stmt *S) {
  switch (S->getStmtClass()) {
  default: return false;
  case Stmt::NullStmtClass: break;
  case Stmt::CompoundStmtClass: EmitCompoundStmt(cast<CompoundStmt>(*S)); break;
  case Stmt::DeclStmtClass: EmitDeclStmt(cast<DeclStmt>(*S)); break;
  case Stmt::LabelStmtClass: EmitLabelStmt(cast<LabelStmt>(*S)); break;
  case Stmt::AttributedStmtClass:
    EmitAttributedStmt(cast<AttributedStmt>(*S)); break;
  case Stmt::GotoStmtClass: EmitGotoStmt(cast<GotoStmt>(*S)); break;
  case Stmt::BreakStmtClass: EmitBreakStmt(cast<BreakStmt>(*S)); break;
  case Stmt::ContinueStmtClass: EmitContinueStmt(cast<ContinueStmt>(*S)); break;
  case Stmt::DefaultStmtClass: EmitDefaultStmt(cast<DefaultStmt>(*S)); break;
  case Stmt::CaseStmtClass: EmitCaseStmt(cast<CaseStmt>(*S)); break;
  case Stmt::SEHLeaveStmtClass: EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S)); break;
  }

  return true;
}

/// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
/// this captures the expression result of the last sub-statement and returns
/// it (for use by the statement expression extension).
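/// For example, given the GNU statement expression
///   int y = ({ int x = f(); x; });
/// the trailing expression 'x' is the StmtExprResult whose value initializes
/// 'y'.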
Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
                                          AggValueSlot AggSlot) {
  PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),
                                S.getLBracLoc(),
                                "LLVM IR generation of compound statement ('{}')");

  // Keep track of the current cleanup stack depth, including debug scopes.
  LexicalScope Scope(*this, S.getSourceRange());

  return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
}

Address
CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
                                              bool GetLast,
                                              AggValueSlot AggSlot) {

  const Stmt *ExprResult = S.getStmtExprResult();
  assert((!GetLast || (GetLast && ExprResult)) &&
         "If GetLast is true then the CompoundStmt must have a StmtExprResult");

  Address RetAlloca = Address::invalid();

  for (auto *CurStmt : S.body()) {
    if (GetLast && ExprResult == CurStmt) {
      // We have to special case labels here. They are statements, but when put
      // at the end of a statement expression, they yield the value of their
      // subexpression. Handle this by walking through all labels we encounter,
      // emitting them before we evaluate the subexpr.
      // Similar issues arise for attributed statements.
      while (!isa<Expr>(ExprResult)) {
        if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
          EmitLabel(LS->getDecl());
          ExprResult = LS->getSubStmt();
        } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
          // FIXME: Update this if we ever have attributes that affect the
          // semantics of an expression.
          ExprResult = AS->getSubStmt();
        } else {
          llvm_unreachable("unknown value statement");
        }
      }

      EnsureInsertPoint();

      const Expr *E = cast<Expr>(ExprResult);
      QualType ExprTy = E->getType();
      if (hasAggregateEvaluationKind(ExprTy)) {
        EmitAggExpr(E, AggSlot);
      } else {
        // We can't return an RValue here because there might be cleanups at
        // the end of the StmtExpr. Because of that, we have to emit the result
        // here into a temporary alloca.
        RetAlloca = CreateMemTemp(ExprTy);
        EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
                         /*IsInit*/ false);
      }
    } else {
      EmitStmt(CurStmt);
    }
  }

  return RetAlloca;
}

void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
  llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());

  // If there is a cleanup stack, then it isn't worth trying to
  // simplify this block (we would need to remove it from the scope map
  // and cleanup entry).
  if (!EHStack.empty())
    return;

  // Can only simplify direct branches.
  if (!BI || !BI->isUnconditional())
    return;

  // Can only simplify empty blocks.
  if (BI->getIterator() != BB->begin())
    return;

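  // At this point BB is an empty block that just forwards control, e.g.
  //   bb: br label %succ
  // so redirect all of BB's uses to its successor and delete BB.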
  BB->replaceAllUsesWith(BI->getSuccessor(0));
  BI->eraseFromParent();
  BB->eraseFromParent();
}

void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Fall out of the current block (if necessary).
  EmitBranch(BB);

  if (IsFinished && BB->use_empty()) {
    delete BB;
    return;
  }

  // Place the block after the current block, if possible, or else at
  // the end of the function.
  if (CurBB && CurBB->getParent())
    CurFn->getBasicBlockList().insertAfter(CurBB->getIterator(), BB);
  else
    CurFn->getBasicBlockList().push_back(BB);
  Builder.SetInsertPoint(BB);
}

void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
  // Emit a branch from the current block to the target one if this
  // was a real block. If this was just a fall-through block after a
  // terminator, don't emit it.
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  if (!CurBB || CurBB->getTerminator()) {
    // If there is no insert point or the previous block is already
    // terminated, don't touch it.
  } else {
    // Otherwise, create a fall-through branch.
    Builder.CreateBr(Target);
  }

  Builder.ClearInsertionPoint();
}

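/// Insert \p block into the function after the instruction that first uses
/// it (typically a branch emitted earlier), or at the end of the function if
/// it has no instruction users yet, and make it the current insertion point.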
void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
  bool inserted = false;
  for (llvm::User *u : block->users()) {
    if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
      CurFn->getBasicBlockList().insertAfter(insn->getParent()->getIterator(),
                                             block);
      inserted = true;
      break;
    }
  }

  if (!inserted)
    CurFn->getBasicBlockList().push_back(block);

  Builder.SetInsertPoint(block);
}

CodeGenFunction::JumpDest
CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
  JumpDest &Dest = LabelMap[D];
  if (Dest.isValid()) return Dest;

  // Create, but don't insert, the new block.
  Dest = JumpDest(createBasicBlock(D->getName()),
                  EHScopeStack::stable_iterator::invalid(),
                  NextCleanupDestIndex++);
  return Dest;
}

void CodeGenFunction::EmitLabel(const LabelDecl *D) {
  // Add this label to the current lexical scope if we're within any
  // normal cleanups. Jumps "in" to this label --- when permitted by
  // the language --- may need to be routed around such cleanups.
  if (EHStack.hasNormalCleanups() && CurLexicalScope)
    CurLexicalScope->addLabel(D);

  JumpDest &Dest = LabelMap[D];

  // If we didn't need a forward reference to this label, just go
  // ahead and create a destination at the current scope.
  if (!Dest.isValid()) {
    Dest = getJumpDestInCurrentScope(D->getName());

  // Otherwise, we need to give this label a target depth and remove
  // it from the branch-fixups list.
  } else {
    assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
    Dest.setScopeDepth(EHStack.stable_begin());
    ResolveBranchFixups(Dest.getBlock());
  }

  EmitBlock(Dest.getBlock());

  // Emit debug info for labels.
  if (CGDebugInfo *DI = getDebugInfo()) {
    if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
      DI->setLocation(D->getLocation());
      DI->EmitLabel(D, Builder);
    }
  }

  incrementProfileCounter(D->getStmt());
}

/// Change the cleanup scope of the labels in this lexical scope to
/// match the scope of the enclosing context.
void CodeGenFunction::LexicalScope::rescopeLabels() {
  assert(!Labels.empty());
  EHScopeStack::stable_iterator innermostScope
    = CGF.EHStack.getInnermostNormalCleanup();

  // Change the scope depth of all the labels.
  for (SmallVectorImpl<const LabelDecl *>::const_iterator
         i = Labels.begin(), e = Labels.end(); i != e; ++i) {
    assert(CGF.LabelMap.count(*i));
    JumpDest &dest = CGF.LabelMap.find(*i)->second;
    assert(dest.getScopeDepth().isValid());
    assert(innermostScope.encloses(dest.getScopeDepth()));
    dest.setScopeDepth(innermostScope);
  }

  // Reparent the labels if the new scope also has cleanups.
  if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
    ParentScope->Labels.append(Labels.begin(), Labels.end());
  }
}


void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
  EmitLabel(S.getDecl());
  EmitStmt(S.getSubStmt());
}

void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
  bool nomerge = false;
  for (const auto *A : S.getAttrs())
    if (A->getKind() == attr::NoMerge) {
      nomerge = true;
      break;
    }
  SaveAndRestore<bool> save_nomerge(InNoMergeAttributedStmt, nomerge);
  EmitStmt(S.getSubStmt(), S.getAttrs());
}

void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
}


void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
  if (const LabelDecl *Target = S.getConstantTarget()) {
    EmitBranchThroughCleanup(getJumpDestForLabel(Target));
    return;
  }

  // Ensure that we have an i8* for our PHI node.
  llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
                                         Int8PtrTy, "addr");
  llvm::BasicBlock *CurBB = Builder.GetInsertBlock();

  // Get the basic block for the indirect goto.
  llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
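  // That block is shaped roughly like this sketch (names illustrative; the
  // PHI collects one incoming address per indirect goto in the function):
  //   indirectgoto:
  //     %dest = phi i8* [ %addr, %pred1 ], [ %addr2, %pred2 ], ...
  //     indirectbr i8* %dest, [label %L1, label %L2, ...]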

  // The first instruction in the block has to be the PHI for the switch dest,
  // add an entry for this branch.
  cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);

  EmitBranch(IndGotoBB);
}

void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
  // C99 6.8.4.1: The first substatement is executed if the expression compares
  // unequal to 0. The condition must be a scalar type.
  LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // If the condition constant folds and can be elided, try to avoid emitting
  // the condition and the dead arm of the if/else.
  bool CondConstant;
  if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
                                   S.isConstexpr())) {
    // Figure out which block (then or else) is executed.
    const Stmt *Executed = S.getThen();
    const Stmt *Skipped = S.getElse();
    if (!CondConstant) // Condition false?
      std::swap(Executed, Skipped);

    // If the skipped block has no labels in it, just emit the executed block.
    // This avoids emitting dead code and simplifies the CFG substantially.
    if (S.isConstexpr() || !ContainsLabel(Skipped)) {
      if (CondConstant)
        incrementProfileCounter(&S);
      if (Executed) {
        RunCleanupsScope ExecutedScope(*this);
        EmitStmt(Executed);
      }
      return;
    }
  }

  // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
  // the conditional branch.
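  // As an illustrative sketch (names match the blocks created below; cleanup
  // and profiling blocks may be interposed), the emitted shape is:
  //   br i1 %cond, label %if.then, label %if.else  ; %if.end when no else
  //   if.then: <then> ; br label %if.end
  //   if.else: <else> ; br label %if.end
  //   if.end: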
  llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
  llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
  llvm::BasicBlock *ElseBlock = ContBlock;
  if (S.getElse())
    ElseBlock = createBasicBlock("if.else");

  EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock,
                       getProfileCount(S.getThen()));

  // Emit the 'then' code.
  EmitBlock(ThenBlock);
  incrementProfileCounter(&S);
  {
    RunCleanupsScope ThenScope(*this);
    EmitStmt(S.getThen());
  }
  EmitBranch(ContBlock);

  // Emit the 'else' code if present.
  if (const Stmt *Else = S.getElse()) {
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBlock(ElseBlock);
    }
    {
      RunCleanupsScope ElseScope(*this);
      EmitStmt(Else);
    }
    {
      // There is no need to emit a line number for an unconditional branch.
      auto NL = ApplyDebugLocation::CreateEmpty(*this);
      EmitBranch(ContBlock);
    }
  }

  // Emit the continuation block for code after the if.
  EmitBlock(ContBlock, true);
}

void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
                                    ArrayRef<const Attr *> WhileAttrs) {
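  // As an illustrative sketch (block names match those created below; extra
  // blocks may appear for cleanups and profiling):
  //   while.cond: %c = <cond> ; br i1 %c, label %while.body, label %while.end
  //   while.body: <body>      ; br label %while.cond
  //   while.end: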
  // Emit the header for the loop, which will also become
  // the continue target.
  JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
  EmitBlock(LoopHeader.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
                 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // Create an exit block for when the condition fails, which will
  // also become the break target.
  JumpDest LoopExit = getJumpDestInCurrentScope("while.end");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));

  // C++ [stmt.while]p2:
  //   When the condition of a while statement is a declaration, the
  //   scope of the variable that is declared extends from its point
  //   of declaration (3.3.2) to the end of the while statement.
  //   [...]
  //   The object created in a condition is destroyed and created
  //   with each iteration of the loop.
  RunCleanupsScope ConditionScope(*this);

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());

  // Evaluate the conditional in the while header. C99 6.8.5.1: The
  // evaluation of the controlling expression takes place before each
  // execution of the loop body.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  // while(1) is common, avoid extra exit blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isOne())
      EmitBoolCondBranch = false;

  // As long as the condition is true, go to the loop body.
  llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
  if (EmitBoolCondBranch) {
    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    if (ConditionScope.requiresCleanups())
      ExitBlock = createBasicBlock("while.exit");
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }
  }

  // Emit the loop body. We have to emit this in a cleanup scope
  // because it might be a singleton DeclStmt.
  {
    RunCleanupsScope BodyScope(*this);
    EmitBlock(LoopBody);
    incrementProfileCounter(&S);
    EmitStmt(S.getBody());
  }

  BreakContinueStack.pop_back();

  // Immediately force cleanup.
  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  // Branch to the loop header again.
  EmitBranch(LoopHeader.getBlock());

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock(), true);

  // If we skipped emitting the condition branch, the LoopHeader is typically
  // just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopHeader.getBlock());
}

void CodeGenFunction::EmitDoStmt(const DoStmt &S,
                                 ArrayRef<const Attr *> DoAttrs) {
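  // As an illustrative sketch (the condition is evaluated after the body,
  // per C99 6.8.5.2):
  //   do.body: <body>      ; br label %do.cond
  //   do.cond: %c = <cond> ; br i1 %c, label %do.body, label %do.end
  //   do.end: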
  JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
  JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");

  uint64_t ParentCount = getCurrentProfileCount();

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));

  // Emit the body of the loop.
  llvm::BasicBlock *LoopBody = createBasicBlock("do.body");

  EmitBlockWithFallThrough(LoopBody, &S);
  {
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  EmitBlock(LoopCond.getBlock());

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // C99 6.8.5.2: "The evaluation of the controlling expression takes place
  // after each execution of the loop body."

  // Evaluate the conditional in the "do.cond" block.
  // C99 6.8.5p2/p4: The first substatement is executed if the expression
  // compares unequal to 0. The condition must be a scalar type.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());

  BreakContinueStack.pop_back();

  // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
  // to correctly handle break/continue though.
  bool EmitBoolCondBranch = true;
  if (llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal))
    if (C->isZero())
      EmitBoolCondBranch = false;

  // As long as the condition is true, iterate the loop.
  if (EmitBoolCondBranch) {
    uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
    Builder.CreateCondBr(
        BoolCondVal, LoopBody, LoopExit.getBlock(),
        createProfileWeightsForLoop(S.getCond(), BackedgeCount));
  }

  LoopStack.pop();

  // Emit the exit block.
  EmitBlock(LoopExit.getBlock());

  // If we skipped emitting the condition branch, the DoCond block is
  // typically just a forwarding branch; try to erase it.
  if (!EmitBoolCondBranch)
    SimplifyForwardingBlocks(LoopCond.getBlock());
}

void CodeGenFunction::EmitForStmt(const ForStmt &S,
                                  ArrayRef<const Attr *> ForAttrs) {
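  // As an illustrative sketch (init/cond/inc are each optional; for.inc only
  // exists when there is an increment):
  //   <init>
  //   for.cond: %c = <cond> ; br i1 %c, label %for.body, label %for.end
  //   for.body: <body>      ; br label %for.inc
  //   for.inc:  <inc>       ; br label %for.cond
  //   for.end: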
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first part before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  JumpDest Continue = getJumpDestInCurrentScope("for.cond");
  llvm::BasicBlock *CondBlock = Continue.getBlock();
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If the for loop doesn't have an increment we can just use the
  // condition as the continue block. Otherwise we'll need to create
  // a block for it (in the current scope, i.e. in the scope of the
  // condition), and that will become our continue block.
  if (S.getInc())
    Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  // Create a cleanup scope for the condition variable cleanups.
  LexicalScope ConditionScope(*this, S.getSourceRange());

  if (S.getCond()) {
    // If the for statement has a condition scope, emit the local variable
    // declaration.
    if (S.getConditionVariable()) {
      EmitDecl(*S.getConditionVariable());
    }

    llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
    // If there are any cleanups between here and the loop-exit scope,
    // create a block to stage a loop exit along.
    if (ForScope.requiresCleanups())
      ExitBlock = createBasicBlock("for.cond.cleanup");

    // As long as the condition is true, iterate the loop.
    llvm::BasicBlock *ForBody = createBasicBlock("for.body");

    // C99 6.8.5p2/p4: The first substatement is executed if the expression
    // compares unequal to 0. The condition must be a scalar type.
    llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
    Builder.CreateCondBr(
        BoolCondVal, ForBody, ExitBlock,
        createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

    if (ExitBlock != LoopExit.getBlock()) {
      EmitBlock(ExitBlock);
      EmitBranchThroughCleanup(LoopExit);
    }

    EmitBlock(ForBody);
  } else {
    // Treat it as a non-zero constant. Don't even create a new block for the
    // body, just fall into it.
  }
  incrementProfileCounter(&S);

  {
    // Create a separate cleanup scope for the body, in case it is not
    // a compound statement.
    RunCleanupsScope BodyScope(*this);
    EmitStmt(S.getBody());
  }

  // If there is an increment, emit it next.
  if (S.getInc()) {
    EmitBlock(Continue.getBlock());
    EmitStmt(S.getInc());
  }

  BreakContinueStack.pop_back();

  ConditionScope.ForceCleanup();

  EmitStopPoint(&S);
  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void
CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
                                     ArrayRef<const Attr *> ForAttrs) {
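  // Note: Sema has already desugared "for (decl : range)" into the range,
  // begin, and end statements emitted below, plus a __begin != __end
  // condition and a ++__begin increment, so this mostly mirrors EmitForStmt.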
  JumpDest LoopExit = getJumpDestInCurrentScope("for.end");

  LexicalScope ForScope(*this, S.getSourceRange());

  // Evaluate the first pieces before the loop.
  if (S.getInit())
    EmitStmt(S.getInit());
  EmitStmt(S.getRangeStmt());
  EmitStmt(S.getBeginStmt());
  EmitStmt(S.getEndStmt());

  // Start the loop with a block that tests the condition.
  // If there's an increment, the continue scope will be overwritten
  // later.
  llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
  EmitBlock(CondBlock);

  const SourceRange &R = S.getSourceRange();
  LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
                 SourceLocToDebugLoc(R.getBegin()),
                 SourceLocToDebugLoc(R.getEnd()));

  // If there are any cleanups between here and the loop-exit scope,
  // create a block to stage a loop exit along.
  llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
  if (ForScope.requiresCleanups())
    ExitBlock = createBasicBlock("for.cond.cleanup");

  // The loop body, consisting of the specified body and the loop variable.
  llvm::BasicBlock *ForBody = createBasicBlock("for.body");

  // The body is executed if the expression, contextually converted
  // to bool, is true.
  llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
  Builder.CreateCondBr(
      BoolCondVal, ForBody, ExitBlock,
      createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody())));

  if (ExitBlock != LoopExit.getBlock()) {
    EmitBlock(ExitBlock);
    EmitBranchThroughCleanup(LoopExit);
  }

  EmitBlock(ForBody);
  incrementProfileCounter(&S);

  // Create a block for the increment. In case of a 'continue', we jump there.
  JumpDest Continue = getJumpDestInCurrentScope("for.inc");

  // Store the blocks to use for break and continue.
  BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));

  {
    // Create a separate cleanup scope for the loop variable and body.
    LexicalScope BodyScope(*this, S.getSourceRange());
    EmitStmt(S.getLoopVarStmt());
    EmitStmt(S.getBody());
  }

  EmitStopPoint(&S);
  // If there is an increment, emit it next.
  EmitBlock(Continue.getBlock());
  EmitStmt(S.getInc());

  BreakContinueStack.pop_back();

  EmitBranch(CondBlock);

  ForScope.ForceCleanup();

  LoopStack.pop();

  // Emit the fall-through block.
  EmitBlock(LoopExit.getBlock(), true);
}

void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
  if (RV.isScalar()) {
    Builder.CreateStore(RV.getScalarVal(), ReturnValue);
  } else if (RV.isAggregate()) {
    LValue Dest = MakeAddrLValue(ReturnValue, Ty);
    LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
    EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
  } else {
    EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
                       /*init*/ true);
  }
  EmitBranchThroughCleanup(ReturnBlock);
}

namespace {
// RAII struct used to save and restore a return statement's result expression.
struct SaveRetExprRAII {
  SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
      : OldRetExpr(CGF.RetExpr), CGF(CGF) {
    CGF.RetExpr = RetExpr;
  }
  ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
  const Expr *OldRetExpr;
  CodeGenFunction &CGF;
};
} // namespace

/// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
/// if the function returns void, or may be missing one if the function returns
/// non-void. Fun stuff :).
void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
  if (requiresReturnValueCheck()) {
    llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
    auto *SLocPtr =
        new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
                                 llvm::GlobalVariable::PrivateLinkage, SLoc);
    SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
    CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
    assert(ReturnLocation.isValid() && "No valid return location");
    Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
                        ReturnLocation);
  }

  // Returning from an outlined SEH helper is UB, and we already warn on it.
  if (IsOutlinedSEHHelper) {
    Builder.CreateUnreachable();
    Builder.ClearInsertionPoint();
  }

  // Emit the result value, even if unused, to evaluate the side effects.
  const Expr *RV = S.getRetValue();

  // Record the result expression of the return statement. The recorded
  // expression is used to determine whether a block capture's lifetime should
  // end at the end of the full expression as opposed to the end of the scope
  // enclosing the block expression.
  //
  // This permits a small, easily-implemented exception to our over-conservative
  // rules about not jumping to statements following block literals with
  // non-trivial cleanups.
  SaveRetExprRAII SaveRetExpr(RV, *this);

  RunCleanupsScope cleanupScope(*this);
  if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
    RV = EWC->getSubExpr();
  // FIXME: Clean this up by using an LValue for ReturnTemp,
  // EmitStoreThroughLValue, and EmitAnyExpr.
  // Check if the NRVO candidate was not globalized in OpenMP mode.
  if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
      S.getNRVOCandidate()->isNRVOVariable() &&
      (!getLangOpts().OpenMP ||
       !CGM.getOpenMPRuntime()
            .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
            .isValid())) {
    // Apply the named return value optimization for this return statement,
    // which means doing nothing: the appropriate result has already been
    // constructed into the NRVO variable.

    // If there is an NRVO flag for this variable, set it to 1 to indicate
    // that the cleanup code should not destroy the variable.
    if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
      Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
  } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
    // Make sure not to return anything, but evaluate the expression
    // for side effects.
    if (RV)
      EmitAnyExpr(RV);
  } else if (!RV) {
    // Do nothing (return value is left uninitialized)
  } else if (FnRetTy->isReferenceType()) {
    // If this function returns a reference, take the address of the expression
    // rather than the value.
    RValue Result = EmitReferenceBindingToExpr(RV);
    Builder.CreateStore(Result.getScalarVal(), ReturnValue);
  } else if (TEK_Scalar == getEvaluationKind(RV->getType())) {
    llvm::Value *RetV = EmitScalarExpr(RV);
    QualType Ty = RV->getType();
    if (Ty->isPointerType()) {
      if (const TypedefType *TT = dyn_cast<TypedefType>(Ty)) {
        // TT->getDecl() could be a TypedefDecl or a TypedefNameDecl
        const TypedefDecl *TD = dyn_cast<TypedefDecl>(TT->getDecl());
        VarDecl *Key = TD ? TD->getOpaqueKey() : nullptr;
        if (Key) {
          llvm::Type *RetTy = RetV->getType();
          llvm::Value *KeyV = CGM.GetAddrOfGlobalVar(Key);
          CharUnits Alignment = getContext().getDeclAlign(Key);
          Address Addr(KeyV, Alignment);
          KeyV = Builder.CreateLoad(Addr);
          // If this is CHERI, enforce this in hardware
          if (Ty->isCHERICapabilityType(getContext())) {
            unsigned CapAS = CGM.getTargetCodeGenInfo().getCHERICapabilityAS();
            auto *F = CGM.getIntrinsic(llvm::Intrinsic::cheri_cap_seal);
            llvm::Type *CapPtrTy = llvm::PointerType::get(Int8Ty, CapAS);
            RetV = Builder.CreateCall(F,
                                      {Builder.CreateBitCast(RetV, CapPtrTy),
                                       Builder.CreateBitCast(KeyV, CapPtrTy)});
            RetV = Builder.CreateBitCast(RetV, RetTy);
          } else {
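            // XOR is an involution, so XORing with the same per-type key
            // again recovers the original pointer (presumably done wherever
            // this opaque value is decoded).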
            KeyV = Builder.CreatePtrToInt(KeyV, IntPtrTy);
            RetV = Builder.CreatePtrToInt(RetV, IntPtrTy);
            RetV = Builder.CreateXor(RetV, KeyV);
            RetV = Builder.CreateIntToPtr(RetV, RetTy);
          }
        }
      }
    }
    Builder.CreateStore(RetV, ReturnValue);
  } else {
    switch (getEvaluationKind(RV->getType())) {
    case TEK_Scalar:
      Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
      break;
    case TEK_Complex:
      EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
                                /*isInit*/ true);
      break;
    case TEK_Aggregate:
      EmitAggExpr(RV, AggValueSlot::forAddr(
                          ReturnValue, Qualifiers(),
                          AggValueSlot::IsDestructed,
                          AggValueSlot::DoesNotNeedGCBarriers,
                          AggValueSlot::IsNotAliased,
                          getOverlapForReturnValue()));
      break;
    }
  }

  ++NumReturnExprs;
  if (!RV || RV->isEvaluatable(getContext()))
    ++NumSimpleReturnExprs;

  cleanupScope.ForceCleanup();
  EmitBranchThroughCleanup(ReturnBlock);
}

void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
  // As long as debug info is modeled with instructions, we have to ensure we
  // have a place to insert here and write the stop point here.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  for (const auto *I : S.decls())
    EmitDecl(*I);
}

void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
  assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
}

void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
  assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");

  // If this code is reachable then emit a stop point (if generating
  // debug info). We have to do this ourselves because we are on the
  // "simple" statement path.
  if (HaveInsertPoint())
    EmitStopPoint(&S);

  EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
}

/// EmitCaseStmtRange - If the case statement range is not too big, add
/// multiple cases to the switch instruction, one for each value within the
/// range. If the range is too big, emit an "if" condition check instead.
void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S) {
  assert(S.getRHS() && "Expected RHS value in CaseStmt");

  llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
  if (S.getLHS()->getType()->isCHERICapabilityType(getContext()))
    LHS = LHS.extOrTrunc(Target.getPointerRangeForCHERICapability());
  llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
  if (S.getRHS()->getType()->isCHERICapabilityType(getContext()))
    RHS = RHS.extOrTrunc(Target.getPointerRangeForCHERICapability());

  // Emit the code for this case. We do this first to make sure it is
  // properly chained from our predecessor before generating the
  // switch machinery to enter this block.
  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  EmitStmt(S.getSubStmt());

  // If range is empty, do nothing.
  if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
    return;

  llvm::APInt Range = RHS - LHS;
  // FIXME: parameters such as this should not be hardcoded.
  if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
    // Range is small enough to add multiple switch instruction cases.
    uint64_t Total = getProfileCount(&S);
    unsigned NCases = Range.getZExtValue() + 1;
    // We only have one region counter for the entire set of cases here, so we
    // need to divide the weights evenly between the generated cases, ensuring
    // that the total weight is preserved. E.g., a weight of 5 over three cases
    // will be distributed as weights of 2, 2, and 1.
    uint64_t Weight = Total / NCases, Rem = Total % NCases;
    for (unsigned I = 0; I != NCases; ++I) {
      if (SwitchWeights)
        SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
      if (Rem)
        Rem--;
      SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
      ++LHS;
    }
    return;
  }

  // The range is too big. Emit "if" condition into a new block,
  // making sure to save and restore the current insertion point.
  llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();

  // Push this test onto the chain of range checks (which terminates
  // in the default basic block). The switch's default will be changed
  // to the top of this chain after switch emission is complete.
  llvm::BasicBlock *FalseDest = CaseRangeBlock;
  CaseRangeBlock = createBasicBlock("sw.caserange");

  CurFn->getBasicBlockList().push_back(CaseRangeBlock);
  Builder.SetInsertPoint(CaseRangeBlock);

  // Emit range check.
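  // A value V is in [LHS, RHS] iff (V - LHS) is unsigned-<= (RHS - LHS), so
  // one subtraction plus one unsigned comparison checks both bounds at once.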
  llvm::Value *Diff =
      Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
  llvm::Value *Cond =
      Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");

  llvm::MDNode *Weights = nullptr;
  if (SwitchWeights) {
    uint64_t ThisCount = getProfileCount(&S);
    uint64_t DefaultCount = (*SwitchWeights)[0];
    Weights = createProfileWeights(ThisCount, DefaultCount);

    // Since we're chaining the switch default through each large case range,
    // we need to update the weight for the default, i.e., the first case, to
    // include this case.
    (*SwitchWeights)[0] += ThisCount;
  }
  Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);

  // Restore the appropriate insertion point.
  if (RestoreBB)
    Builder.SetInsertPoint(RestoreBB);
  else
    Builder.ClearInsertionPoint();
}

void CodeGenFunction::EmitCaseStmt(const CaseStmt &S) {
  // If there is no enclosing switch instance that we're aware of, then this
  // case statement and its block can be elided. This situation only happens
  // when we've constant-folded the switch, are emitting the constant case,
  // and part of the constant case includes another case statement. For
  // instance: switch (4) { case 4: do { case 5: } while (1); }
  if (!SwitchInsn) {
    EmitStmt(S.getSubStmt());
    return;
  }

  // Handle case ranges.
  if (S.getRHS()) {
    EmitCaseStmtRange(S);
    return;
  }
  llvm::APSInt CaseIntVal = S.getLHS()->EvaluateKnownConstInt(getContext());
  if (S.getLHS()->getType()->isCHERICapabilityType(getContext()))
    if (CaseIntVal.getBitWidth() > 64)
      CaseIntVal = CaseIntVal.trunc(64); // XXXAR: will this always be correct???
  llvm::ConstantInt *CaseVal = Builder.getInt(CaseIntVal);

  // If the body of the case is just a 'break', try to not emit an empty block.
  // If we're profiling or we're not optimizing, leave the block in for better
  // debug and coverage analysis.
  if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
      CGM.getCodeGenOpts().OptimizationLevel > 0 &&
      isa<BreakStmt>(S.getSubStmt())) {
    JumpDest Block = BreakContinueStack.back().BreakBlock;

    // Only do this optimization if there are no cleanups that need emitting.
    if (isObviouslyBranchWithoutCleanups(Block)) {
      if (SwitchWeights)
        SwitchWeights->push_back(getProfileCount(&S));
      SwitchInsn->addCase(CaseVal, Block.getBlock());

      // If there was a fallthrough into this case, make sure to redirect it to
      // the end of the switch as well.
      if (Builder.GetInsertBlock()) {
        Builder.CreateBr(Block.getBlock());
        Builder.ClearInsertionPoint();
      }
      return;
    }
  }

  llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
  EmitBlockWithFallThrough(CaseDest, &S);
  if (SwitchWeights)
    SwitchWeights->push_back(getProfileCount(&S));
  SwitchInsn->addCase(CaseVal, CaseDest);

  // Recursively emitting the statement is acceptable, but is not wonderful for
  // code where we have many case statements nested together, i.e.:
  //   case 1:
  //     case 2:
  //       case 3: etc.
  // Handling this recursively will create a new block for each case statement
  // that falls through to the next case which is IR intensive. It also causes
  // deep recursion which can run into stack depth limitations. Handle
  // sequential non-range case statements specially.
  const CaseStmt *CurCase = &S;
  const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());

  // Otherwise, iteratively add consecutive cases to this switch stmt.
  while (NextCase && NextCase->getRHS() == nullptr) {
    CurCase = NextCase;
    CaseIntVal = CurCase->getLHS()->EvaluateKnownConstInt(getContext());
    if (S.getLHS()->getType()->isCHERICapabilityType(getContext()))
      if (CaseIntVal.getBitWidth() > 64)
        CaseIntVal = CaseIntVal.trunc(64);
    llvm::ConstantInt *CaseVal = Builder.getInt(CaseIntVal);

    if (SwitchWeights)
      SwitchWeights->push_back(getProfileCount(NextCase));
    if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
      CaseDest = createBasicBlock("sw.bb");
      EmitBlockWithFallThrough(CaseDest, &S);
    }

    SwitchInsn->addCase(CaseVal, CaseDest);
    NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
  }

  // Normal default recursion for non-cases.
  EmitStmt(CurCase->getSubStmt());
}
EmitDefaultStmt(const DefaultStmt & S)1422 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S) {
1423 // If there is no enclosing switch instance that we're aware of, then this
1424 // default statement can be elided. This situation only happens when we've
1425 // constant-folded the switch.
1426 if (!SwitchInsn) {
1427 EmitStmt(S.getSubStmt());
1428 return;
1429 }
1430
1431 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1432 assert(DefaultBlock->empty() &&
1433 "EmitDefaultStmt: Default block already defined?");
1434
1435 EmitBlockWithFallThrough(DefaultBlock, &S);
1436
1437 EmitStmt(S.getSubStmt());
1438 }
1439
1440 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1441 /// constant value that is being switched on, see if we can dead code eliminate
1442 /// the body of the switch to a simple series of statements to emit. Basically,
1443 /// on a switch (5) we want to find these statements:
1444 /// case 5:
1445 /// printf(...); <--
1446 /// ++i; <--
1447 /// break;
1448 ///
1449 /// and add them to the ResultStmts vector. If it is unsafe to do this
1450 /// transformation (for example, one of the elided statements contains a label
1451 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1452 /// should include statements after it (e.g. the printf() line is a substmt of
1453 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1454 /// statement, then return CSFC_Success.
1455 ///
1456 /// If Case is non-null, then we are looking for the specified case, checking
1457 /// that nothing we jump over contains labels. If Case is null, then we found
1458 /// the case and are looking for the break.
1459 ///
1460 /// If the recursive walk actually finds our Case, then we set FoundCase to
1461 /// true.
1462 ///
1463 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
CollectStatementsForCase(const Stmt * S,const SwitchCase * Case,bool & FoundCase,SmallVectorImpl<const Stmt * > & ResultStmts)1464 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1465 const SwitchCase *Case,
1466 bool &FoundCase,
1467 SmallVectorImpl<const Stmt*> &ResultStmts) {
1468 // If this is a null statement, just succeed.
1469 if (!S)
1470 return Case ? CSFC_Success : CSFC_FallThrough;

  // If this is the switchcase (case 4: or default) that we're looking for,
  // then we're in business. Just add the substatement.
  if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
    if (S == Case) {
      FoundCase = true;
      return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
                                      ResultStmts);
    }

    // Otherwise, this is some other case or default statement, just ignore it.
    return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
                                    ResultStmts);
  }

  // If we are in the live part of the code and we found our break statement,
  // return a success!
  if (!Case && isa<BreakStmt>(S))
    return CSFC_Success;

  // If this is a compound statement, then it might contain the SwitchCase,
  // the break, or neither.
  if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
    // Handle this as two cases: we might be looking for the SwitchCase (if so
    // the statements we skip over must be safely skippable) or we might
    // already have it.
    CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
    bool StartedInLiveCode = FoundCase;
    unsigned StartSize = ResultStmts.size();

    // If we've not found the case yet, scan through looking for it.
    if (Case) {
      // Keep track of whether we see a skipped declaration. The code could be
      // using the declaration even if it is skipped, so we can't optimize out
      // the decl if the kept statements might refer to it.
      bool HadSkippedDecl = false;

      // If we're looking for the case, just see if we can skip each of the
      // substatements.
      for (; Case && I != E; ++I) {
        HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);

        switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
        case CSFC_Failure: return CSFC_Failure;
        case CSFC_Success:
          // A successful result means that either 1) the statement doesn't
          // have the case and is skippable, or 2) it does contain the case
          // value and also contains the break to exit the switch. In the
          // latter case, we just verify the rest of the statements are
          // elidable.
          if (FoundCase) {
            // If we found the case and skipped declarations, we can't do the
            // optimization.
            if (HadSkippedDecl)
              return CSFC_Failure;

            for (++I; I != E; ++I)
              if (CodeGenFunction::ContainsLabel(*I, true))
                return CSFC_Failure;
            return CSFC_Success;
          }
          break;
        case CSFC_FallThrough:
          // If we have a fallthrough condition, then we must have found the
          // case and started to include statements. Consider the rest of the
          // statements in the compound statement as candidates for inclusion.
          assert(FoundCase && "Didn't find case but returned fallthrough?");
          // We recursively found Case, so we're not looking for it anymore.
          Case = nullptr;

          // If we found the case and skipped declarations, we can't do the
          // optimization.
          if (HadSkippedDecl)
            return CSFC_Failure;
          break;
        }
      }

      if (!FoundCase)
        return CSFC_Success;

      assert(!HadSkippedDecl && "fallthrough after skipping decl");
    }

    // If we have statements in our range, then we know that the statements are
    // live and need to be added to the set of statements we're tracking.
    bool AnyDecls = false;
    for (; I != E; ++I) {
      AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);

      switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
      case CSFC_Failure: return CSFC_Failure;
      case CSFC_FallThrough:
        // A fallthrough result means that the statement was simple and just
        // included in ResultStmts; keep adding the statements that follow it.
        break;
      case CSFC_Success:
        // A successful result means that we found the break statement and
        // stopped statement inclusion. We just ensure that any leftover stmts
        // are skippable and return success ourselves.
        for (++I; I != E; ++I)
          if (CodeGenFunction::ContainsLabel(*I, true))
            return CSFC_Failure;
        return CSFC_Success;
      }
    }

    // If we're about to fall out of a scope without hitting a 'break;', we
    // can't perform the optimization if there were any decls in that scope
    // (we'd lose their end-of-lifetime).
    if (AnyDecls) {
      // If the entire compound statement was live, there's one more thing we
      // can try before giving up: emit the whole thing as a single statement.
      // We can do that unless the statement contains a 'break;'.
      // FIXME: Such a break must be at the end of a construct within this one.
      // We could emit this by just ignoring the BreakStmts entirely.
      if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
        ResultStmts.resize(StartSize);
        ResultStmts.push_back(S);
      } else {
        return CSFC_Failure;
      }
    }

    return CSFC_FallThrough;
  }

  // Okay, this is some other statement that we don't handle explicitly, like a
  // for statement or increment etc. If we are skipping over this statement,
  // just verify it doesn't have labels, which would make it invalid to elide.
  if (Case) {
    if (CodeGenFunction::ContainsLabel(S, true))
      return CSFC_Failure;
    return CSFC_Success;
  }

  // Otherwise, we want to include this statement. Everything is cool with that
  // so long as it doesn't contain a break out of the switch we're in.
  if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;

  // Otherwise, everything is great. Include the statement and tell the caller
  // that we fall through and include the next statement as well.
  ResultStmts.push_back(S);
  return CSFC_FallThrough;
}

/// FindCaseStatementsForValue - Find the case statement being jumped to and
/// then invoke CollectStatementsForCase to find the list of statements to emit
/// for a switch on constant. See the comment above CollectStatementsForCase
/// for more details.
static bool FindCaseStatementsForValue(const SwitchStmt &S,
                                       const llvm::APSInt &ConstantCondValue,
                                       SmallVectorImpl<const Stmt*> &ResultStmts,
                                       ASTContext &C,
                                       const SwitchCase *&ResultCase) {
  // First step, find the switch case that is being branched to. We can do this
  // efficiently by scanning the SwitchCase list.
  const SwitchCase *Case = S.getSwitchCaseList();
  const DefaultStmt *DefaultCase = nullptr;

  for (; Case; Case = Case->getNextSwitchCase()) {
    // It's either a default or case. Just remember the default statement in
    // case we're not jumping to any numbered cases.
    if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
      DefaultCase = DS;
      continue;
    }

    // Check to see if this case is the one we're looking for.
    const CaseStmt *CS = cast<CaseStmt>(Case);
    // Don't handle case ranges yet.
    if (CS->getRHS()) return false;

    // If we found our case, remember it as 'Case'.
    if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
      break;
  }

  // If we didn't find a matching case, we use a default if it exists, or we
  // elide the whole switch body!
  if (!Case) {
    // It is safe to elide the body of the switch if it doesn't contain labels
    // etc. If it is safe, return successfully with an empty ResultStmts list.
    if (!DefaultCase)
      return !CodeGenFunction::ContainsLabel(&S);
    Case = DefaultCase;
  }

  // Ok, we know which case is being jumped to, try to collect all the
  // statements that follow it. This can fail for a variety of reasons. Also,
  // check to see that the recursive walk actually found our case statement.
  // Insane cases like this can fail to find it in the recursive walk since we
  // don't handle every stmt kind:
  //   switch (4) {
  //     while (1) {
  //       case 4: ...
  bool FoundCase = false;
  ResultCase = Case;
  return CollectStatementsForCase(S.getBody(), Case, FoundCase,
                                  ResultStmts) != CSFC_Failure &&
         FoundCase;
}

void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
  // Handle nested switch statements.
  llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
  SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
  llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;

  // See if we can constant fold the condition of the switch and therefore only
  // emit the live case statement (if any) of the switch.
  llvm::APSInt ConstantCondValue;
  if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
    SmallVector<const Stmt*, 4> CaseStmts;
    const SwitchCase *Case = nullptr;
    if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
                                   getContext(), Case)) {
      if (Case)
        incrementProfileCounter(Case);
      RunCleanupsScope ExecutedScope(*this);

      if (S.getInit())
        EmitStmt(S.getInit());

      // Emit the condition variable if needed inside the entire cleanup scope
      // used by this special case for constant folded switches.
      if (S.getConditionVariable())
        EmitDecl(*S.getConditionVariable());

      // At this point, we are no longer "within" a switch instance, so we
      // clear SwitchInsn to ensure that any embedded case statements are not
      // emitted.
      SwitchInsn = nullptr;

      // Okay, we can dead code eliminate everything except this case. Emit the
      // specified series of statements and we're good.
      for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
        EmitStmt(CaseStmts[i]);
      incrementProfileCounter(&S);

      // Now we want to restore the saved switch instance so that nested
      // switches continue to function properly.
      SwitchInsn = SavedSwitchInsn;

      return;
    }
  }

  JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");

  RunCleanupsScope ConditionScope(*this);

  if (S.getInit())
    EmitStmt(S.getInit());

  if (S.getConditionVariable())
    EmitDecl(*S.getConditionVariable());
  llvm::Value *CondV = EmitScalarExpr(S.getCond());
  // If the condition is a CHERI capability (e.g. __intcap), switch on its
  // virtual address rather than on the capability itself.
  if (S.getCond()->getType()->isCHERICapabilityType(getContext())) {
    // XXXAR: In switch statements we want to switch on the virtual address and
    // not the offset: https://github.com/CTSRD-CHERI/clang/issues/132
    CondV = getPointerAddress(CondV, "intcap.vaddr");
  }
  // Create basic block to hold stuff that comes after switch
  // statement. We also need to create a default block now so that
  // explicit case range tests can have a place to jump to on
  // failure.
  llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
  SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
  if (PGO.haveRegionCounts()) {
    // Walk the SwitchCase list to find how many there are.
    uint64_t DefaultCount = 0;
    unsigned NumCases = 0;
    for (const SwitchCase *Case = S.getSwitchCaseList();
         Case;
         Case = Case->getNextSwitchCase()) {
      if (isa<DefaultStmt>(Case))
        DefaultCount = getProfileCount(Case);
      NumCases += 1;
    }
    SwitchWeights = new SmallVector<uint64_t, 16>();
    SwitchWeights->reserve(NumCases);
    // The default needs to be first. We store the edge count, so we already
    // know the right weight.
    SwitchWeights->push_back(DefaultCount);
  }
  CaseRangeBlock = DefaultBlock;
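  // Explicit case ranges are lowered as comparison chains layered on top of
  // the current CaseRangeBlock, so the switch's default destination is
  // re-pointed at the final CaseRangeBlock once the body has been emitted
  // (see setDefaultDest below).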

  // Clear the insertion point to indicate we are in unreachable code.
  Builder.ClearInsertionPoint();

  // All break statements jump to SwitchExit. If BreakContinueStack is
  // non-empty then reuse the enclosing loop's ContinueBlock so that
  // 'continue' statements inside the switch still target it.
  JumpDest OuterContinue;
  if (!BreakContinueStack.empty())
    OuterContinue = BreakContinueStack.back().ContinueBlock;

  BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));

  // Emit switch body.
  EmitStmt(S.getBody());

  BreakContinueStack.pop_back();

  // Update the default block in case explicit case range tests have
  // been chained on top.
  SwitchInsn->setDefaultDest(CaseRangeBlock);

  // If a default was never emitted:
  if (!DefaultBlock->getParent()) {
    // If we have cleanups, emit the default block so that there's a
    // place to jump through the cleanups from.
    if (ConditionScope.requiresCleanups()) {
      EmitBlock(DefaultBlock);

    // Otherwise, just forward the default block to the switch end.
    } else {
      DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
      delete DefaultBlock;
    }
  }

  ConditionScope.ForceCleanup();

  // Emit continuation.
  EmitBlock(SwitchExit.getBlock(), true);
  incrementProfileCounter(&S);

  // If the switch has a condition wrapped by __builtin_unpredictable,
  // create metadata that specifies that the switch is unpredictable.
  // Don't bother if not optimizing because that metadata would not be used.
  auto *Call = dyn_cast<CallExpr>(S.getCond());
  if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
    auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
    if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
      llvm::MDBuilder MDHelper(getLLVMContext());
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
                              MDHelper.createUnpredictable());
    }
  }

  if (SwitchWeights) {
    assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
           "switch weights do not match switch cases");
    // If there's only one jump destination there's no sense weighting it.
    if (SwitchWeights->size() > 1)
      SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
                              createProfileWeights(*SwitchWeights));
    delete SwitchWeights;
  }
  SwitchInsn = SavedSwitchInsn;
  SwitchWeights = SavedSwitchWeights;
  CaseRangeBlock = SavedCRBlock;
}

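/// SimplifyConstraint - Convert a GCC-style inline asm constraint into the
/// form the LLVM backend expects: e.g. ',' separating constraint alternatives
/// becomes '|', 'g' expands to "imr", and a symbolic operand name such as
/// "[foo]" is resolved to the index of the corresponding operand.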
static std::string
SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
                   SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons = nullptr) {
  std::string Result;

  while (*Constraint) {
    switch (*Constraint) {
    default:
      Result += Target.convertConstraint(Constraint);
      break;
    // Ignore these
    case '*':
    case '?':
    case '!':
    case '=': // Will see this and the following in multi-alternative
              // constraints.
    case '+':
      break;
    case '#': // Ignore the rest of the constraint alternative.
      while (Constraint[1] && Constraint[1] != ',')
        Constraint++;
      break;
    case '&':
    case '%':
      Result += *Constraint;
      while (Constraint[1] && Constraint[1] == *Constraint)
        Constraint++;
      break;
    case ',':
      Result += "|";
      break;
    case 'g':
      Result += "imr";
      break;
    case '[': {
      assert(OutCons &&
             "Must pass output names to constraints with a symbolic name");
      unsigned Index;
      bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
      assert(result && "Could not resolve symbolic name"); (void)result;
      Result += llvm::utostr(Index);
      break;
    }
    }

    Constraint++;
  }

  return Result;
}

/// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
/// as using a particular register, add that register as a constraint to be
/// used in this asm stmt.
static std::string
AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
                       const TargetInfo &Target, CodeGenModule &CGM,
                       const AsmStmt &Stmt, const bool EarlyClobber) {
  const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
  if (!AsmDeclRef)
    return Constraint;
  const ValueDecl &Value = *AsmDeclRef->getDecl();
  const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
  if (!Variable)
    return Constraint;
  if (Variable->getStorageClass() != SC_Register)
    return Constraint;
  AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
  if (!Attr)
    return Constraint;
  StringRef Register = Attr->getLabel();
  assert(Target.isValidGCCRegisterName(Register));
  // We're using validateOutputConstraint here because we only care if
  // this is a register constraint.
  TargetInfo::ConstraintInfo Info(Constraint, "");
  if (Target.validateOutputConstraint(Info) &&
      !Info.allowsRegister()) {
    CGM.ErrorUnsupported(&Stmt, "__asm__");
    return Constraint;
  }
  // Canonicalize the register here before returning it.
  Register = Target.getNormalizedGCCRegisterName(Register);
  return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
}

llvm::Value*
CodeGenFunction::EmitAsmInputLValue(const TargetInfo::ConstraintInfo &Info,
                                    LValue InputValue, QualType InputType,
                                    std::string &ConstraintStr,
                                    SourceLocation Loc) {
  llvm::Value *Arg;
  if (Info.allowsRegister() || !Info.allowsMemory()) {
    if (CodeGenFunction::hasScalarEvaluationKind(InputType)) {
      Arg = EmitLoadOfLValue(InputValue, Loc).getScalarVal();
    } else {
      llvm::Type *Ty = ConvertType(InputType);
      uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
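      // Aggregates that fit in a power-of-two-sized integer of at most 64
      // bits are loaded by-value through an integer of that width; anything
      // larger is passed indirectly with a '*' (memory) constraint instead.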
      if (Size <= 64 && llvm::isPowerOf2_64(Size)) {
        Ty = llvm::IntegerType::get(getLLVMContext(), Size);
        Ty = CGM.getPointerInDefaultAS(Ty);

        Arg = Builder.CreateLoad(
            Builder.CreateBitCast(InputValue.getAddress(*this), Ty));
      } else {
        Arg = InputValue.getPointer(*this);
        ConstraintStr += '*';
      }
    }
  } else {
    Arg = InputValue.getPointer(*this);
    ConstraintStr += '*';
  }

  return Arg;
}

llvm::Value* CodeGenFunction::EmitAsmInput(
    const TargetInfo::ConstraintInfo &Info,
    const Expr *InputExpr,
    std::string &ConstraintStr) {
  // If this can't be a register or memory, i.e., has to be a constant
  // (immediate or symbolic), try to emit it as such.
  if (!Info.allowsRegister() && !Info.allowsMemory()) {
    if (Info.requiresImmediateConstant()) {
      Expr::EvalResult EVResult;
      InputExpr->EvaluateAsRValue(EVResult, getContext(), true);

      llvm::APSInt IntResult;
      if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
                                          getContext()))
        return llvm::ConstantInt::get(getLLVMContext(), IntResult);
    }

    Expr::EvalResult Result;
    if (InputExpr->EvaluateAsInt(Result, getContext()))
      return llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt());
  }

  if (Info.allowsRegister() || !Info.allowsMemory())
    if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
      return EmitScalarExpr(InputExpr);
  if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
    return EmitScalarExpr(InputExpr);
  InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
  LValue Dest = EmitLValue(InputExpr);
  return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
                            InputExpr->getExprLoc());
}

/// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
/// asm call instruction. The !srcloc MDNode contains a list of constant
/// integers which are the source locations of the start of each line in the
/// asm.
static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
                                      CodeGenFunction &CGF) {
  SmallVector<llvm::Metadata *, 8> Locs;
  // Add the location of the first line to the MDNode.
  Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
      CGF.Int32Ty, Str->getBeginLoc().getRawEncoding())));
  StringRef StrVal = Str->getString();
  if (!StrVal.empty()) {
    const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
    const LangOptions &LangOpts = CGF.CGM.getLangOpts();
    unsigned StartToken = 0;
    unsigned ByteOffset = 0;

    // Add the location of the start of each subsequent line of the asm to the
    // MDNode.
    for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
      if (StrVal[i] != '\n') continue;
      SourceLocation LineLoc = Str->getLocationOfByte(
          i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
      Locs.push_back(llvm::ConstantAsMetadata::get(
          llvm::ConstantInt::get(CGF.Int32Ty, LineLoc.getRawEncoding())));
    }
  }

  return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
}

static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
                              bool ReadOnly, bool ReadNone, const AsmStmt &S,
                              const std::vector<llvm::Type *> &ResultRegTypes,
                              CodeGenFunction &CGF,
                              std::vector<llvm::Value *> &RegResults) {
  Result.addAttribute(llvm::AttributeList::FunctionIndex,
                      llvm::Attribute::NoUnwind);
  // Attach readnone and readonly attributes.
  if (!HasSideEffect) {
    if (ReadNone)
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::ReadNone);
    else if (ReadOnly)
      Result.addAttribute(llvm::AttributeList::FunctionIndex,
                          llvm::Attribute::ReadOnly);
  }

  // Slap the source location of the inline asm into a !srcloc metadata on the
  // call.
  if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
    Result.setMetadata("srcloc",
                       getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
  else {
    // At least put the line number on MS inline asm blobs.
    llvm::Constant *Loc = llvm::ConstantInt::get(CGF.Int32Ty,
                                                 S.getAsmLoc().getRawEncoding());
    Result.setMetadata("srcloc",
                       llvm::MDNode::get(CGF.getLLVMContext(),
                                         llvm::ConstantAsMetadata::get(Loc)));
  }

  if (CGF.getLangOpts().assumeFunctionsAreConvergent())
    // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
    // convergent (meaning, they may call an intrinsically convergent op, such
    // as bar.sync, and so can't have certain optimizations applied around
    // them).
    Result.addAttribute(llvm::AttributeList::FunctionIndex,
                        llvm::Attribute::Convergent);
  // Extract all of the register value results from the asm.
  if (ResultRegTypes.size() == 1) {
    RegResults.push_back(&Result);
  } else {
    for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
      llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
      RegResults.push_back(Tmp);
    }
  }
}

void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
  // Assemble the final asm string.
  std::string AsmString = S.generateAsmString(getContext());

  // Get all the output and input constraints together.
  SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
  SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getOutputName(i);
    TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
    bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
    assert(IsValid && "Failed to parse output constraint");
    OutputConstraintInfos.push_back(Info);
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    StringRef Name;
    if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
      Name = GAS->getInputName(i);
    TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
    bool IsValid =
        getTarget().validateInputConstraint(OutputConstraintInfos, Info);
    assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
    InputConstraintInfos.push_back(Info);
  }

  std::string Constraints;

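  // ResultRegDests, ResultRegQualTys, ResultRegTypes and ResultTruncRegTypes
  // run in parallel, one entry per by-register output: the destination
  // lvalue, its source-level type, the type the asm node returns the value
  // as, and the type it must be converted to before being stored.
  // ResultTypeRequiresCast may be shorter (see the asserts near the end).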
  std::vector<LValue> ResultRegDests;
  std::vector<QualType> ResultRegQualTys;
  std::vector<llvm::Type *> ResultRegTypes;
  std::vector<llvm::Type *> ResultTruncRegTypes;
  std::vector<llvm::Type *> ArgTypes;
  std::vector<llvm::Value*> Args;
  llvm::BitVector ResultTypeRequiresCast;

  // Keep track of inout constraints.
  std::string InOutConstraints;
  std::vector<llvm::Value*> InOutArgs;
  std::vector<llvm::Type*> InOutArgTypes;

  // Keep track of out constraints for tied input operands.
  std::vector<std::string> OutputConstraints;

  // An inline asm can be marked readonly if it meets the following conditions:
  //  - it doesn't have any side effects
  //  - it doesn't clobber memory
  //  - it doesn't return a value by-reference
  // It can be marked readnone if it doesn't have any input memory constraints
  // in addition to meeting the conditions listed above.
  bool ReadOnly = true, ReadNone = true;

  for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
    TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];

    // Simplify the output constraint.
    std::string OutputConstraint(S.getOutputConstraint(i));
    OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
                                          getTarget(), &OutputConstraintInfos);

    const Expr *OutExpr = S.getOutputExpr(i);
    OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());

    OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
                                              getTarget(), CGM, S,
                                              Info.earlyClobber());
    OutputConstraints.push_back(OutputConstraint);
    LValue Dest = EmitLValue(OutExpr);
    if (!Constraints.empty())
      Constraints += ',';

    // If this is a register output, then make the inline asm return it
    // by-value. If this is a memory result, return the value by-reference.
    bool isScalarizableAggregate =
        hasAggregateEvaluationKind(OutExpr->getType());
    if (!Info.allowsMemory() && (hasScalarEvaluationKind(OutExpr->getType()) ||
                                 isScalarizableAggregate)) {
      Constraints += "=" + OutputConstraint;
      ResultRegQualTys.push_back(OutExpr->getType());
      ResultRegDests.push_back(Dest);
      ResultTruncRegTypes.push_back(ConvertTypeForMem(OutExpr->getType()));
      if (Info.allowsRegister() && isScalarizableAggregate) {
        ResultTypeRequiresCast.push_back(true);
        unsigned Size = getContext().getTypeSize(OutExpr->getType());
        llvm::Type *ConvTy = llvm::IntegerType::get(getLLVMContext(), Size);
        ResultRegTypes.push_back(ConvTy);
      } else {
        ResultTypeRequiresCast.push_back(false);
        ResultRegTypes.push_back(ResultTruncRegTypes.back());
      }
      // If this output is tied to an input, and if the input is larger, then
      // we need to set the actual result type of the inline asm node to be the
      // same as the input type.
      if (Info.hasMatchingInput()) {
        unsigned InputNo;
        for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
          TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
          if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
            break;
        }
        assert(InputNo != S.getNumInputs() && "Didn't find matching input!");

        QualType InputTy = S.getInputExpr(InputNo)->getType();
        QualType OutputType = OutExpr->getType();

        uint64_t InputSize = getContext().getTypeSize(InputTy);
        if (getContext().getTypeSize(OutputType) < InputSize) {
          // Form the asm to return the value as a larger integer or fp type.
          ResultRegTypes.back() = ConvertType(InputTy);
        }
      }
      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   ResultRegTypes.back()))
        ResultRegTypes.back() = AdjTy;
      else {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::err_asm_invalid_type_in_input)
            << OutExpr->getType() << OutputConstraint;
      }

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
    } else {
      ArgTypes.push_back(Dest.getAddress(*this).getType());
      Args.push_back(Dest.getPointer(*this));
      Constraints += "=*";
      Constraints += OutputConstraint;
      ReadOnly = ReadNone = false;
    }

    if (Info.isReadWrite()) {
      InOutConstraints += ',';

      const Expr *InputExpr = S.getOutputExpr(i);
      llvm::Value *Arg = EmitAsmInputLValue(Info, Dest, InputExpr->getType(),
                                            InOutConstraints,
                                            InputExpr->getExprLoc());

      if (llvm::Type *AdjTy =
              getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
                                                   Arg->getType()))
        Arg = Builder.CreateBitCast(Arg, AdjTy);

      // Update largest vector width for any vector types.
      if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
        LargestVectorWidth =
            std::max((uint64_t)LargestVectorWidth,
                     VT->getPrimitiveSizeInBits().getKnownMinSize());
      if (Info.allowsRegister())
        InOutConstraints += llvm::utostr(i);
      else
        InOutConstraints += OutputConstraint;

      InOutArgTypes.push_back(Arg->getType());
      InOutArgs.push_back(Arg);
    }
  }

  // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
  // to the return value slot. Only do this when returning in registers.
  if (isa<MSAsmStmt>(&S)) {
    const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
    if (RetAI.isDirect() || RetAI.isExtend()) {
      // Make a fake lvalue for the return value slot.
      LValue ReturnSlot = MakeAddrLValue(ReturnValue, FnRetTy);
      CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
          *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
          ResultRegDests, AsmString, S.getNumOutputs());
      SawAsmBlock = true;
    }
  }

  for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
    const Expr *InputExpr = S.getInputExpr(i);

    TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];

    if (Info.allowsMemory())
      ReadNone = false;

    if (!Constraints.empty())
      Constraints += ',';

    // Simplify the input constraint.
    std::string InputConstraint(S.getInputConstraint(i));
    InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
                                         &OutputConstraintInfos);

    InputConstraint = AddVariableConstraints(
        InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
        getTarget(), CGM, S, false /* No EarlyClobber */);

    std::string ReplaceConstraint(InputConstraint);
    llvm::Value *Arg = EmitAsmInput(Info, InputExpr, Constraints);

    // If this input argument is tied to a larger output result, extend the
    // input to be the same size as the output. The LLVM backend wants to see
    // the input and output of a matching constraint be the same size. Note
    // that GCC does not define what the top bits are here. We use zext because
    // that is usually cheaper, but LLVM IR should really get an anyext someday.
    if (Info.hasTiedOperand()) {
      unsigned Output = Info.getTiedOperand();
      QualType OutputType = S.getOutputExpr(Output)->getType();
      QualType InputTy = InputExpr->getType();

      if (getContext().getTypeSize(OutputType) >
          getContext().getTypeSize(InputTy)) {
        // Use ptrtoint as appropriate so that we can do our extension.
        if (isa<llvm::PointerType>(Arg->getType()))
          Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
        llvm::Type *OutputTy = ConvertType(OutputType);
        if (isa<llvm::IntegerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, OutputTy);
        else if (isa<llvm::PointerType>(OutputTy))
          Arg = Builder.CreateZExt(Arg, IntPtrTy);
        else {
          assert(OutputTy->isFloatingPointTy() && "Unexpected output type");
          Arg = Builder.CreateFPExt(Arg, OutputTy);
        }
      }
      // Deal with the tied operands' constraint code in adjustInlineAsmType.
      ReplaceConstraint = OutputConstraints[Output];
    }
    if (llvm::Type *AdjTy =
            getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
                                                 Arg->getType()))
      Arg = Builder.CreateBitCast(Arg, AdjTy);
    else
      CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
          << InputExpr->getType() << InputConstraint;

    // Update largest vector width for any vector types.
    if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
      LargestVectorWidth =
          std::max((uint64_t)LargestVectorWidth,
                   VT->getPrimitiveSizeInBits().getKnownMinSize());

    ArgTypes.push_back(Arg->getType());
    Args.push_back(Arg);
    Constraints += InputConstraint;
  }

  // Labels (GCC 'asm goto').
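  // For 'asm goto', each target label is passed to the asm as a block
  // address with an 'X' constraint, and a fresh "asm.fallthrough" block
  // receives control when the asm does not branch. Roughly, for
  //   asm goto("..." : : : : err);
  // the block for 'err' ends up in Transfer and the callbr emitted below
  // falls through to the new block.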
  SmallVector<llvm::BasicBlock *, 16> Transfer;
  llvm::BasicBlock *Fallthrough = nullptr;
  bool IsGCCAsmGoto = false;
  if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
    IsGCCAsmGoto = GS->isAsmGoto();
    if (IsGCCAsmGoto) {
      for (const auto *E : GS->labels()) {
        JumpDest Dest = getJumpDestForLabel(E->getLabel());
        Transfer.push_back(Dest.getBlock());
        llvm::BlockAddress *BA =
            llvm::BlockAddress::get(CurFn, Dest.getBlock());
        Args.push_back(BA);
        ArgTypes.push_back(BA->getType());
        if (!Constraints.empty())
          Constraints += ',';
        Constraints += 'X';
      }
      Fallthrough = createBasicBlock("asm.fallthrough");
    }
  }

  // Append the "input" part of inout constraints last.
  for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
    ArgTypes.push_back(InOutArgTypes[i]);
    Args.push_back(InOutArgs[i]);
  }
  Constraints += InOutConstraints;

  // Clobbers
  for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
    StringRef Clobber = S.getClobber(i);

    if (Clobber == "memory")
      ReadOnly = ReadNone = false;
    else if (Clobber != "cc") {
      Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
      if (CGM.getCodeGenOpts().StackClashProtector &&
          getTarget().isSPRegName(Clobber)) {
        CGM.getDiags().Report(S.getAsmLoc(),
                              diag::warn_stack_clash_protection_inline_asm);
      }
    }

    if (!Constraints.empty())
      Constraints += ',';

    Constraints += "~{";
    Constraints += Clobber;
    Constraints += '}';
  }

  // Add machine specific clobbers
  std::string MachineClobbers = getTarget().getClobbers();
  if (!MachineClobbers.empty()) {
    if (!Constraints.empty())
      Constraints += ',';
    Constraints += MachineClobbers;
  }

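  // The asm's result type carries the by-register outputs: void if there are
  // none, the single type if there is exactly one, and an anonymous struct of
  // all of them otherwise.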
  llvm::Type *ResultType;
  if (ResultRegTypes.empty())
    ResultType = VoidTy;
  else if (ResultRegTypes.size() == 1)
    ResultType = ResultRegTypes[0];
  else
    ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);

  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);

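  // Volatile asm, and asm with no outputs at all, must be treated as having
  // side effects so that it is not dropped or reordered as if it were a pure
  // computation.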
  bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
  llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
      llvm::InlineAsm::AD_Intel : llvm::InlineAsm::AD_ATT;
  llvm::InlineAsm *IA =
      llvm::InlineAsm::get(FTy, AsmString, Constraints, HasSideEffect,
                           /* IsAlignStack */ false, AsmDialect);
  std::vector<llvm::Value*> RegResults;
  if (IsGCCAsmGoto) {
    llvm::CallBrInst *Result =
        Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
    EmitBlock(Fallthrough);
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
                      ReadNone, S, ResultRegTypes, *this, RegResults);
  } else {
    llvm::CallInst *Result =
        Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
    UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, ReadOnly,
                      ReadNone, S, ResultRegTypes, *this, RegResults);
  }

  assert(RegResults.size() == ResultRegTypes.size());
  assert(RegResults.size() == ResultTruncRegTypes.size());
  assert(RegResults.size() == ResultRegDests.size());
  // ResultRegDests can be also populated by addReturnRegisterOutputs() above,
  // in which case its size may grow.
  assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
  for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
    llvm::Value *Tmp = RegResults[i];

    // If the result type of the LLVM IR asm doesn't match the result type of
    // the expression, do the conversion.
    if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
      llvm::Type *TruncTy = ResultTruncRegTypes[i];

      // Truncate the integer result to the right size; note that TruncTy can
      // be a pointer.
      if (TruncTy->isFloatingPointTy())
        Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
      else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
        assert(!CGM.getDataLayout().isFatPointer(TruncTy));
        uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
        Tmp = Builder.CreateTrunc(Tmp,
            llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
        Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
      } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
        uint64_t TmpSize = CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
        Tmp = Builder.CreatePtrToInt(Tmp,
            llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
        Tmp = Builder.CreateTrunc(Tmp, TruncTy);
      } else if (TruncTy->isIntegerTy()) {
        Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
      } else if (TruncTy->isVectorTy()) {
        Tmp = Builder.CreateBitCast(Tmp, TruncTy);
      }
    }

    LValue Dest = ResultRegDests[i];
    // ResultTypeRequiresCast elements correspond to the first
    // ResultTypeRequiresCast.size() elements of RegResults.
    if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
      unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
      Address A =
          Builder.CreateBitCast(Dest.getAddress(*this),
                                ResultRegTypes[i]->getPointerTo(
                                    Dest.getAddress(*this).getAddressSpace()));
      QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
      if (Ty.isNull()) {
        const Expr *OutExpr = S.getOutputExpr(i);
        CGM.Error(
            OutExpr->getExprLoc(),
            "impossible constraint in asm: can't store value into a register");
        return;
      }
      Dest = MakeAddrLValue(A, Ty);
    }
    EmitStoreThroughLValue(RValue::get(Tmp), Dest);
  }
}

LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
  const RecordDecl *RD = S.getCapturedRecordDecl();
  QualType RecordTy = getContext().getRecordType(RD);

  // Initialize the captured struct.
  LValue SlotLV =
      MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);

  RecordDecl::field_iterator CurField = RD->field_begin();
  for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
                                                 E = S.capture_init_end();
       I != E; ++I, ++CurField) {
    LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
    if (CurField->hasCapturedVLAType()) {
      auto VAT = CurField->getCapturedVLAType();
      EmitStoreThroughLValue(RValue::get(VLASizeMap[VAT->getSizeExpr()]), LV);
    } else {
      EmitInitializerForField(*CurField, LV, *I);
    }
  }

  return SlotLV;
}

/// Generate an outlined function for the body of a CapturedStmt, store any
/// captured variables into the captured struct, and call the outlined function.
llvm::Function *
CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
  LValue CapStruct = InitCapturedStruct(S);

  // Emit the CapturedDecl
  CodeGenFunction CGF(CGM, true);
  CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
  llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
  delete CGF.CapturedStmtInfo;

  // Emit call to the helper function.
  EmitCallOrInvoke(F, CapStruct.getPointer(*this));

  return F;
}

Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
  LValue CapStruct = InitCapturedStruct(S);
  return CapStruct.getAddress(*this);
}

/// Creates the outlined function for a CapturedStmt.
llvm::Function *
CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
  assert(CapturedStmtInfo &&
         "CapturedStmtInfo should be set when generating the captured function");
  const CapturedDecl *CD = S.getCapturedDecl();
  const RecordDecl *RD = S.getCapturedRecordDecl();
  SourceLocation Loc = S.getBeginLoc();
  assert(CD->hasBody() && "missing CapturedDecl body");

  // Build the argument list.
  ASTContext &Ctx = CGM.getContext();
  FunctionArgList Args;
  Args.append(CD->param_begin(), CD->param_end());

  // Create the function declaration.
  const CGFunctionInfo &FuncInfo =
      CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
  llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);

  llvm::Function *F =
      llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
                             CapturedStmtInfo->getHelperName(), &CGM.getModule());
  CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
  if (CD->isNothrow())
    F->addFnAttr(llvm::Attribute::NoUnwind);

  // Generate the function.
  StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
                CD->getBody()->getBeginLoc());
  // Set the context parameter in CapturedStmtInfo.
  Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
  CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));

  // Initialize variable-length arrays.
  LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
                                           Ctx.getTagDeclType(RD));
  for (auto *FD : RD->fields()) {
    if (FD->hasCapturedVLAType()) {
      auto *ExprArg =
          EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
              .getScalarVal();
      auto VAT = FD->getCapturedVLAType();
      VLASizeMap[VAT->getSizeExpr()] = ExprArg;
    }
  }

  // If 'this' is captured, load it into CXXThisValue.
  if (CapturedStmtInfo->isCXXThisExprCaptured()) {
    FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
    LValue ThisLValue = EmitLValueForField(Base, FD);
    CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
  }

  PGO.assignRegionCounters(GlobalDecl(CD), F);
  CapturedStmtInfo->EmitBody(*this, CD->getBody());
  FinishFunction(CD->getBodyRBrace());

  return F;
}