1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Stmt nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12
13 #include "CGDebugInfo.h"
14 #include "CGOpenMPRuntime.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/Expr.h"
20 #include "clang/AST/Stmt.h"
21 #include "clang/AST/StmtVisitor.h"
22 #include "clang/Basic/Builtins.h"
23 #include "clang/Basic/DiagnosticSema.h"
24 #include "clang/Basic/PrettyStackTrace.h"
25 #include "clang/Basic/SourceManager.h"
26 #include "clang/Basic/TargetInfo.h"
27 #include "llvm/ADT/SmallSet.h"
28 #include "llvm/ADT/StringExtras.h"
29 #include "llvm/IR/Assumptions.h"
30 #include "llvm/IR/DataLayout.h"
31 #include "llvm/IR/InlineAsm.h"
32 #include "llvm/IR/Intrinsics.h"
33 #include "llvm/IR/MDBuilder.h"
34 #include "llvm/Support/SaveAndRestore.h"
35 #include <optional>
36
37 using namespace clang;
38 using namespace CodeGen;
39
40 //===----------------------------------------------------------------------===//
41 // Statement Emission
42 //===----------------------------------------------------------------------===//
43
EmitStopPoint(const Stmt * S)44 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
45 if (CGDebugInfo *DI = getDebugInfo()) {
46 SourceLocation Loc;
47 Loc = S->getBeginLoc();
48 DI->EmitLocation(Builder, Loc);
49
50 LastStopPoint = Loc;
51 }
52 }
53
EmitStmt(const Stmt * S,ArrayRef<const Attr * > Attrs)54 void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
55 assert(S && "Null statement?");
56 PGO.setCurrentStmt(S);
57
58 // These statements have their own debug info handling.
59 if (EmitSimpleStmt(S, Attrs))
60 return;
61
62 // Check if we are generating unreachable code.
63 if (!HaveInsertPoint()) {
64 // If so, and the statement doesn't contain a label, then we do not need to
65 // generate actual code. This is safe because (1) the current point is
66 // unreachable, so we don't need to execute the code, and (2) we've already
67 // handled the statements which update internal data structures (like the
68 // local variable map) which could be used by subsequent statements.
69 if (!ContainsLabel(S)) {
70 // Verify that any decl statements were handled as simple, they may be in
71 // scope of subsequent reachable statements.
72 assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
73 return;
74 }
75
76 // Otherwise, make a new block to hold the code.
77 EnsureInsertPoint();
78 }
79
80 // Generate a stoppoint if we are emitting debug info.
81 EmitStopPoint(S);
82
83 // Ignore all OpenMP directives except for simd if OpenMP with Simd is
84 // enabled.
85 if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
86 if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
87 EmitSimpleOMPExecutableDirective(*D);
88 return;
89 }
90 }
91
92 switch (S->getStmtClass()) {
93 case Stmt::NoStmtClass:
94 case Stmt::CXXCatchStmtClass:
95 case Stmt::SEHExceptStmtClass:
96 case Stmt::SEHFinallyStmtClass:
97 case Stmt::MSDependentExistsStmtClass:
98 llvm_unreachable("invalid statement class to emit generically");
99 case Stmt::NullStmtClass:
100 case Stmt::CompoundStmtClass:
101 case Stmt::DeclStmtClass:
102 case Stmt::LabelStmtClass:
103 case Stmt::AttributedStmtClass:
104 case Stmt::GotoStmtClass:
105 case Stmt::BreakStmtClass:
106 case Stmt::ContinueStmtClass:
107 case Stmt::DefaultStmtClass:
108 case Stmt::CaseStmtClass:
109 case Stmt::SEHLeaveStmtClass:
110 llvm_unreachable("should have emitted these statements as simple");
111
112 #define STMT(Type, Base)
113 #define ABSTRACT_STMT(Op)
114 #define EXPR(Type, Base) \
115 case Stmt::Type##Class:
116 #include "clang/AST/StmtNodes.inc"
117 {
118 // Remember the block we came in on.
119 llvm::BasicBlock *incoming = Builder.GetInsertBlock();
120 assert(incoming && "expression emission must have an insertion point");
121
122 EmitIgnoredExpr(cast<Expr>(S));
123
124 llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
125 assert(outgoing && "expression emission cleared block!");
126
127 // The expression emitters assume (reasonably!) that the insertion
128 // point is always set. To maintain that, the call-emission code
129 // for noreturn functions has to enter a new block with no
130 // predecessors. We want to kill that block and mark the current
131 // insertion point unreachable in the common case of a call like
132 // "exit();". Since expression emission doesn't otherwise create
133 // blocks with no predecessors, we can just test for that.
134 // However, we must be careful not to do this to our incoming
135 // block, because *statement* emission does sometimes create
136 // reachable blocks which will have no predecessors until later in
137 // the function. This occurs with, e.g., labels that are not
138 // reachable by fallthrough.
139 if (incoming != outgoing && outgoing->use_empty()) {
140 outgoing->eraseFromParent();
141 Builder.ClearInsertionPoint();
142 }
143 break;
144 }
145
146 case Stmt::IndirectGotoStmtClass:
147 EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
148
149 case Stmt::IfStmtClass: EmitIfStmt(cast<IfStmt>(*S)); break;
150 case Stmt::WhileStmtClass: EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
151 case Stmt::DoStmtClass: EmitDoStmt(cast<DoStmt>(*S), Attrs); break;
152 case Stmt::ForStmtClass: EmitForStmt(cast<ForStmt>(*S), Attrs); break;
153
154 case Stmt::ReturnStmtClass: EmitReturnStmt(cast<ReturnStmt>(*S)); break;
155
156 case Stmt::SwitchStmtClass: EmitSwitchStmt(cast<SwitchStmt>(*S)); break;
157 case Stmt::GCCAsmStmtClass: // Intentional fall-through.
158 case Stmt::MSAsmStmtClass: EmitAsmStmt(cast<AsmStmt>(*S)); break;
159 case Stmt::CoroutineBodyStmtClass:
160 EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
161 break;
162 case Stmt::CoreturnStmtClass:
163 EmitCoreturnStmt(cast<CoreturnStmt>(*S));
164 break;
165 case Stmt::CapturedStmtClass: {
166 const CapturedStmt *CS = cast<CapturedStmt>(S);
167 EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
168 }
169 break;
170 case Stmt::ObjCAtTryStmtClass:
171 EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
172 break;
173 case Stmt::ObjCAtCatchStmtClass:
174 llvm_unreachable(
175 "@catch statements should be handled by EmitObjCAtTryStmt");
176 case Stmt::ObjCAtFinallyStmtClass:
177 llvm_unreachable(
178 "@finally statements should be handled by EmitObjCAtTryStmt");
179 case Stmt::ObjCAtThrowStmtClass:
180 EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
181 break;
182 case Stmt::ObjCAtSynchronizedStmtClass:
183 EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
184 break;
185 case Stmt::ObjCForCollectionStmtClass:
186 EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
187 break;
188 case Stmt::ObjCAutoreleasePoolStmtClass:
189 EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
190 break;
191
192 case Stmt::CXXTryStmtClass:
193 EmitCXXTryStmt(cast<CXXTryStmt>(*S));
194 break;
195 case Stmt::CXXForRangeStmtClass:
196 EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
197 break;
198 case Stmt::SEHTryStmtClass:
199 EmitSEHTryStmt(cast<SEHTryStmt>(*S));
200 break;
201 case Stmt::OMPMetaDirectiveClass:
202 EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
203 break;
204 case Stmt::OMPCanonicalLoopClass:
205 EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
206 break;
207 case Stmt::OMPParallelDirectiveClass:
208 EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
209 break;
210 case Stmt::OMPSimdDirectiveClass:
211 EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
212 break;
213 case Stmt::OMPTileDirectiveClass:
214 EmitOMPTileDirective(cast<OMPTileDirective>(*S));
215 break;
216 case Stmt::OMPUnrollDirectiveClass:
217 EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
218 break;
219 case Stmt::OMPForDirectiveClass:
220 EmitOMPForDirective(cast<OMPForDirective>(*S));
221 break;
222 case Stmt::OMPForSimdDirectiveClass:
223 EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
224 break;
225 case Stmt::OMPSectionsDirectiveClass:
226 EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
227 break;
228 case Stmt::OMPSectionDirectiveClass:
229 EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
230 break;
231 case Stmt::OMPSingleDirectiveClass:
232 EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
233 break;
234 case Stmt::OMPMasterDirectiveClass:
235 EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
236 break;
237 case Stmt::OMPCriticalDirectiveClass:
238 EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
239 break;
240 case Stmt::OMPParallelForDirectiveClass:
241 EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
242 break;
243 case Stmt::OMPParallelForSimdDirectiveClass:
244 EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
245 break;
246 case Stmt::OMPParallelMasterDirectiveClass:
247 EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
248 break;
249 case Stmt::OMPParallelSectionsDirectiveClass:
250 EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
251 break;
252 case Stmt::OMPTaskDirectiveClass:
253 EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
254 break;
255 case Stmt::OMPTaskyieldDirectiveClass:
256 EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
257 break;
258 case Stmt::OMPErrorDirectiveClass:
259 EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
260 break;
261 case Stmt::OMPBarrierDirectiveClass:
262 EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
263 break;
264 case Stmt::OMPTaskwaitDirectiveClass:
265 EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
266 break;
267 case Stmt::OMPTaskgroupDirectiveClass:
268 EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
269 break;
270 case Stmt::OMPFlushDirectiveClass:
271 EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
272 break;
273 case Stmt::OMPDepobjDirectiveClass:
274 EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
275 break;
276 case Stmt::OMPScanDirectiveClass:
277 EmitOMPScanDirective(cast<OMPScanDirective>(*S));
278 break;
279 case Stmt::OMPOrderedDirectiveClass:
280 EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
281 break;
282 case Stmt::OMPAtomicDirectiveClass:
283 EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
284 break;
285 case Stmt::OMPTargetDirectiveClass:
286 EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
287 break;
288 case Stmt::OMPTeamsDirectiveClass:
289 EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
290 break;
291 case Stmt::OMPCancellationPointDirectiveClass:
292 EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
293 break;
294 case Stmt::OMPCancelDirectiveClass:
295 EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
296 break;
297 case Stmt::OMPTargetDataDirectiveClass:
298 EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
299 break;
300 case Stmt::OMPTargetEnterDataDirectiveClass:
301 EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
302 break;
303 case Stmt::OMPTargetExitDataDirectiveClass:
304 EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
305 break;
306 case Stmt::OMPTargetParallelDirectiveClass:
307 EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
308 break;
309 case Stmt::OMPTargetParallelForDirectiveClass:
310 EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
311 break;
312 case Stmt::OMPTaskLoopDirectiveClass:
313 EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
314 break;
315 case Stmt::OMPTaskLoopSimdDirectiveClass:
316 EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
317 break;
318 case Stmt::OMPMasterTaskLoopDirectiveClass:
319 EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
320 break;
321 case Stmt::OMPMaskedTaskLoopDirectiveClass:
322 llvm_unreachable("masked taskloop directive not supported yet.");
323 break;
324 case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
325 EmitOMPMasterTaskLoopSimdDirective(
326 cast<OMPMasterTaskLoopSimdDirective>(*S));
327 break;
328 case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
329 llvm_unreachable("masked taskloop simd directive not supported yet.");
330 break;
331 case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
332 EmitOMPParallelMasterTaskLoopDirective(
333 cast<OMPParallelMasterTaskLoopDirective>(*S));
334 break;
335 case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
336 llvm_unreachable("parallel masked taskloop directive not supported yet.");
337 break;
338 case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
339 EmitOMPParallelMasterTaskLoopSimdDirective(
340 cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
341 break;
342 case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
343 llvm_unreachable(
344 "parallel masked taskloop simd directive not supported yet.");
345 break;
346 case Stmt::OMPDistributeDirectiveClass:
347 EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
348 break;
349 case Stmt::OMPTargetUpdateDirectiveClass:
350 EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
351 break;
352 case Stmt::OMPDistributeParallelForDirectiveClass:
353 EmitOMPDistributeParallelForDirective(
354 cast<OMPDistributeParallelForDirective>(*S));
355 break;
356 case Stmt::OMPDistributeParallelForSimdDirectiveClass:
357 EmitOMPDistributeParallelForSimdDirective(
358 cast<OMPDistributeParallelForSimdDirective>(*S));
359 break;
360 case Stmt::OMPDistributeSimdDirectiveClass:
361 EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
362 break;
363 case Stmt::OMPTargetParallelForSimdDirectiveClass:
364 EmitOMPTargetParallelForSimdDirective(
365 cast<OMPTargetParallelForSimdDirective>(*S));
366 break;
367 case Stmt::OMPTargetSimdDirectiveClass:
368 EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
369 break;
370 case Stmt::OMPTeamsDistributeDirectiveClass:
371 EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
372 break;
373 case Stmt::OMPTeamsDistributeSimdDirectiveClass:
374 EmitOMPTeamsDistributeSimdDirective(
375 cast<OMPTeamsDistributeSimdDirective>(*S));
376 break;
377 case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
378 EmitOMPTeamsDistributeParallelForSimdDirective(
379 cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
380 break;
381 case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
382 EmitOMPTeamsDistributeParallelForDirective(
383 cast<OMPTeamsDistributeParallelForDirective>(*S));
384 break;
385 case Stmt::OMPTargetTeamsDirectiveClass:
386 EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
387 break;
388 case Stmt::OMPTargetTeamsDistributeDirectiveClass:
389 EmitOMPTargetTeamsDistributeDirective(
390 cast<OMPTargetTeamsDistributeDirective>(*S));
391 break;
392 case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
393 EmitOMPTargetTeamsDistributeParallelForDirective(
394 cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
395 break;
396 case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
397 EmitOMPTargetTeamsDistributeParallelForSimdDirective(
398 cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
399 break;
400 case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
401 EmitOMPTargetTeamsDistributeSimdDirective(
402 cast<OMPTargetTeamsDistributeSimdDirective>(*S));
403 break;
404 case Stmt::OMPInteropDirectiveClass:
405 EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
406 break;
407 case Stmt::OMPDispatchDirectiveClass:
408 llvm_unreachable("Dispatch directive not supported yet.");
409 break;
410 case Stmt::OMPMaskedDirectiveClass:
411 EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
412 break;
413 case Stmt::OMPGenericLoopDirectiveClass:
414 EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
415 break;
416 case Stmt::OMPTeamsGenericLoopDirectiveClass:
417 llvm_unreachable("teams loop directive not supported yet.");
418 break;
419 case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
420 llvm_unreachable("target teams loop directive not supported yet.");
421 break;
422 case Stmt::OMPParallelGenericLoopDirectiveClass:
423 llvm_unreachable("parallel loop directive not supported yet.");
424 break;
425 case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
426 llvm_unreachable("target parallel loop directive not supported yet.");
427 break;
428 case Stmt::OMPParallelMaskedDirectiveClass:
429 llvm_unreachable("parallel masked directive not supported yet.");
430 break;
431 }
432 }
433
EmitSimpleStmt(const Stmt * S,ArrayRef<const Attr * > Attrs)434 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
435 ArrayRef<const Attr *> Attrs) {
436 switch (S->getStmtClass()) {
437 default:
438 return false;
439 case Stmt::NullStmtClass:
440 break;
441 case Stmt::CompoundStmtClass:
442 EmitCompoundStmt(cast<CompoundStmt>(*S));
443 break;
444 case Stmt::DeclStmtClass:
445 EmitDeclStmt(cast<DeclStmt>(*S));
446 break;
447 case Stmt::LabelStmtClass:
448 EmitLabelStmt(cast<LabelStmt>(*S));
449 break;
450 case Stmt::AttributedStmtClass:
451 EmitAttributedStmt(cast<AttributedStmt>(*S));
452 break;
453 case Stmt::GotoStmtClass:
454 EmitGotoStmt(cast<GotoStmt>(*S));
455 break;
456 case Stmt::BreakStmtClass:
457 EmitBreakStmt(cast<BreakStmt>(*S));
458 break;
459 case Stmt::ContinueStmtClass:
460 EmitContinueStmt(cast<ContinueStmt>(*S));
461 break;
462 case Stmt::DefaultStmtClass:
463 EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
464 break;
465 case Stmt::CaseStmtClass:
466 EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
467 break;
468 case Stmt::SEHLeaveStmtClass:
469 EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
470 break;
471 }
472 return true;
473 }
474
475 /// EmitCompoundStmt - Emit a compound statement {..} node. If GetLast is true,
476 /// this captures the expression result of the last sub-statement and returns it
477 /// (for use by the statement expression extension).
EmitCompoundStmt(const CompoundStmt & S,bool GetLast,AggValueSlot AggSlot)478 Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
479 AggValueSlot AggSlot) {
480 PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
481 "LLVM IR generation of compound statement ('{}')");
482
483 // Keep track of the current cleanup stack depth, including debug scopes.
484 LexicalScope Scope(*this, S.getSourceRange());
485
486 return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
487 }
488
489 Address
EmitCompoundStmtWithoutScope(const CompoundStmt & S,bool GetLast,AggValueSlot AggSlot)490 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
491 bool GetLast,
492 AggValueSlot AggSlot) {
493
494 const Stmt *ExprResult = S.getStmtExprResult();
495 assert((!GetLast || (GetLast && ExprResult)) &&
496 "If GetLast is true then the CompoundStmt must have a StmtExprResult");
497
498 Address RetAlloca = Address::invalid();
499
500 for (auto *CurStmt : S.body()) {
501 if (GetLast && ExprResult == CurStmt) {
502 // We have to special case labels here. They are statements, but when put
503 // at the end of a statement expression, they yield the value of their
504 // subexpression. Handle this by walking through all labels we encounter,
505 // emitting them before we evaluate the subexpr.
506 // Similar issues arise for attributed statements.
507 while (!isa<Expr>(ExprResult)) {
508 if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
509 EmitLabel(LS->getDecl());
510 ExprResult = LS->getSubStmt();
511 } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
512 // FIXME: Update this if we ever have attributes that affect the
513 // semantics of an expression.
514 ExprResult = AS->getSubStmt();
515 } else {
516 llvm_unreachable("unknown value statement");
517 }
518 }
519
520 EnsureInsertPoint();
521
522 const Expr *E = cast<Expr>(ExprResult);
523 QualType ExprTy = E->getType();
524 if (hasAggregateEvaluationKind(ExprTy)) {
525 EmitAggExpr(E, AggSlot);
526 } else {
527 // We can't return an RValue here because there might be cleanups at
528 // the end of the StmtExpr. Because of that, we have to emit the result
529 // here into a temporary alloca.
530 RetAlloca = CreateMemTemp(ExprTy);
531 EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
532 /*IsInit*/ false);
533 }
534 } else {
535 EmitStmt(CurStmt);
536 }
537 }
538
539 return RetAlloca;
540 }
541
SimplifyForwardingBlocks(llvm::BasicBlock * BB)542 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
543 llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
544
545 // If there is a cleanup stack, then we it isn't worth trying to
546 // simplify this block (we would need to remove it from the scope map
547 // and cleanup entry).
548 if (!EHStack.empty())
549 return;
550
551 // Can only simplify direct branches.
552 if (!BI || !BI->isUnconditional())
553 return;
554
555 // Can only simplify empty blocks.
556 if (BI->getIterator() != BB->begin())
557 return;
558
559 BB->replaceAllUsesWith(BI->getSuccessor(0));
560 BI->eraseFromParent();
561 BB->eraseFromParent();
562 }
563
EmitBlock(llvm::BasicBlock * BB,bool IsFinished)564 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
565 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
566
567 // Fall out of the current block (if necessary).
568 EmitBranch(BB);
569
570 if (IsFinished && BB->use_empty()) {
571 delete BB;
572 return;
573 }
574
575 // Place the block after the current block, if possible, or else at
576 // the end of the function.
577 if (CurBB && CurBB->getParent())
578 CurFn->insert(std::next(CurBB->getIterator()), BB);
579 else
580 CurFn->insert(CurFn->end(), BB);
581 Builder.SetInsertPoint(BB);
582 }
583
EmitBranch(llvm::BasicBlock * Target)584 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
585 // Emit a branch from the current block to the target one if this
586 // was a real block. If this was just a fall-through block after a
587 // terminator, don't emit it.
588 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
589
590 if (!CurBB || CurBB->getTerminator()) {
591 // If there is no insert point or the previous block is already
592 // terminated, don't touch it.
593 } else {
594 // Otherwise, create a fall-through branch.
595 Builder.CreateBr(Target);
596 }
597
598 Builder.ClearInsertionPoint();
599 }
600
EmitBlockAfterUses(llvm::BasicBlock * block)601 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
602 bool inserted = false;
603 for (llvm::User *u : block->users()) {
604 if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
605 CurFn->insert(std::next(insn->getParent()->getIterator()), block);
606 inserted = true;
607 break;
608 }
609 }
610
611 if (!inserted)
612 CurFn->insert(CurFn->end(), block);
613
614 Builder.SetInsertPoint(block);
615 }
616
617 CodeGenFunction::JumpDest
getJumpDestForLabel(const LabelDecl * D)618 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
619 JumpDest &Dest = LabelMap[D];
620 if (Dest.isValid()) return Dest;
621
622 // Create, but don't insert, the new block.
623 Dest = JumpDest(createBasicBlock(D->getName()),
624 EHScopeStack::stable_iterator::invalid(),
625 NextCleanupDestIndex++);
626 return Dest;
627 }
628
EmitLabel(const LabelDecl * D)629 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
630 // Add this label to the current lexical scope if we're within any
631 // normal cleanups. Jumps "in" to this label --- when permitted by
632 // the language --- may need to be routed around such cleanups.
633 if (EHStack.hasNormalCleanups() && CurLexicalScope)
634 CurLexicalScope->addLabel(D);
635
636 JumpDest &Dest = LabelMap[D];
637
638 // If we didn't need a forward reference to this label, just go
639 // ahead and create a destination at the current scope.
640 if (!Dest.isValid()) {
641 Dest = getJumpDestInCurrentScope(D->getName());
642
643 // Otherwise, we need to give this label a target depth and remove
644 // it from the branch-fixups list.
645 } else {
646 assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
647 Dest.setScopeDepth(EHStack.stable_begin());
648 ResolveBranchFixups(Dest.getBlock());
649 }
650
651 EmitBlock(Dest.getBlock());
652
653 // Emit debug info for labels.
654 if (CGDebugInfo *DI = getDebugInfo()) {
655 if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
656 DI->setLocation(D->getLocation());
657 DI->EmitLabel(D, Builder);
658 }
659 }
660
661 incrementProfileCounter(D->getStmt());
662 }
663
664 /// Change the cleanup scope of the labels in this lexical scope to
665 /// match the scope of the enclosing context.
rescopeLabels()666 void CodeGenFunction::LexicalScope::rescopeLabels() {
667 assert(!Labels.empty());
668 EHScopeStack::stable_iterator innermostScope
669 = CGF.EHStack.getInnermostNormalCleanup();
670
671 // Change the scope depth of all the labels.
672 for (SmallVectorImpl<const LabelDecl*>::const_iterator
673 i = Labels.begin(), e = Labels.end(); i != e; ++i) {
674 assert(CGF.LabelMap.count(*i));
675 JumpDest &dest = CGF.LabelMap.find(*i)->second;
676 assert(dest.getScopeDepth().isValid());
677 assert(innermostScope.encloses(dest.getScopeDepth()));
678 dest.setScopeDepth(innermostScope);
679 }
680
681 // Reparent the labels if the new scope also has cleanups.
682 if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
683 ParentScope->Labels.append(Labels.begin(), Labels.end());
684 }
685 }
686
687
EmitLabelStmt(const LabelStmt & S)688 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
689 EmitLabel(S.getDecl());
690
691 // IsEHa - emit eha.scope.begin if it's a side entry of a scope
692 if (getLangOpts().EHAsynch && S.isSideEntry())
693 EmitSehCppScopeBegin();
694
695 EmitStmt(S.getSubStmt());
696 }
697
EmitAttributedStmt(const AttributedStmt & S)698 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
699 bool nomerge = false;
700 bool noinline = false;
701 bool alwaysinline = false;
702 const CallExpr *musttail = nullptr;
703
704 for (const auto *A : S.getAttrs()) {
705 switch (A->getKind()) {
706 default:
707 break;
708 case attr::NoMerge:
709 nomerge = true;
710 break;
711 case attr::NoInline:
712 noinline = true;
713 break;
714 case attr::AlwaysInline:
715 alwaysinline = true;
716 break;
717 case attr::MustTail:
718 const Stmt *Sub = S.getSubStmt();
719 const ReturnStmt *R = cast<ReturnStmt>(Sub);
720 musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
721 break;
722 }
723 }
724 SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
725 SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
726 SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
727 SaveAndRestore save_musttail(MustTailCall, musttail);
728 EmitStmt(S.getSubStmt(), S.getAttrs());
729 }
730
EmitGotoStmt(const GotoStmt & S)731 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
732 // If this code is reachable then emit a stop point (if generating
733 // debug info). We have to do this ourselves because we are on the
734 // "simple" statement path.
735 if (HaveInsertPoint())
736 EmitStopPoint(&S);
737
738 EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
739 }
740
741
EmitIndirectGotoStmt(const IndirectGotoStmt & S)742 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
743 if (const LabelDecl *Target = S.getConstantTarget()) {
744 EmitBranchThroughCleanup(getJumpDestForLabel(Target));
745 return;
746 }
747
748 // Ensure that we have an i8* for our PHI node.
749 llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
750 Int8PtrTy, "addr");
751 llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
752
753 // Get the basic block for the indirect goto.
754 llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
755
756 // The first instruction in the block has to be the PHI for the switch dest,
757 // add an entry for this branch.
758 cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
759
760 EmitBranch(IndGotoBB);
761 }
762
EmitIfStmt(const IfStmt & S)763 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
764 // The else branch of a consteval if statement is always the only branch that
765 // can be runtime evaluated.
766 if (S.isConsteval()) {
767 const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
768 if (Executed) {
769 RunCleanupsScope ExecutedScope(*this);
770 EmitStmt(Executed);
771 }
772 return;
773 }
774
775 // C99 6.8.4.1: The first substatement is executed if the expression compares
776 // unequal to 0. The condition must be a scalar type.
777 LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
778
779 if (S.getInit())
780 EmitStmt(S.getInit());
781
782 if (S.getConditionVariable())
783 EmitDecl(*S.getConditionVariable());
784
785 // If the condition constant folds and can be elided, try to avoid emitting
786 // the condition and the dead arm of the if/else.
787 bool CondConstant;
788 if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
789 S.isConstexpr())) {
790 // Figure out which block (then or else) is executed.
791 const Stmt *Executed = S.getThen();
792 const Stmt *Skipped = S.getElse();
793 if (!CondConstant) // Condition false?
794 std::swap(Executed, Skipped);
795
796 // If the skipped block has no labels in it, just emit the executed block.
797 // This avoids emitting dead code and simplifies the CFG substantially.
798 if (S.isConstexpr() || !ContainsLabel(Skipped)) {
799 if (CondConstant)
800 incrementProfileCounter(&S);
801 if (Executed) {
802 RunCleanupsScope ExecutedScope(*this);
803 EmitStmt(Executed);
804 }
805 return;
806 }
807 }
808
809 // Otherwise, the condition did not fold, or we couldn't elide it. Just emit
810 // the conditional branch.
811 llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
812 llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
813 llvm::BasicBlock *ElseBlock = ContBlock;
814 if (S.getElse())
815 ElseBlock = createBasicBlock("if.else");
816
817 // Prefer the PGO based weights over the likelihood attribute.
818 // When the build isn't optimized the metadata isn't used, so don't generate
819 // it.
820 // Also, differentiate between disabled PGO and a never executed branch with
821 // PGO. Assuming PGO is in use:
822 // - we want to ignore the [[likely]] attribute if the branch is never
823 // executed,
824 // - assuming the profile is poor, preserving the attribute may still be
825 // beneficial.
826 // As an approximation, preserve the attribute only if both the branch and the
827 // parent context were not executed.
828 Stmt::Likelihood LH = Stmt::LH_None;
829 uint64_t ThenCount = getProfileCount(S.getThen());
830 if (!ThenCount && !getCurrentProfileCount() &&
831 CGM.getCodeGenOpts().OptimizationLevel)
832 LH = Stmt::getLikelihood(S.getThen(), S.getElse());
833 EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
834
835 // Emit the 'then' code.
836 EmitBlock(ThenBlock);
837 incrementProfileCounter(&S);
838 {
839 RunCleanupsScope ThenScope(*this);
840 EmitStmt(S.getThen());
841 }
842 EmitBranch(ContBlock);
843
844 // Emit the 'else' code if present.
845 if (const Stmt *Else = S.getElse()) {
846 {
847 // There is no need to emit line number for an unconditional branch.
848 auto NL = ApplyDebugLocation::CreateEmpty(*this);
849 EmitBlock(ElseBlock);
850 }
851 {
852 RunCleanupsScope ElseScope(*this);
853 EmitStmt(Else);
854 }
855 {
856 // There is no need to emit line number for an unconditional branch.
857 auto NL = ApplyDebugLocation::CreateEmpty(*this);
858 EmitBranch(ContBlock);
859 }
860 }
861
862 // Emit the continuation block for code after the if.
863 EmitBlock(ContBlock, true);
864 }
865
EmitWhileStmt(const WhileStmt & S,ArrayRef<const Attr * > WhileAttrs)866 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
867 ArrayRef<const Attr *> WhileAttrs) {
868 // Emit the header for the loop, which will also become
869 // the continue target.
870 JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
871 EmitBlock(LoopHeader.getBlock());
872
873 // Create an exit block for when the condition fails, which will
874 // also become the break target.
875 JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
876
877 // Store the blocks to use for break and continue.
878 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
879
880 // C++ [stmt.while]p2:
881 // When the condition of a while statement is a declaration, the
882 // scope of the variable that is declared extends from its point
883 // of declaration (3.3.2) to the end of the while statement.
884 // [...]
885 // The object created in a condition is destroyed and created
886 // with each iteration of the loop.
887 RunCleanupsScope ConditionScope(*this);
888
889 if (S.getConditionVariable())
890 EmitDecl(*S.getConditionVariable());
891
892 // Evaluate the conditional in the while header. C99 6.8.5.1: The
893 // evaluation of the controlling expression takes place before each
894 // execution of the loop body.
895 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
896
897 // while(1) is common, avoid extra exit blocks. Be sure
898 // to correctly handle break/continue though.
899 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
900 bool CondIsConstInt = C != nullptr;
901 bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
902 const SourceRange &R = S.getSourceRange();
903 LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
904 WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
905 SourceLocToDebugLoc(R.getEnd()),
906 checkIfLoopMustProgress(CondIsConstInt));
907
908 // As long as the condition is true, go to the loop body.
909 llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
910 if (EmitBoolCondBranch) {
911 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
912 if (ConditionScope.requiresCleanups())
913 ExitBlock = createBasicBlock("while.exit");
914 llvm::MDNode *Weights =
915 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
916 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
917 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
918 BoolCondVal, Stmt::getLikelihood(S.getBody()));
919 Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
920
921 if (ExitBlock != LoopExit.getBlock()) {
922 EmitBlock(ExitBlock);
923 EmitBranchThroughCleanup(LoopExit);
924 }
925 } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
926 CGM.getDiags().Report(A->getLocation(),
927 diag::warn_attribute_has_no_effect_on_infinite_loop)
928 << A << A->getRange();
929 CGM.getDiags().Report(
930 S.getWhileLoc(),
931 diag::note_attribute_has_no_effect_on_infinite_loop_here)
932 << SourceRange(S.getWhileLoc(), S.getRParenLoc());
933 }
934
935 // Emit the loop body. We have to emit this in a cleanup scope
936 // because it might be a singleton DeclStmt.
937 {
938 RunCleanupsScope BodyScope(*this);
939 EmitBlock(LoopBody);
940 incrementProfileCounter(&S);
941 EmitStmt(S.getBody());
942 }
943
944 BreakContinueStack.pop_back();
945
946 // Immediately force cleanup.
947 ConditionScope.ForceCleanup();
948
949 EmitStopPoint(&S);
950 // Branch to the loop header again.
951 EmitBranch(LoopHeader.getBlock());
952
953 LoopStack.pop();
954
955 // Emit the exit block.
956 EmitBlock(LoopExit.getBlock(), true);
957
958 // The LoopHeader typically is just a branch if we skipped emitting
959 // a branch, try to erase it.
960 if (!EmitBoolCondBranch)
961 SimplifyForwardingBlocks(LoopHeader.getBlock());
962 }
963
EmitDoStmt(const DoStmt & S,ArrayRef<const Attr * > DoAttrs)964 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
965 ArrayRef<const Attr *> DoAttrs) {
966 JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
967 JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
968
969 uint64_t ParentCount = getCurrentProfileCount();
970
971 // Store the blocks to use for break and continue.
972 BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
973
974 // Emit the body of the loop.
975 llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
976
977 EmitBlockWithFallThrough(LoopBody, &S);
978 {
979 RunCleanupsScope BodyScope(*this);
980 EmitStmt(S.getBody());
981 }
982
983 EmitBlock(LoopCond.getBlock());
984
985 // C99 6.8.5.2: "The evaluation of the controlling expression takes place
986 // after each execution of the loop body."
987
988 // Evaluate the conditional in the while header.
989 // C99 6.8.5p2/p4: The first substatement is executed if the expression
990 // compares unequal to 0. The condition must be a scalar type.
991 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
992
993 BreakContinueStack.pop_back();
994
995 // "do {} while (0)" is common in macros, avoid extra blocks. Be sure
996 // to correctly handle break/continue though.
997 llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
998 bool CondIsConstInt = C;
999 bool EmitBoolCondBranch = !C || !C->isZero();
1000
1001 const SourceRange &R = S.getSourceRange();
1002 LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1003 SourceLocToDebugLoc(R.getBegin()),
1004 SourceLocToDebugLoc(R.getEnd()),
1005 checkIfLoopMustProgress(CondIsConstInt));
1006
1007 // As long as the condition is true, iterate the loop.
1008 if (EmitBoolCondBranch) {
1009 uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1010 Builder.CreateCondBr(
1011 BoolCondVal, LoopBody, LoopExit.getBlock(),
1012 createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1013 }
1014
1015 LoopStack.pop();
1016
1017 // Emit the exit block.
1018 EmitBlock(LoopExit.getBlock());
1019
1020 // The DoCond block typically is just a branch if we skipped
1021 // emitting a branch, try to erase it.
1022 if (!EmitBoolCondBranch)
1023 SimplifyForwardingBlocks(LoopCond.getBlock());
1024 }
1025
EmitForStmt(const ForStmt & S,ArrayRef<const Attr * > ForAttrs)1026 void CodeGenFunction::EmitForStmt(const ForStmt &S,
1027 ArrayRef<const Attr *> ForAttrs) {
1028 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1029
1030 LexicalScope ForScope(*this, S.getSourceRange());
1031
1032 // Evaluate the first part before the loop.
1033 if (S.getInit())
1034 EmitStmt(S.getInit());
1035
1036 // Start the loop with a block that tests the condition.
1037 // If there's an increment, the continue scope will be overwritten
1038 // later.
1039 JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1040 llvm::BasicBlock *CondBlock = CondDest.getBlock();
1041 EmitBlock(CondBlock);
1042
1043 Expr::EvalResult Result;
1044 bool CondIsConstInt =
1045 !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());
1046
1047 const SourceRange &R = S.getSourceRange();
1048 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1049 SourceLocToDebugLoc(R.getBegin()),
1050 SourceLocToDebugLoc(R.getEnd()),
1051 checkIfLoopMustProgress(CondIsConstInt));
1052
1053 // Create a cleanup scope for the condition variable cleanups.
1054 LexicalScope ConditionScope(*this, S.getSourceRange());
1055
1056 // If the for loop doesn't have an increment we can just use the condition as
1057 // the continue block. Otherwise, if there is no condition variable, we can
1058 // form the continue block now. If there is a condition variable, we can't
1059 // form the continue block until after we've emitted the condition, because
1060 // the condition is in scope in the increment, but Sema's jump diagnostics
1061 // ensure that there are no continues from the condition variable that jump
1062 // to the loop increment.
1063 JumpDest Continue;
1064 if (!S.getInc())
1065 Continue = CondDest;
1066 else if (!S.getConditionVariable())
1067 Continue = getJumpDestInCurrentScope("for.inc");
1068 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1069
1070 if (S.getCond()) {
1071 // If the for statement has a condition scope, emit the local variable
1072 // declaration.
1073 if (S.getConditionVariable()) {
1074 EmitDecl(*S.getConditionVariable());
1075
1076 // We have entered the condition variable's scope, so we're now able to
1077 // jump to the continue block.
1078 Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1079 BreakContinueStack.back().ContinueBlock = Continue;
1080 }
1081
1082 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1083 // If there are any cleanups between here and the loop-exit scope,
1084 // create a block to stage a loop exit along.
1085 if (ForScope.requiresCleanups())
1086 ExitBlock = createBasicBlock("for.cond.cleanup");
1087
1088 // As long as the condition is true, iterate the loop.
1089 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1090
1091 // C99 6.8.5p2/p4: The first substatement is executed if the expression
1092 // compares unequal to 0. The condition must be a scalar type.
1093 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1094 llvm::MDNode *Weights =
1095 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1096 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1097 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1098 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1099
1100 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1101
1102 if (ExitBlock != LoopExit.getBlock()) {
1103 EmitBlock(ExitBlock);
1104 EmitBranchThroughCleanup(LoopExit);
1105 }
1106
1107 EmitBlock(ForBody);
1108 } else {
1109 // Treat it as a non-zero constant. Don't even create a new block for the
1110 // body, just fall into it.
1111 }
1112 incrementProfileCounter(&S);
1113
1114 {
1115 // Create a separate cleanup scope for the body, in case it is not
1116 // a compound statement.
1117 RunCleanupsScope BodyScope(*this);
1118 EmitStmt(S.getBody());
1119 }
1120
1121 // If there is an increment, emit it next.
1122 if (S.getInc()) {
1123 EmitBlock(Continue.getBlock());
1124 EmitStmt(S.getInc());
1125 }
1126
1127 BreakContinueStack.pop_back();
1128
1129 ConditionScope.ForceCleanup();
1130
1131 EmitStopPoint(&S);
1132 EmitBranch(CondBlock);
1133
1134 ForScope.ForceCleanup();
1135
1136 LoopStack.pop();
1137
1138 // Emit the fall-through block.
1139 EmitBlock(LoopExit.getBlock(), true);
1140 }
1141
1142 void
EmitCXXForRangeStmt(const CXXForRangeStmt & S,ArrayRef<const Attr * > ForAttrs)1143 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1144 ArrayRef<const Attr *> ForAttrs) {
1145 JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1146
1147 LexicalScope ForScope(*this, S.getSourceRange());
1148
1149 // Evaluate the first pieces before the loop.
1150 if (S.getInit())
1151 EmitStmt(S.getInit());
1152 EmitStmt(S.getRangeStmt());
1153 EmitStmt(S.getBeginStmt());
1154 EmitStmt(S.getEndStmt());
1155
1156 // Start the loop with a block that tests the condition.
1157 // If there's an increment, the continue scope will be overwritten
1158 // later.
1159 llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1160 EmitBlock(CondBlock);
1161
1162 const SourceRange &R = S.getSourceRange();
1163 LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1164 SourceLocToDebugLoc(R.getBegin()),
1165 SourceLocToDebugLoc(R.getEnd()));
1166
1167 // If there are any cleanups between here and the loop-exit scope,
1168 // create a block to stage a loop exit along.
1169 llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1170 if (ForScope.requiresCleanups())
1171 ExitBlock = createBasicBlock("for.cond.cleanup");
1172
1173 // The loop body, consisting of the specified body and the loop variable.
1174 llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1175
1176 // The body is executed if the expression, contextually converted
1177 // to bool, is true.
1178 llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1179 llvm::MDNode *Weights =
1180 createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1181 if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1182 BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1183 BoolCondVal, Stmt::getLikelihood(S.getBody()));
1184 Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1185
1186 if (ExitBlock != LoopExit.getBlock()) {
1187 EmitBlock(ExitBlock);
1188 EmitBranchThroughCleanup(LoopExit);
1189 }
1190
1191 EmitBlock(ForBody);
1192 incrementProfileCounter(&S);
1193
1194 // Create a block for the increment. In case of a 'continue', we jump there.
1195 JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1196
1197 // Store the blocks to use for break and continue.
1198 BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1199
1200 {
1201 // Create a separate cleanup scope for the loop variable and body.
1202 LexicalScope BodyScope(*this, S.getSourceRange());
1203 EmitStmt(S.getLoopVarStmt());
1204 EmitStmt(S.getBody());
1205 }
1206
1207 EmitStopPoint(&S);
1208 // If there is an increment, emit it next.
1209 EmitBlock(Continue.getBlock());
1210 EmitStmt(S.getInc());
1211
1212 BreakContinueStack.pop_back();
1213
1214 EmitBranch(CondBlock);
1215
1216 ForScope.ForceCleanup();
1217
1218 LoopStack.pop();
1219
1220 // Emit the fall-through block.
1221 EmitBlock(LoopExit.getBlock(), true);
1222 }
1223
EmitReturnOfRValue(RValue RV,QualType Ty)1224 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1225 if (RV.isScalar()) {
1226 Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1227 } else if (RV.isAggregate()) {
1228 LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1229 LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
1230 EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
1231 } else {
1232 EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
1233 /*init*/ true);
1234 }
1235 EmitBranchThroughCleanup(ReturnBlock);
1236 }
1237
1238 namespace {
1239 // RAII struct used to save and restore a return statment's result expression.
1240 struct SaveRetExprRAII {
SaveRetExprRAII__anonc3df80dd0111::SaveRetExprRAII1241 SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1242 : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1243 CGF.RetExpr = RetExpr;
1244 }
~SaveRetExprRAII__anonc3df80dd0111::SaveRetExprRAII1245 ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1246 const Expr *OldRetExpr;
1247 CodeGenFunction &CGF;
1248 };
1249 } // namespace
1250
1251 /// If we have 'return f(...);', where both caller and callee are SwiftAsync,
1252 /// codegen it as 'tail call ...; ret void;'.
makeTailCallIfSwiftAsync(const CallExpr * CE,CGBuilderTy & Builder,const CGFunctionInfo * CurFnInfo)1253 static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
1254 const CGFunctionInfo *CurFnInfo) {
1255 auto calleeQualType = CE->getCallee()->getType();
1256 const FunctionType *calleeType = nullptr;
1257 if (calleeQualType->isFunctionPointerType() ||
1258 calleeQualType->isFunctionReferenceType() ||
1259 calleeQualType->isBlockPointerType() ||
1260 calleeQualType->isMemberFunctionPointerType()) {
1261 calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1262 } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1263 calleeType = ty;
1264 } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1265 if (auto methodDecl = CMCE->getMethodDecl()) {
1266 // getMethodDecl() doesn't handle member pointers at the moment.
1267 calleeType = methodDecl->getType()->castAs<FunctionType>();
1268 } else {
1269 return;
1270 }
1271 } else {
1272 return;
1273 }
1274 if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
1275 (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
1276 auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
1277 CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
1278 Builder.CreateRetVoid();
1279 Builder.ClearInsertionPoint();
1280 }
1281 }
1282
1283 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1284 /// if the function returns void, or may be missing one if the function returns
1285 /// non-void. Fun stuff :).
EmitReturnStmt(const ReturnStmt & S)1286 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1287 if (requiresReturnValueCheck()) {
1288 llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1289 auto *SLocPtr =
1290 new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1291 llvm::GlobalVariable::PrivateLinkage, SLoc);
1292 SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1293 CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1294 assert(ReturnLocation.isValid() && "No valid return location");
1295 Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
1296 ReturnLocation);
1297 }
1298
1299 // Returning from an outlined SEH helper is UB, and we already warn on it.
1300 if (IsOutlinedSEHHelper) {
1301 Builder.CreateUnreachable();
1302 Builder.ClearInsertionPoint();
1303 }
1304
1305 // Emit the result value, even if unused, to evaluate the side effects.
1306 const Expr *RV = S.getRetValue();
1307
1308 // Record the result expression of the return statement. The recorded
1309 // expression is used to determine whether a block capture's lifetime should
1310 // end at the end of the full expression as opposed to the end of the scope
1311 // enclosing the block expression.
1312 //
1313 // This permits a small, easily-implemented exception to our over-conservative
1314 // rules about not jumping to statements following block literals with
1315 // non-trivial cleanups.
1316 SaveRetExprRAII SaveRetExpr(RV, *this);
1317
1318 RunCleanupsScope cleanupScope(*this);
1319 if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1320 RV = EWC->getSubExpr();
1321 // FIXME: Clean this up by using an LValue for ReturnTemp,
1322 // EmitStoreThroughLValue, and EmitAnyExpr.
1323 // Check if the NRVO candidate was not globalized in OpenMP mode.
1324 if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1325 S.getNRVOCandidate()->isNRVOVariable() &&
1326 (!getLangOpts().OpenMP ||
1327 !CGM.getOpenMPRuntime()
1328 .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1329 .isValid())) {
1330 // Apply the named return value optimization for this return statement,
1331 // which means doing nothing: the appropriate result has already been
1332 // constructed into the NRVO variable.
1333
1334 // If there is an NRVO flag for this variable, set it to 1 into indicate
1335 // that the cleanup code should not destroy the variable.
1336 if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1337 Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1338 } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1339 // Make sure not to return anything, but evaluate the expression
1340 // for side effects.
1341 if (RV) {
1342 EmitAnyExpr(RV);
1343 if (auto *CE = dyn_cast<CallExpr>(RV))
1344 makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
1345 }
1346 } else if (!RV) {
1347 // Do nothing (return value is left uninitialized)
1348 } else if (FnRetTy->isReferenceType()) {
1349 // If this function returns a reference, take the address of the expression
1350 // rather than the value.
1351 RValue Result = EmitReferenceBindingToExpr(RV);
1352 Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1353 } else {
1354 switch (getEvaluationKind(RV->getType())) {
1355 case TEK_Scalar:
1356 Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1357 break;
1358 case TEK_Complex:
1359 EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1360 /*isInit*/ true);
1361 break;
1362 case TEK_Aggregate:
1363 EmitAggExpr(RV, AggValueSlot::forAddr(
1364 ReturnValue, Qualifiers(),
1365 AggValueSlot::IsDestructed,
1366 AggValueSlot::DoesNotNeedGCBarriers,
1367 AggValueSlot::IsNotAliased,
1368 getOverlapForReturnValue()));
1369 break;
1370 }
1371 }
1372
1373 ++NumReturnExprs;
1374 if (!RV || RV->isEvaluatable(getContext()))
1375 ++NumSimpleReturnExprs;
1376
1377 cleanupScope.ForceCleanup();
1378 EmitBranchThroughCleanup(ReturnBlock);
1379 }
1380
EmitDeclStmt(const DeclStmt & S)1381 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1382 // As long as debug info is modeled with instructions, we have to ensure we
1383 // have a place to insert here and write the stop point here.
1384 if (HaveInsertPoint())
1385 EmitStopPoint(&S);
1386
1387 for (const auto *I : S.decls())
1388 EmitDecl(*I);
1389 }
1390
EmitBreakStmt(const BreakStmt & S)1391 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1392 assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1393
1394 // If this code is reachable then emit a stop point (if generating
1395 // debug info). We have to do this ourselves because we are on the
1396 // "simple" statement path.
1397 if (HaveInsertPoint())
1398 EmitStopPoint(&S);
1399
1400 EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1401 }
1402
EmitContinueStmt(const ContinueStmt & S)1403 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1404 assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1405
1406 // If this code is reachable then emit a stop point (if generating
1407 // debug info). We have to do this ourselves because we are on the
1408 // "simple" statement path.
1409 if (HaveInsertPoint())
1410 EmitStopPoint(&S);
1411
1412 EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1413 }
1414
1415 /// EmitCaseStmtRange - If case statement range is not too big then
1416 /// add multiple cases to switch instruction, one for each value within
1417 /// the range. If range is too big then emit "if" condition check.
EmitCaseStmtRange(const CaseStmt & S,ArrayRef<const Attr * > Attrs)1418 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1419 ArrayRef<const Attr *> Attrs) {
1420 assert(S.getRHS() && "Expected RHS value in CaseStmt");
1421
1422 llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1423 llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1424
1425 // Emit the code for this case. We do this first to make sure it is
1426 // properly chained from our predecessor before generating the
1427 // switch machinery to enter this block.
1428 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1429 EmitBlockWithFallThrough(CaseDest, &S);
1430 EmitStmt(S.getSubStmt());
1431
1432 // If range is empty, do nothing.
1433 if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1434 return;
1435
1436 Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1437 llvm::APInt Range = RHS - LHS;
1438 // FIXME: parameters such as this should not be hardcoded.
1439 if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1440 // Range is small enough to add multiple switch instruction cases.
1441 uint64_t Total = getProfileCount(&S);
1442 unsigned NCases = Range.getZExtValue() + 1;
1443 // We only have one region counter for the entire set of cases here, so we
1444 // need to divide the weights evenly between the generated cases, ensuring
1445 // that the total weight is preserved. E.g., a weight of 5 over three cases
1446 // will be distributed as weights of 2, 2, and 1.
1447 uint64_t Weight = Total / NCases, Rem = Total % NCases;
1448 for (unsigned I = 0; I != NCases; ++I) {
1449 if (SwitchWeights)
1450 SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1451 else if (SwitchLikelihood)
1452 SwitchLikelihood->push_back(LH);
1453
1454 if (Rem)
1455 Rem--;
1456 SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1457 ++LHS;
1458 }
1459 return;
1460 }
1461
1462 // The range is too big. Emit "if" condition into a new block,
1463 // making sure to save and restore the current insertion point.
1464 llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1465
1466 // Push this test onto the chain of range checks (which terminates
1467 // in the default basic block). The switch's default will be changed
1468 // to the top of this chain after switch emission is complete.
1469 llvm::BasicBlock *FalseDest = CaseRangeBlock;
1470 CaseRangeBlock = createBasicBlock("sw.caserange");
1471
1472 CurFn->insert(CurFn->end(), CaseRangeBlock);
1473 Builder.SetInsertPoint(CaseRangeBlock);
1474
1475 // Emit range check.
1476 llvm::Value *Diff =
1477 Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1478 llvm::Value *Cond =
1479 Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1480
1481 llvm::MDNode *Weights = nullptr;
1482 if (SwitchWeights) {
1483 uint64_t ThisCount = getProfileCount(&S);
1484 uint64_t DefaultCount = (*SwitchWeights)[0];
1485 Weights = createProfileWeights(ThisCount, DefaultCount);
1486
1487 // Since we're chaining the switch default through each large case range, we
1488 // need to update the weight for the default, i.e., the first case, to include
1489 // this case.
1490 (*SwitchWeights)[0] += ThisCount;
1491 } else if (SwitchLikelihood)
1492 Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1493
1494 Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1495
1496 // Restore the appropriate insertion point.
1497 if (RestoreBB)
1498 Builder.SetInsertPoint(RestoreBB);
1499 else
1500 Builder.ClearInsertionPoint();
1501 }
1502
1503 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1504 ArrayRef<const Attr *> Attrs) {
1505 // If there is no enclosing switch instance that we're aware of, then this
1506 // case statement and its block can be elided. This situation only happens
1507 // when we've constant-folded the switch, are emitting the constant case,
1508 // and part of the constant case includes another case statement. For
1509 // instance: switch (4) { case 4: do { case 5: } while (1); }
1510 if (!SwitchInsn) {
1511 EmitStmt(S.getSubStmt());
1512 return;
1513 }
1514
1515 // Handle case ranges.
1516 if (S.getRHS()) {
1517 EmitCaseStmtRange(S, Attrs);
1518 return;
1519 }
1520
1521 llvm::ConstantInt *CaseVal =
1522 Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1523
1524 // Emit debuginfo for the case value if it is an enum value.
1525 const ConstantExpr *CE;
1526 if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1527 CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1528 else
1529 CE = dyn_cast<ConstantExpr>(S.getLHS());
1530 if (CE) {
1531 if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1532 if (CGDebugInfo *Dbg = getDebugInfo())
1533 if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1534 Dbg->EmitGlobalVariable(DE->getDecl(),
1535 APValue(llvm::APSInt(CaseVal->getValue())));
1536 }
1537
1538 if (SwitchLikelihood)
1539 SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1540
1541 // If the body of the case is just a 'break', try not to emit an empty block.
1542 // If we're profiling or we're not optimizing, leave the block in for better
1543 // debug and coverage analysis.
1544 if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1545 CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1546 isa<BreakStmt>(S.getSubStmt())) {
1547 JumpDest Block = BreakContinueStack.back().BreakBlock;
1548
1549 // Only do this optimization if there are no cleanups that need emitting.
1550 if (isObviouslyBranchWithoutCleanups(Block)) {
1551 if (SwitchWeights)
1552 SwitchWeights->push_back(getProfileCount(&S));
1553 SwitchInsn->addCase(CaseVal, Block.getBlock());
1554
1555 // If there was a fallthrough into this case, make sure to redirect it to
1556 // the end of the switch as well.
1557 if (Builder.GetInsertBlock()) {
1558 Builder.CreateBr(Block.getBlock());
1559 Builder.ClearInsertionPoint();
1560 }
1561 return;
1562 }
1563 }
1564
1565 llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1566 EmitBlockWithFallThrough(CaseDest, &S);
1567 if (SwitchWeights)
1568 SwitchWeights->push_back(getProfileCount(&S));
1569 SwitchInsn->addCase(CaseVal, CaseDest);
1570
1571 // Recursively emitting the statement is acceptable, but is not wonderful for
1572 // code where we have many case statements nested together, i.e.:
1573 // case 1:
1574 // case 2:
1575 // case 3: etc.
1576 // Handling this recursively will create a new block for each case statement
1577 // that falls through to the next case, which is IR-intensive. It also causes
1578 // deep recursion, which can run into stack depth limitations. Handle
1579 // sequential non-range case statements specially.
1580 //
1581 // TODO: When the next case has a likelihood attribute, the code returns to
1582 // the recursive algorithm. Maybe improve this case if it becomes common
1583 // practice to use a lot of attributes.
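// Concretely, for the "case 1: case 2: case 3:" example above (illustrative),
// the loop below adds the values 1, 2 and 3 to the switch with the same
// destination block instead of recursing once per case label.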
1584 const CaseStmt *CurCase = &S;
1585 const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1586
1587 // Otherwise, iteratively add consecutive cases to this switch stmt.
1588 while (NextCase && NextCase->getRHS() == nullptr) {
1589 CurCase = NextCase;
1590 llvm::ConstantInt *CaseVal =
1591 Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1592
1593 if (SwitchWeights)
1594 SwitchWeights->push_back(getProfileCount(NextCase));
1595 if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1596 CaseDest = createBasicBlock("sw.bb");
1597 EmitBlockWithFallThrough(CaseDest, CurCase);
1598 }
1599 // Since this loop is only executed when the CaseStmt has no attributes,
1600 // use a hard-coded value.
1601 if (SwitchLikelihood)
1602 SwitchLikelihood->push_back(Stmt::LH_None);
1603
1604 SwitchInsn->addCase(CaseVal, CaseDest);
1605 NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1606 }
1607
1608 // Generate a stop point for debug info if the case statement is
1609 // followed by a default statement. A fallthrough case before a
1610 // default case gets its own branch target.
1611 if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1612 EmitStopPoint(CurCase);
1613
1614 // Normal default recursion for non-cases.
1615 EmitStmt(CurCase->getSubStmt());
1616 }
1617
1618 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1619 ArrayRef<const Attr *> Attrs) {
1620 // If there is no enclosing switch instance that we're aware of, then this
1621 // default statement can be elided. This situation only happens when we've
1622 // constant-folded the switch.
1623 if (!SwitchInsn) {
1624 EmitStmt(S.getSubStmt());
1625 return;
1626 }
1627
1628 llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1629 assert(DefaultBlock->empty() &&
1630 "EmitDefaultStmt: Default block already defined?");
1631
1632 if (SwitchLikelihood)
1633 SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1634
1635 EmitBlockWithFallThrough(DefaultBlock, &S);
1636
1637 EmitStmt(S.getSubStmt());
1638 }
1639
1640 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1641 /// constant value that is being switched on, see if we can dead code eliminate
1642 /// the body of the switch to a simple series of statements to emit. Basically,
1643 /// on a switch (5) we want to find these statements:
1644 /// case 5:
1645 /// printf(...); <--
1646 /// ++i; <--
1647 /// break;
1648 ///
1649 /// and add them to the ResultStmts vector. If it is unsafe to do this
1650 /// transformation (for example, one of the elided statements contains a label
1651 /// that might be jumped to), return CSFC_Failure. If we handled it and 'S'
1652 /// should include statements after it (e.g. the printf() line is a substmt of
1653 /// the case) then return CSFC_FallThrough. If we handled it and found a break
1654 /// statement, then return CSFC_Success.
1655 ///
1656 /// If Case is non-null, then we are looking for the specified case, checking
1657 /// that nothing we jump over contains labels. If Case is null, then we found
1658 /// the case and are looking for the break.
1659 ///
1660 /// If the recursive walk actually finds our Case, then we set FoundCase to
1661 /// true.
1662 ///
1663 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1664 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1665 const SwitchCase *Case,
1666 bool &FoundCase,
1667 SmallVectorImpl<const Stmt*> &ResultStmts) {
1668 // If this is a null statement, just succeed.
1669 if (!S)
1670 return Case ? CSFC_Success : CSFC_FallThrough;
1671
1672 // If this is the switchcase (case 4: or default) that we're looking for, then
1673 // we're in business. Just add the substatement.
1674 if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1675 if (S == Case) {
1676 FoundCase = true;
1677 return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1678 ResultStmts);
1679 }
1680
1681 // Otherwise, this is some other case or default statement, just ignore it.
1682 return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1683 ResultStmts);
1684 }
1685
1686 // If we are in the live part of the code and we found our break statement,
1687 // return a success!
1688 if (!Case && isa<BreakStmt>(S))
1689 return CSFC_Success;
1690
1691 // If this is a compound statement, then it might contain the SwitchCase,
1692 // the break, or neither.
1693 if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1694 // Handle this as two cases: we might be looking for the SwitchCase (if so
1695 // the skipped statements must be skippable) or we might already have it.
1696 CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1697 bool StartedInLiveCode = FoundCase;
1698 unsigned StartSize = ResultStmts.size();
1699
1700 // If we've not found the case yet, scan through looking for it.
1701 if (Case) {
1702 // Keep track of whether we see a skipped declaration. The code could be
1703 // using the declaration even if it is skipped, so we can't optimize out
1704 // the decl if the kept statements might refer to it.
1705 bool HadSkippedDecl = false;
1706
1707 // If we're looking for the case, just see if we can skip each of the
1708 // substatements.
1709 for (; Case && I != E; ++I) {
1710 HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1711
1712 switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1713 case CSFC_Failure: return CSFC_Failure;
1714 case CSFC_Success:
1715 // A successful result means either 1) that the statement doesn't
1716 // have the case and is skippable, or 2) that it does contain the case
1717 // value and also contains the break to exit the switch. In the latter
1718 // case, we just verify the rest of the statements are elidable.
1719 if (FoundCase) {
1720 // If we found the case and skipped declarations, we can't do the
1721 // optimization.
1722 if (HadSkippedDecl)
1723 return CSFC_Failure;
1724
1725 for (++I; I != E; ++I)
1726 if (CodeGenFunction::ContainsLabel(*I, true))
1727 return CSFC_Failure;
1728 return CSFC_Success;
1729 }
1730 break;
1731 case CSFC_FallThrough:
1732 // If we have a fallthrough condition, then we must have found the
1733 // case and started to include statements. Consider the rest of the
1734 // statements in the compound statement as candidates for inclusion.
1735 assert(FoundCase && "Didn't find case but returned fallthrough?");
1736 // We recursively found Case, so we're not looking for it anymore.
1737 Case = nullptr;
1738
1739 // If we found the case and skipped declarations, we can't do the
1740 // optimization.
1741 if (HadSkippedDecl)
1742 return CSFC_Failure;
1743 break;
1744 }
1745 }
1746
1747 if (!FoundCase)
1748 return CSFC_Success;
1749
1750 assert(!HadSkippedDecl && "fallthrough after skipping decl");
1751 }
1752
1753 // If we have statements in our range, then we know that the statements are
1754 // live and need to be added to the set of statements we're tracking.
1755 bool AnyDecls = false;
1756 for (; I != E; ++I) {
1757 AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1758
1759 switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1760 case CSFC_Failure: return CSFC_Failure;
1761 case CSFC_FallThrough:
1762 // A fallthrough result means that the statement was simple and was just
1763 // added to ResultStmts; keep adding the statements that follow it.
1764 break;
1765 case CSFC_Success:
1766 // A successful result means that we found the break statement and
1767 // stopped statement inclusion. We just ensure that any leftover stmts
1768 // are skippable and return success ourselves.
1769 for (++I; I != E; ++I)
1770 if (CodeGenFunction::ContainsLabel(*I, true))
1771 return CSFC_Failure;
1772 return CSFC_Success;
1773 }
1774 }
1775
1776 // If we're about to fall out of a scope without hitting a 'break;', we
1777 // can't perform the optimization if there were any decls in that scope
1778 // (we'd lose their end-of-lifetime).
1779 if (AnyDecls) {
1780 // If the entire compound statement was live, there's one more thing we
1781 // can try before giving up: emit the whole thing as a single statement.
1782 // We can do that unless the statement contains a 'break;'.
1783 // FIXME: Such a break must be at the end of a construct within this one.
1784 // We could emit this by just ignoring the BreakStmts entirely.
1785 if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1786 ResultStmts.resize(StartSize);
1787 ResultStmts.push_back(S);
1788 } else {
1789 return CSFC_Failure;
1790 }
1791 }
1792
1793 return CSFC_FallThrough;
1794 }
1795
1796 // Okay, this is some other statement that we don't handle explicitly, like a
1797 // for statement or increment etc. If we are skipping over this statement,
1798 // just verify it doesn't have labels, which would make it invalid to elide.
1799 if (Case) {
1800 if (CodeGenFunction::ContainsLabel(S, true))
1801 return CSFC_Failure;
1802 return CSFC_Success;
1803 }
1804
1805 // Otherwise, we want to include this statement. Everything is cool with that
1806 // so long as it doesn't contain a break out of the switch we're in.
1807 if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1808
1809 // Otherwise, everything is great. Include the statement and tell the caller
1810 // that we fall through and include the next statement as well.
1811 ResultStmts.push_back(S);
1812 return CSFC_FallThrough;
1813 }
1814
1815 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1816 /// then invoke CollectStatementsForCase to find the list of statements to emit
1817 /// for a switch on constant. See the comment above CollectStatementsForCase
1818 /// for more details.
1819 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1820 const llvm::APSInt &ConstantCondValue,
1821 SmallVectorImpl<const Stmt*> &ResultStmts,
1822 ASTContext &C,
1823 const SwitchCase *&ResultCase) {
1824 // First step, find the switch case that is being branched to. We can do this
1825 // efficiently by scanning the SwitchCase list.
1826 const SwitchCase *Case = S.getSwitchCaseList();
1827 const DefaultStmt *DefaultCase = nullptr;
1828
1829 for (; Case; Case = Case->getNextSwitchCase()) {
1830 // It's either a default or case. Just remember the default statement in
1831 // case we're not jumping to any numbered cases.
1832 if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1833 DefaultCase = DS;
1834 continue;
1835 }
1836
1837 // Check to see if this case is the one we're looking for.
1838 const CaseStmt *CS = cast<CaseStmt>(Case);
1839 // Don't handle case ranges yet.
1840 if (CS->getRHS()) return false;
1841
1842 // If we found our case, remember it as 'case'.
1843 if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1844 break;
1845 }
1846
1847 // If we didn't find a matching case, we use a default if it exists, or we
1848 // elide the whole switch body!
1849 if (!Case) {
1850 // It is safe to elide the body of the switch if it doesn't contain labels
1851 // etc. If it is safe, return successfully with an empty ResultStmts list.
1852 if (!DefaultCase)
1853 return !CodeGenFunction::ContainsLabel(&S);
1854 Case = DefaultCase;
1855 }
1856
1857 // Ok, we know which case is being jumped to, try to collect all the
1858 // statements that follow it. This can fail for a variety of reasons. Also,
1859 // check to see that the recursive walk actually found our case statement.
1860 // Insane cases like this can fail to find it in the recursive walk since we
1861 // don't handle every stmt kind:
1862 // switch (4) {
1863 // while (1) {
1864 // case 4: ...
1865 bool FoundCase = false;
1866 ResultCase = Case;
1867 return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1868 ResultStmts) != CSFC_Failure &&
1869 FoundCase;
1870 }
1871
1872 static std::optional<SmallVector<uint64_t, 16>>
1873 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1874 // Are there enough branches to weight them?
1875 if (Likelihoods.size() <= 1)
1876 return std::nullopt;
1877
1878 uint64_t NumUnlikely = 0;
1879 uint64_t NumNone = 0;
1880 uint64_t NumLikely = 0;
1881 for (const auto LH : Likelihoods) {
1882 switch (LH) {
1883 case Stmt::LH_Unlikely:
1884 ++NumUnlikely;
1885 break;
1886 case Stmt::LH_None:
1887 ++NumNone;
1888 break;
1889 case Stmt::LH_Likely:
1890 ++NumLikely;
1891 break;
1892 }
1893 }
1894
1895 // Is there a likelihood attribute used?
1896 if (NumUnlikely == 0 && NumLikely == 0)
1897 return std::nullopt;
1898
1899 // When multiple cases share the same code they can be combined during
1900 // optimization. In that case the weights of the branch will be the sum of
1901 // the individual weights. Make sure the combined sum of all neutral cases
1902 // doesn't exceed the value of a single likely attribute.
1903 // The additions both avoid divisions by 0 and make sure the weights of None
1904 // don't exceed the weight of Likely.
1905 const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1906 const uint64_t None = Likely / (NumNone + 1);
1907 const uint64_t Unlikely = 0;
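// As an illustrative example (exact values follow the integer arithmetic
// above): with one LH_Likely entry and two LH_None entries (e.g. the default
// plus one plain case), Likely is INT32_MAX / 3 == 715827882 and None is
// Likely / 3 == 238609294, so even the two None weights combined (477218588)
// stay below a single Likely weight, as intended.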
1908
1909 SmallVector<uint64_t, 16> Result;
1910 Result.reserve(Likelihoods.size());
1911 for (const auto LH : Likelihoods) {
1912 switch (LH) {
1913 case Stmt::LH_Unlikely:
1914 Result.push_back(Unlikely);
1915 break;
1916 case Stmt::LH_None:
1917 Result.push_back(None);
1918 break;
1919 case Stmt::LH_Likely:
1920 Result.push_back(Likely);
1921 break;
1922 }
1923 }
1924
1925 return Result;
1926 }
1927
1928 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1929 // Handle nested switch statements.
1930 llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1931 SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1932 SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1933 llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1934
1935 // See if we can constant fold the condition of the switch and therefore only
1936 // emit the live case statement (if any) of the switch.
1937 llvm::APSInt ConstantCondValue;
1938 if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1939 SmallVector<const Stmt*, 4> CaseStmts;
1940 const SwitchCase *Case = nullptr;
1941 if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1942 getContext(), Case)) {
1943 if (Case)
1944 incrementProfileCounter(Case);
1945 RunCleanupsScope ExecutedScope(*this);
1946
1947 if (S.getInit())
1948 EmitStmt(S.getInit());
1949
1950 // Emit the condition variable if needed inside the entire cleanup scope
1951 // used by this special case for constant folded switches.
1952 if (S.getConditionVariable())
1953 EmitDecl(*S.getConditionVariable());
1954
1955 // At this point, we are no longer "within" a switch instance, so
1956 // we can temporarily enforce this to ensure that any embedded case
1957 // statements are not emitted.
1958 SwitchInsn = nullptr;
1959
1960 // Okay, we can dead code eliminate everything except this case. Emit the
1961 // specified series of statements and we're good.
1962 for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1963 EmitStmt(CaseStmts[i]);
1964 incrementProfileCounter(&S);
1965
1966 // Now we want to restore the saved switch instance so that nested
1967 // switches continue to function properly.
1968 SwitchInsn = SavedSwitchInsn;
1969
1970 return;
1971 }
1972 }
1973
1974 JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1975
1976 RunCleanupsScope ConditionScope(*this);
1977
1978 if (S.getInit())
1979 EmitStmt(S.getInit());
1980
1981 if (S.getConditionVariable())
1982 EmitDecl(*S.getConditionVariable());
1983 llvm::Value *CondV = EmitScalarExpr(S.getCond());
1984
1985 // Create basic block to hold stuff that comes after switch
1986 // statement. We also need to create a default block now so that
1987 // explicit case range tests can have a place to jump to on
1988 // failure.
1989 llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1990 SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1991 if (PGO.haveRegionCounts()) {
1992 // Walk the SwitchCase list to find how many there are.
1993 uint64_t DefaultCount = 0;
1994 unsigned NumCases = 0;
1995 for (const SwitchCase *Case = S.getSwitchCaseList();
1996 Case;
1997 Case = Case->getNextSwitchCase()) {
1998 if (isa<DefaultStmt>(Case))
1999 DefaultCount = getProfileCount(Case);
2000 NumCases += 1;
2001 }
2002 SwitchWeights = new SmallVector<uint64_t, 16>();
2003 SwitchWeights->reserve(NumCases);
2004 // The default needs to be first. We store the edge count, so we already
2005 // know the right weight.
2006 SwitchWeights->push_back(DefaultCount);
2007 } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2008 SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2009 // Initialize the default case.
2010 SwitchLikelihood->push_back(Stmt::LH_None);
2011 }
2012
2013 CaseRangeBlock = DefaultBlock;
2014
2015 // Clear the insertion point to indicate we are in unreachable code.
2016 Builder.ClearInsertionPoint();
2017
2018 // All break statements jump to SwitchExit. If BreakContinueStack is
2019 // non-empty, then reuse the last ContinueBlock for continue statements.
2020 JumpDest OuterContinue;
2021 if (!BreakContinueStack.empty())
2022 OuterContinue = BreakContinueStack.back().ContinueBlock;
2023
2024 BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2025
2026 // Emit switch body.
2027 EmitStmt(S.getBody());
2028
2029 BreakContinueStack.pop_back();
2030
2031 // Update the default block in case explicit case range tests have
2032 // been chained on top.
2033 SwitchInsn->setDefaultDest(CaseRangeBlock);
2034
2035 // If a default was never emitted:
2036 if (!DefaultBlock->getParent()) {
2037 // If we have cleanups, emit the default block so that there's a
2038 // place to jump through the cleanups from.
2039 if (ConditionScope.requiresCleanups()) {
2040 EmitBlock(DefaultBlock);
2041
2042 // Otherwise, just forward the default block to the switch end.
2043 } else {
2044 DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2045 delete DefaultBlock;
2046 }
2047 }
2048
2049 ConditionScope.ForceCleanup();
2050
2051 // Emit continuation.
2052 EmitBlock(SwitchExit.getBlock(), true);
2053 incrementProfileCounter(&S);
2054
2055 // If the switch has a condition wrapped by __builtin_unpredictable,
2056 // create metadata that specifies that the switch is unpredictable.
2057 // Don't bother if not optimizing because that metadata would not be used.
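// For example (illustrative), "switch (__builtin_unpredictable(x))" makes the
// condition a call to the builtin, and the check below attaches
// !unpredictable metadata to the emitted switch instruction.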
2058 auto *Call = dyn_cast<CallExpr>(S.getCond());
2059 if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2060 auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2061 if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2062 llvm::MDBuilder MDHelper(getLLVMContext());
2063 SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2064 MDHelper.createUnpredictable());
2065 }
2066 }
2067
2068 if (SwitchWeights) {
2069 assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2070 "switch weights do not match switch cases");
2071 // If there's only one jump destination there's no sense weighting it.
2072 if (SwitchWeights->size() > 1)
2073 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2074 createProfileWeights(*SwitchWeights));
2075 delete SwitchWeights;
2076 } else if (SwitchLikelihood) {
2077 assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2078 "switch likelihoods do not match switch cases");
2079 std::optional<SmallVector<uint64_t, 16>> LHW =
2080 getLikelihoodWeights(*SwitchLikelihood);
2081 if (LHW) {
2082 llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2083 SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2084 createProfileWeights(*LHW));
2085 }
2086 delete SwitchLikelihood;
2087 }
2088 SwitchInsn = SavedSwitchInsn;
2089 SwitchWeights = SavedSwitchWeights;
2090 SwitchLikelihood = SavedSwitchLikelihood;
2091 CaseRangeBlock = SavedCRBlock;
2092 }
2093
2094 static std::string
2095 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2096 SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
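// A few illustrative translations handled below (not an exhaustive list):
// the generic constraint "g" becomes "imr", the alternative separator ','
// becomes '|', modifiers such as '=', '+', '*', '?' and '!' are dropped, and
// a symbolic operand name like "[foo]" is resolved to its operand index.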
2097 std::string Result;
2098
2099 while (*Constraint) {
2100 switch (*Constraint) {
2101 default:
2102 Result += Target.convertConstraint(Constraint);
2103 break;
2104 // Ignore these
2105 case '*':
2106 case '?':
2107 case '!':
2108 case '=': // Will see this and the following in multi-alternative constraints.
2109 case '+':
2110 break;
2111 case '#': // Ignore the rest of the constraint alternative.
2112 while (Constraint[1] && Constraint[1] != ',')
2113 Constraint++;
2114 break;
2115 case '&':
2116 case '%':
2117 Result += *Constraint;
2118 while (Constraint[1] && Constraint[1] == *Constraint)
2119 Constraint++;
2120 break;
2121 case ',':
2122 Result += "|";
2123 break;
2124 case 'g':
2125 Result += "imr";
2126 break;
2127 case '[': {
2128 assert(OutCons &&
2129 "Must pass output names to constraints with a symbolic name");
2130 unsigned Index;
2131 bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2132 assert(result && "Could not resolve symbolic name"); (void)result;
2133 Result += llvm::utostr(Index);
2134 break;
2135 }
2136 }
2137
2138 Constraint++;
2139 }
2140
2141 return Result;
2142 }
2143
2144 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
2145 /// as using a particular register, add that register as a constraint that
2146 /// will be used in this asm stmt.
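/// For example (illustrative): for a declaration like
///   register int Val asm("eax");
/// used as an "=r" output, the returned constraint becomes "{eax}" (or
/// "&{eax}" when the operand is marked earlyclobber).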
2147 static std::string
2148 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2149 const TargetInfo &Target, CodeGenModule &CGM,
2150 const AsmStmt &Stmt, const bool EarlyClobber,
2151 std::string *GCCReg = nullptr) {
2152 const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2153 if (!AsmDeclRef)
2154 return Constraint;
2155 const ValueDecl &Value = *AsmDeclRef->getDecl();
2156 const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2157 if (!Variable)
2158 return Constraint;
2159 if (Variable->getStorageClass() != SC_Register)
2160 return Constraint;
2161 AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2162 if (!Attr)
2163 return Constraint;
2164 StringRef Register = Attr->getLabel();
2165 assert(Target.isValidGCCRegisterName(Register));
2166 // We're using validateOutputConstraint here because we only care if
2167 // this is a register constraint.
2168 TargetInfo::ConstraintInfo Info(Constraint, "");
2169 if (Target.validateOutputConstraint(Info) &&
2170 !Info.allowsRegister()) {
2171 CGM.ErrorUnsupported(&Stmt, "__asm__");
2172 return Constraint;
2173 }
2174 // Canonicalize the register here before returning it.
2175 Register = Target.getNormalizedGCCRegisterName(Register);
2176 if (GCCReg != nullptr)
2177 *GCCReg = Register.str();
2178 return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2179 }
2180
2181 std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2182 const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2183 QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2184 if (Info.allowsRegister() || !Info.allowsMemory()) {
2185 if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2186 return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2187
2188 llvm::Type *Ty = ConvertType(InputType);
2189 uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2190 if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2191 getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2192 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2193
2194 return {Builder.CreateLoad(Builder.CreateElementBitCast(
2195 InputValue.getAddress(*this), Ty)),
2196 nullptr};
2197 }
2198 }
2199
2200 Address Addr = InputValue.getAddress(*this);
2201 ConstraintStr += '*';
2202 return {Addr.getPointer(), Addr.getElementType()};
2203 }
2204
2205 std::pair<llvm::Value *, llvm::Type *>
2206 CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2207 const Expr *InputExpr,
2208 std::string &ConstraintStr) {
2209 // If this can't be a register or memory, i.e., has to be a constant
2210 // (immediate or symbolic), try to emit it as such.
2211 if (!Info.allowsRegister() && !Info.allowsMemory()) {
2212 if (Info.requiresImmediateConstant()) {
2213 Expr::EvalResult EVResult;
2214 InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2215
2216 llvm::APSInt IntResult;
2217 if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2218 getContext()))
2219 return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2220 }
2221
2222 Expr::EvalResult Result;
2223 if (InputExpr->EvaluateAsInt(Result, getContext()))
2224 return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2225 nullptr};
2226 }
2227
2228 if (Info.allowsRegister() || !Info.allowsMemory())
2229 if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2230 return {EmitScalarExpr(InputExpr), nullptr};
2231 if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2232 return {EmitScalarExpr(InputExpr), nullptr};
2233 InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2234 LValue Dest = EmitLValue(InputExpr);
2235 return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2236 InputExpr->getExprLoc());
2237 }
2238
2239 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2240 /// asm call instruction. The !srcloc MDNode contains a list of constant
2241 /// integers which are the source locations of the start of each line in the
2242 /// asm.
2243 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2244 CodeGenFunction &CGF) {
2245 SmallVector<llvm::Metadata *, 8> Locs;
2246 // Add the location of the first line to the MDNode.
2247 Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2248 CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2249 StringRef StrVal = Str->getString();
2250 if (!StrVal.empty()) {
2251 const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2252 const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2253 unsigned StartToken = 0;
2254 unsigned ByteOffset = 0;
2255
2256 // Add the location of the start of each subsequent line of the asm to the
2257 // MDNode.
2258 for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2259 if (StrVal[i] != '\n') continue;
2260 SourceLocation LineLoc = Str->getLocationOfByte(
2261 i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2262 Locs.push_back(llvm::ConstantAsMetadata::get(
2263 llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2264 }
2265 }
2266
2267 return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2268 }
2269
2270 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2271 bool HasUnwindClobber, bool ReadOnly,
2272 bool ReadNone, bool NoMerge, const AsmStmt &S,
2273 const std::vector<llvm::Type *> &ResultRegTypes,
2274 const std::vector<llvm::Type *> &ArgElemTypes,
2275 CodeGenFunction &CGF,
2276 std::vector<llvm::Value *> &RegResults) {
2277 if (!HasUnwindClobber)
2278 Result.addFnAttr(llvm::Attribute::NoUnwind);
2279
2280 if (NoMerge)
2281 Result.addFnAttr(llvm::Attribute::NoMerge);
2282 // Attach readnone and readonly attributes.
2283 if (!HasSideEffect) {
2284 if (ReadNone)
2285 Result.setDoesNotAccessMemory();
2286 else if (ReadOnly)
2287 Result.setOnlyReadsMemory();
2288 }
2289
2290 // Add elementtype attribute for indirect constraints.
2291 for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2292 if (Pair.value()) {
2293 auto Attr = llvm::Attribute::get(
2294 CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2295 Result.addParamAttr(Pair.index(), Attr);
2296 }
2297 }
2298
2299 // Slap the source location of the inline asm into a !srcloc metadata on the
2300 // call.
2301 if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2302 Result.setMetadata("srcloc",
2303 getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2304 else {
2305 // At least put the line number on MS inline asm blobs.
2306 llvm::Constant *Loc =
2307 llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2308 Result.setMetadata("srcloc",
2309 llvm::MDNode::get(CGF.getLLVMContext(),
2310 llvm::ConstantAsMetadata::get(Loc)));
2311 }
2312
2313 if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2314 // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2315 // convergent (meaning, they may call an intrinsically convergent op, such
2316 // as bar.sync, and so can't have certain optimizations applied around
2317 // them).
2318 Result.addFnAttr(llvm::Attribute::Convergent);
2319 // Extract all of the register value results from the asm.
2320 if (ResultRegTypes.size() == 1) {
2321 RegResults.push_back(&Result);
2322 } else {
2323 for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2324 llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2325 RegResults.push_back(Tmp);
2326 }
2327 }
2328 }
2329
2330 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2331 // Pop all cleanup blocks at the end of the asm statement.
2332 CodeGenFunction::RunCleanupsScope Cleanups(*this);
2333
2334 // Assemble the final asm string.
2335 std::string AsmString = S.generateAsmString(getContext());
2336
2337 // Get all the output and input constraints together.
2338 SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2339 SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2340
2341 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2342 StringRef Name;
2343 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2344 Name = GAS->getOutputName(i);
2345 TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2346 bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2347 assert(IsValid && "Failed to parse output constraint");
2348 OutputConstraintInfos.push_back(Info);
2349 }
2350
2351 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2352 StringRef Name;
2353 if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2354 Name = GAS->getInputName(i);
2355 TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2356 bool IsValid =
2357 getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2358 assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2359 InputConstraintInfos.push_back(Info);
2360 }
2361
2362 std::string Constraints;
2363
2364 std::vector<LValue> ResultRegDests;
2365 std::vector<QualType> ResultRegQualTys;
2366 std::vector<llvm::Type *> ResultRegTypes;
2367 std::vector<llvm::Type *> ResultTruncRegTypes;
2368 std::vector<llvm::Type *> ArgTypes;
2369 std::vector<llvm::Type *> ArgElemTypes;
2370 std::vector<llvm::Value*> Args;
2371 llvm::BitVector ResultTypeRequiresCast;
2372 llvm::BitVector ResultRegIsFlagReg;
2373
2374 // Keep track of inout constraints.
2375 std::string InOutConstraints;
2376 std::vector<llvm::Value*> InOutArgs;
2377 std::vector<llvm::Type*> InOutArgTypes;
2378 std::vector<llvm::Type*> InOutArgElemTypes;
2379
2380 // Keep track of out constraints for tied input operand.
2381 std::vector<std::string> OutputConstraints;
2382
2383 // Keep track of defined physregs.
2384 llvm::SmallSet<std::string, 8> PhysRegOutputs;
2385
2386 // An inline asm can be marked readonly if it meets the following conditions:
2387 //  - it doesn't have any side effects
2388 // - it doesn't clobber memory
2389 //  - it doesn't return a value by reference
2390 // It can be marked readnone if it doesn't have any input memory constraints
2391 // in addition to meeting the conditions listed above.
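// For instance (illustrative example, not from this file): a non-volatile
// statement such as
//   asm("mov %1, %0" : "=r"(dst) : "r"(src));
// can be marked readnone, while adding a "memory" clobber or an "m" input
// drops it to (at most) readonly, and 'asm volatile' prevents both.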
2392 bool ReadOnly = true, ReadNone = true;
2393
2394 for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2395 TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2396
2397 // Simplify the output constraint.
2398 std::string OutputConstraint(S.getOutputConstraint(i));
2399 OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2400 getTarget(), &OutputConstraintInfos);
2401
2402 const Expr *OutExpr = S.getOutputExpr(i);
2403 OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2404
2405 std::string GCCReg;
2406 OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2407 getTarget(), CGM, S,
2408 Info.earlyClobber(),
2409 &GCCReg);
2410 // Give an error on multiple outputs to the same physreg.
2411 if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2412 CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2413
2414 OutputConstraints.push_back(OutputConstraint);
2415 LValue Dest = EmitLValue(OutExpr);
2416 if (!Constraints.empty())
2417 Constraints += ',';
2418
2419 // If this is a register output, then make the inline asm return it
2420 // by value. If this is a memory result, return the value by reference.
2421 QualType QTy = OutExpr->getType();
2422 const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2423 hasAggregateEvaluationKind(QTy);
2424 if (!Info.allowsMemory() && IsScalarOrAggregate) {
2425
2426 Constraints += "=" + OutputConstraint;
2427 ResultRegQualTys.push_back(QTy);
2428 ResultRegDests.push_back(Dest);
2429
2430 bool IsFlagReg = llvm::StringRef(OutputConstraint).startswith("{@cc");
2431 ResultRegIsFlagReg.push_back(IsFlagReg);
2432
2433 llvm::Type *Ty = ConvertTypeForMem(QTy);
2434 const bool RequiresCast = Info.allowsRegister() &&
2435 (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2436 Ty->isAggregateType());
2437
2438 ResultTruncRegTypes.push_back(Ty);
2439 ResultTypeRequiresCast.push_back(RequiresCast);
2440
2441 if (RequiresCast) {
2442 unsigned Size = getContext().getTypeSize(QTy);
2443 Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2444 }
2445 ResultRegTypes.push_back(Ty);
2446 // If this output is tied to an input, and if the input is larger, then
2447 // we need to set the actual result type of the inline asm node to be the
2448 // same as the input type.
2449 if (Info.hasMatchingInput()) {
2450 unsigned InputNo;
2451 for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2452 TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2453 if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2454 break;
2455 }
2456 assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2457
2458 QualType InputTy = S.getInputExpr(InputNo)->getType();
2459 QualType OutputType = OutExpr->getType();
2460
2461 uint64_t InputSize = getContext().getTypeSize(InputTy);
2462 if (getContext().getTypeSize(OutputType) < InputSize) {
2463 // Form the asm to return the value as a larger integer or fp type.
2464 ResultRegTypes.back() = ConvertType(InputTy);
2465 }
2466 }
2467 if (llvm::Type* AdjTy =
2468 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2469 ResultRegTypes.back()))
2470 ResultRegTypes.back() = AdjTy;
2471 else {
2472 CGM.getDiags().Report(S.getAsmLoc(),
2473 diag::err_asm_invalid_type_in_input)
2474 << OutExpr->getType() << OutputConstraint;
2475 }
2476
2477 // Update largest vector width for any vector types.
2478 if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2479 LargestVectorWidth =
2480 std::max((uint64_t)LargestVectorWidth,
2481 VT->getPrimitiveSizeInBits().getKnownMinValue());
2482 } else {
2483 Address DestAddr = Dest.getAddress(*this);
2484 // Matrix types in memory are represented by arrays, but accessed through
2485 // vector pointers, with the alignment specified on the access operation.
2486 // For inline assembly, update pointer arguments to use vector pointers.
2487 // Otherwise there will be a mismatch if the matrix is also an
2488 // input argument, which is represented as a vector.
2489 if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2490 DestAddr = Builder.CreateElementBitCast(
2491 DestAddr, ConvertType(OutExpr->getType()));
2492
2493 ArgTypes.push_back(DestAddr.getType());
2494 ArgElemTypes.push_back(DestAddr.getElementType());
2495 Args.push_back(DestAddr.getPointer());
2496 Constraints += "=*";
2497 Constraints += OutputConstraint;
2498 ReadOnly = ReadNone = false;
2499 }
2500
2501 if (Info.isReadWrite()) {
2502 InOutConstraints += ',';
2503
2504 const Expr *InputExpr = S.getOutputExpr(i);
2505 llvm::Value *Arg;
2506 llvm::Type *ArgElemType;
2507 std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2508 Info, Dest, InputExpr->getType(), InOutConstraints,
2509 InputExpr->getExprLoc());
2510
2511 if (llvm::Type* AdjTy =
2512 getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2513 Arg->getType()))
2514 Arg = Builder.CreateBitCast(Arg, AdjTy);
2515
2516 // Update largest vector width for any vector types.
2517 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2518 LargestVectorWidth =
2519 std::max((uint64_t)LargestVectorWidth,
2520 VT->getPrimitiveSizeInBits().getKnownMinValue());
2521 // Only tie earlyclobber physregs.
2522 if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2523 InOutConstraints += llvm::utostr(i);
2524 else
2525 InOutConstraints += OutputConstraint;
2526
2527 InOutArgTypes.push_back(Arg->getType());
2528 InOutArgElemTypes.push_back(ArgElemType);
2529 InOutArgs.push_back(Arg);
2530 }
2531 }
2532
2533 // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2534 // to the return value slot. Only do this when returning in registers.
2535 if (isa<MSAsmStmt>(&S)) {
2536 const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2537 if (RetAI.isDirect() || RetAI.isExtend()) {
2538 // Make a fake lvalue for the return value slot.
2539 LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2540 CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2541 *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2542 ResultRegDests, AsmString, S.getNumOutputs());
2543 SawAsmBlock = true;
2544 }
2545 }
2546
2547 for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2548 const Expr *InputExpr = S.getInputExpr(i);
2549
2550 TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2551
2552 if (Info.allowsMemory())
2553 ReadNone = false;
2554
2555 if (!Constraints.empty())
2556 Constraints += ',';
2557
2558 // Simplify the input constraint.
2559 std::string InputConstraint(S.getInputConstraint(i));
2560 InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2561 &OutputConstraintInfos);
2562
2563 InputConstraint = AddVariableConstraints(
2564 InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2565 getTarget(), CGM, S, false /* No EarlyClobber */);
2566
2567 std::string ReplaceConstraint (InputConstraint);
2568 llvm::Value *Arg;
2569 llvm::Type *ArgElemType;
2570 std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2571
2572 // If this input argument is tied to a larger output result, extend the
2573 // input to be the same size as the output. The LLVM backend wants to see
2574 // the input and output of a matching constraint be the same size. Note
2575 // that GCC does not define what the top bits are here. We use zext because
2576 // that is usually cheaper, but LLVM IR should really get an anyext someday.
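// For example (illustrative): in asm("..." : "=r"(LongOut) : "0"(ShortIn)),
// the input is tied to output 0; since the output type is wider, ShortIn is
// zero-extended to the output's size before being passed to the asm.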
2577 if (Info.hasTiedOperand()) {
2578 unsigned Output = Info.getTiedOperand();
2579 QualType OutputType = S.getOutputExpr(Output)->getType();
2580 QualType InputTy = InputExpr->getType();
2581
2582 if (getContext().getTypeSize(OutputType) >
2583 getContext().getTypeSize(InputTy)) {
2584 // Use ptrtoint as appropriate so that we can do our extension.
2585 if (isa<llvm::PointerType>(Arg->getType()))
2586 Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2587 llvm::Type *OutputTy = ConvertType(OutputType);
2588 if (isa<llvm::IntegerType>(OutputTy))
2589 Arg = Builder.CreateZExt(Arg, OutputTy);
2590 else if (isa<llvm::PointerType>(OutputTy))
2591 Arg = Builder.CreateZExt(Arg, IntPtrTy);
2592 else if (OutputTy->isFloatingPointTy())
2593 Arg = Builder.CreateFPExt(Arg, OutputTy);
2594 }
2595 // Deal with the tied operands' constraint code in adjustInlineAsmType.
2596 ReplaceConstraint = OutputConstraints[Output];
2597 }
2598 if (llvm::Type* AdjTy =
2599 getTargetHooks().adjustInlineAsmType(*this, ReplaceConstraint,
2600 Arg->getType()))
2601 Arg = Builder.CreateBitCast(Arg, AdjTy);
2602 else
2603 CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2604 << InputExpr->getType() << InputConstraint;
2605
2606 // Update largest vector width for any vector types.
2607 if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2608 LargestVectorWidth =
2609 std::max((uint64_t)LargestVectorWidth,
2610 VT->getPrimitiveSizeInBits().getKnownMinValue());
2611
2612 ArgTypes.push_back(Arg->getType());
2613 ArgElemTypes.push_back(ArgElemType);
2614 Args.push_back(Arg);
2615 Constraints += InputConstraint;
2616 }
2617
2618 // Append the "input" part of inout constraints.
2619 for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2620 ArgTypes.push_back(InOutArgTypes[i]);
2621 ArgElemTypes.push_back(InOutArgElemTypes[i]);
2622 Args.push_back(InOutArgs[i]);
2623 }
2624 Constraints += InOutConstraints;
2625
2626 // Labels
2627 SmallVector<llvm::BasicBlock *, 16> Transfer;
2628 llvm::BasicBlock *Fallthrough = nullptr;
2629 bool IsGCCAsmGoto = false;
2630 if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2631 IsGCCAsmGoto = GS->isAsmGoto();
2632 if (IsGCCAsmGoto) {
2633 for (const auto *E : GS->labels()) {
2634 JumpDest Dest = getJumpDestForLabel(E->getLabel());
2635 Transfer.push_back(Dest.getBlock());
2636 if (!Constraints.empty())
2637 Constraints += ',';
2638 Constraints += "!i";
2639 }
2640 Fallthrough = createBasicBlock("asm.fallthrough");
2641 }
2642 }
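// As a sketch (illustrative): for an "asm goto" with two labels, two "!i"
// constraints are appended, the label blocks end up in Transfer, and the
// statement is later emitted as a 'callbr' whose normal destination is the
// asm.fallthrough block created above.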
2643
2644 bool HasUnwindClobber = false;
2645
2646 // Clobbers
2647 for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2648 StringRef Clobber = S.getClobber(i);
2649
2650 if (Clobber == "memory")
2651 ReadOnly = ReadNone = false;
2652 else if (Clobber == "unwind") {
2653 HasUnwindClobber = true;
2654 continue;
2655 } else if (Clobber != "cc") {
2656 Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2657 if (CGM.getCodeGenOpts().StackClashProtector &&
2658 getTarget().isSPRegName(Clobber)) {
2659 CGM.getDiags().Report(S.getAsmLoc(),
2660 diag::warn_stack_clash_protection_inline_asm);
2661 }
2662 }
2663
2664 if (isa<MSAsmStmt>(&S)) {
2665 if (Clobber == "eax" || Clobber == "edx") {
2666 if (Constraints.find("=&A") != std::string::npos)
2667 continue;
2668 std::string::size_type position1 =
2669 Constraints.find("={" + Clobber.str() + "}");
2670 if (position1 != std::string::npos) {
2671 Constraints.insert(position1 + 1, "&");
2672 continue;
2673 }
2674 std::string::size_type position2 = Constraints.find("=A");
2675 if (position2 != std::string::npos) {
2676 Constraints.insert(position2 + 1, "&");
2677 continue;
2678 }
2679 }
2680 }
2681 if (!Constraints.empty())
2682 Constraints += ',';
2683
2684 Constraints += "~{";
2685 Constraints += Clobber;
2686 Constraints += '}';
2687 }
2688
2689 assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2690 "unwind clobber can't be used with asm goto");
2691
2692 // Add machine specific clobbers
2693 std::string MachineClobbers = getTarget().getClobbers();
2694 if (!MachineClobbers.empty()) {
2695 if (!Constraints.empty())
2696 Constraints += ',';
2697 Constraints += MachineClobbers;
2698 }
2699
2700 llvm::Type *ResultType;
2701 if (ResultRegTypes.empty())
2702 ResultType = VoidTy;
2703 else if (ResultRegTypes.size() == 1)
2704 ResultType = ResultRegTypes[0];
2705 else
2706 ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2707
2708 llvm::FunctionType *FTy =
2709 llvm::FunctionType::get(ResultType, ArgTypes, false);
2710
2711 bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2712
2713 llvm::InlineAsm::AsmDialect GnuAsmDialect =
2714 CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2715 ? llvm::InlineAsm::AD_ATT
2716 : llvm::InlineAsm::AD_Intel;
2717 llvm::InlineAsm::AsmDialect AsmDialect = isa<MSAsmStmt>(&S) ?
2718 llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2719
2720 llvm::InlineAsm *IA = llvm::InlineAsm::get(
2721 FTy, AsmString, Constraints, HasSideEffect,
2722 /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
2723 std::vector<llvm::Value*> RegResults;
2724 if (IsGCCAsmGoto) {
2725 llvm::CallBrInst *Result =
2726 Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2727 EmitBlock(Fallthrough);
2728 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2729 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2730 ResultRegTypes, ArgElemTypes, *this, RegResults);
2731 } else if (HasUnwindClobber) {
2732 llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2733 UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2734 InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2735 *this, RegResults);
2736 } else {
2737 llvm::CallInst *Result =
2738 Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2739 UpdateAsmCallInst(cast<llvm::CallBase>(*Result), HasSideEffect, false,
2740 ReadOnly, ReadNone, InNoMergeAttributedStmt, S,
2741 ResultRegTypes, ArgElemTypes, *this, RegResults);
2742 }
2743
2744 assert(RegResults.size() == ResultRegTypes.size());
2745 assert(RegResults.size() == ResultTruncRegTypes.size());
2746 assert(RegResults.size() == ResultRegDests.size());
2747 // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2748 // in which case its size may grow.
2749 assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2750 assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2751 for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2752 llvm::Value *Tmp = RegResults[i];
2753 llvm::Type *TruncTy = ResultTruncRegTypes[i];
2754
2755 if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2756 // Target must guarantee the Value `Tmp` here is lowered to a boolean
2757 // value.
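// For example (illustrative, x86): a flag output constraint such as "=@ccz"
// produces a 0/1 value, so the llvm.assume(Tmp < 2) emitted below tells the
// optimizer the result is effectively boolean.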
2758 llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2759 llvm::Value *IsBooleanValue =
2760 Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2761 llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2762 Builder.CreateCall(FnAssume, IsBooleanValue);
2763 }
2764
2765 // If the result type of the LLVM IR asm doesn't match the result type of
2766 // the expression, do the conversion.
2767 if (ResultRegTypes[i] != ResultTruncRegTypes[i]) {
2768
2769 // Truncate the integer result to the right size; note that TruncTy can be
2770 // a pointer.
2771 if (TruncTy->isFloatingPointTy())
2772 Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2773 else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2774 uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2775 Tmp = Builder.CreateTrunc(Tmp,
2776 llvm::IntegerType::get(getLLVMContext(), (unsigned)ResSize));
2777 Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2778 } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2779 uint64_t TmpSize =CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2780 Tmp = Builder.CreatePtrToInt(Tmp,
2781 llvm::IntegerType::get(getLLVMContext(), (unsigned)TmpSize));
2782 Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2783 } else if (TruncTy->isIntegerTy()) {
2784 Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2785 } else if (TruncTy->isVectorTy()) {
2786 Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2787 }
2788 }
2789
2790 LValue Dest = ResultRegDests[i];
2791 // ResultTypeRequiresCast elements correspond to the first
2792 // ResultTypeRequiresCast.size() elements of RegResults.
2793 if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2794 unsigned Size = getContext().getTypeSize(ResultRegQualTys[i]);
2795 Address A = Builder.CreateElementBitCast(Dest.getAddress(*this),
2796 ResultRegTypes[i]);
2797 if (getTargetHooks().isScalarizableAsmOperand(*this, TruncTy)) {
2798 Builder.CreateStore(Tmp, A);
2799 continue;
2800 }
2801
2802 QualType Ty = getContext().getIntTypeForBitwidth(Size, /*Signed*/ false);
2803 if (Ty.isNull()) {
2804 const Expr *OutExpr = S.getOutputExpr(i);
2805 CGM.getDiags().Report(OutExpr->getExprLoc(),
2806 diag::err_store_value_to_reg);
2807 return;
2808 }
2809 Dest = MakeAddrLValue(A, Ty);
2810 }
2811 EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2812 }
2813 }
2814
2815 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2816 const RecordDecl *RD = S.getCapturedRecordDecl();
2817 QualType RecordTy = getContext().getRecordType(RD);
2818
2819 // Initialize the captured struct.
2820 LValue SlotLV =
2821 MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2822
2823 RecordDecl::field_iterator CurField = RD->field_begin();
2824 for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2825 E = S.capture_init_end();
2826 I != E; ++I, ++CurField) {
2827 LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2828 if (CurField->hasCapturedVLAType()) {
2829 EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2830 } else {
2831 EmitInitializerForField(*CurField, LV, *I);
2832 }
2833 }
2834
2835 return SlotLV;
2836 }
2837
2838 /// Generate an outlined function for the body of a CapturedStmt, store any
2839 /// captured variables into the captured struct, and call the outlined function.
2840 llvm::Function *
2841 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2842 LValue CapStruct = InitCapturedStruct(S);
2843
2844 // Emit the CapturedDecl
2845 CodeGenFunction CGF(CGM, true);
2846 CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2847 llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2848 delete CGF.CapturedStmtInfo;
2849
2850 // Emit call to the helper function.
2851 EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2852
2853 return F;
2854 }
2855
2856 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2857 LValue CapStruct = InitCapturedStruct(S);
2858 return CapStruct.getAddress(*this);
2859 }
2860
2861 /// Creates the outlined function for a CapturedStmt.
2862 llvm::Function *
2863 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2864 assert(CapturedStmtInfo &&
2865 "CapturedStmtInfo should be set when generating the captured function");
2866 const CapturedDecl *CD = S.getCapturedDecl();
2867 const RecordDecl *RD = S.getCapturedRecordDecl();
2868 SourceLocation Loc = S.getBeginLoc();
2869 assert(CD->hasBody() && "missing CapturedDecl body");
2870
2871 // Build the argument list.
2872 ASTContext &Ctx = CGM.getContext();
2873 FunctionArgList Args;
2874 Args.append(CD->param_begin(), CD->param_end());
2875
2876 // Create the function declaration.
2877 const CGFunctionInfo &FuncInfo =
2878 CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2879 llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2880
2881 llvm::Function *F =
2882 llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2883 CapturedStmtInfo->getHelperName(), &CGM.getModule());
2884 CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2885 if (CD->isNothrow())
2886 F->addFnAttr(llvm::Attribute::NoUnwind);
2887
2888 // Generate the function.
2889 StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2890 CD->getBody()->getBeginLoc());
2891 // Set the context parameter in CapturedStmtInfo.
2892 Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2893 CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2894
2895 // Initialize variable-length arrays.
2896 LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2897 Ctx.getTagDeclType(RD));
2898 for (auto *FD : RD->fields()) {
2899 if (FD->hasCapturedVLAType()) {
2900 auto *ExprArg =
2901 EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2902 .getScalarVal();
2903 auto VAT = FD->getCapturedVLAType();
2904 VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2905 }
2906 }
2907
2908 // If 'this' is captured, load it into CXXThisValue.
2909 if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2910 FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2911 LValue ThisLValue = EmitLValueForField(Base, FD);
2912 CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2913 }
2914
2915 PGO.assignRegionCounters(GlobalDecl(CD), F);
2916 CapturedStmtInfo->EmitBody(*this, CD->getBody());
2917 FinishFunction(CD->getBodyRBrace());
2918
2919 return F;
2920 }
2921