1 //===--- CGStmt.cpp - Emit LLVM Code from Statements ----------------------===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 //
9 // This contains code to emit Stmt nodes as LLVM code.
10 //
11 //===----------------------------------------------------------------------===//
12 
13 #include "CGDebugInfo.h"
14 #include "CGOpenMPRuntime.h"
15 #include "CodeGenFunction.h"
16 #include "CodeGenModule.h"
17 #include "TargetInfo.h"
18 #include "clang/AST/Attr.h"
19 #include "clang/AST/Expr.h"
20 #include "clang/AST/Stmt.h"
21 #include "clang/AST/StmtVisitor.h"
22 #include "clang/Basic/Builtins.h"
23 #include "clang/Basic/DiagnosticSema.h"
24 #include "clang/Basic/PrettyStackTrace.h"
25 #include "clang/Basic/SourceManager.h"
26 #include "clang/Basic/TargetInfo.h"
27 #include "llvm/ADT/ArrayRef.h"
28 #include "llvm/ADT/DenseMap.h"
29 #include "llvm/ADT/SmallSet.h"
30 #include "llvm/ADT/StringExtras.h"
31 #include "llvm/IR/Assumptions.h"
32 #include "llvm/IR/DataLayout.h"
33 #include "llvm/IR/InlineAsm.h"
34 #include "llvm/IR/Intrinsics.h"
35 #include "llvm/IR/MDBuilder.h"
36 #include "llvm/Support/SaveAndRestore.h"
37 #include <optional>
38 
39 using namespace clang;
40 using namespace CodeGen;
41 
42 //===----------------------------------------------------------------------===//
43 //                              Statement Emission
44 //===----------------------------------------------------------------------===//
45 
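/// EmitStopPoint - If we are emitting debug info, emit a source-location
/// "stop point" for the given statement and remember it as the last one.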
46 void CodeGenFunction::EmitStopPoint(const Stmt *S) {
47   if (CGDebugInfo *DI = getDebugInfo()) {
48     SourceLocation Loc;
49     Loc = S->getBeginLoc();
50     DI->EmitLocation(Builder, Loc);
51 
52     LastStopPoint = Loc;
53   }
54 }
55 
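/// EmitStmt - Emit the code for the given statement and statement attributes.
/// Statements that manage their own debug locations are handled by
/// EmitSimpleStmt; everything else gets a stop point before being dispatched
/// to the per-statement emitters below.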
56 void CodeGenFunction::EmitStmt(const Stmt *S, ArrayRef<const Attr *> Attrs) {
57   assert(S && "Null statement?");
58   PGO.setCurrentStmt(S);
59 
60   // These statements have their own debug info handling.
61   if (EmitSimpleStmt(S, Attrs))
62     return;
63 
64   // Check if we are generating unreachable code.
65   if (!HaveInsertPoint()) {
66     // If so, and the statement doesn't contain a label, then we do not need to
67     // generate actual code. This is safe because (1) the current point is
68     // unreachable, so we don't need to execute the code, and (2) we've already
69     // handled the statements which update internal data structures (like the
70     // local variable map) which could be used by subsequent statements.
71     if (!ContainsLabel(S)) {
72       // Verify that any decl statements were handled as simple; they may be in
73       // scope of subsequent reachable statements.
74       assert(!isa<DeclStmt>(*S) && "Unexpected DeclStmt!");
75       return;
76     }
77 
78     // Otherwise, make a new block to hold the code.
79     EnsureInsertPoint();
80   }
81 
82   // Generate a stoppoint if we are emitting debug info.
83   EmitStopPoint(S);
84 
85   // In OpenMP simd-only mode, ignore all OpenMP directives except for
86   // simd.
87   if (getLangOpts().OpenMP && getLangOpts().OpenMPSimd) {
88     if (const auto *D = dyn_cast<OMPExecutableDirective>(S)) {
89       EmitSimpleOMPExecutableDirective(*D);
90       return;
91     }
92   }
93 
94   switch (S->getStmtClass()) {
95   case Stmt::NoStmtClass:
96   case Stmt::CXXCatchStmtClass:
97   case Stmt::SEHExceptStmtClass:
98   case Stmt::SEHFinallyStmtClass:
99   case Stmt::MSDependentExistsStmtClass:
100     llvm_unreachable("invalid statement class to emit generically");
101   case Stmt::NullStmtClass:
102   case Stmt::CompoundStmtClass:
103   case Stmt::DeclStmtClass:
104   case Stmt::LabelStmtClass:
105   case Stmt::AttributedStmtClass:
106   case Stmt::GotoStmtClass:
107   case Stmt::BreakStmtClass:
108   case Stmt::ContinueStmtClass:
109   case Stmt::DefaultStmtClass:
110   case Stmt::CaseStmtClass:
111   case Stmt::SEHLeaveStmtClass:
112     llvm_unreachable("should have emitted these statements as simple");
113 
114 #define STMT(Type, Base)
115 #define ABSTRACT_STMT(Op)
116 #define EXPR(Type, Base) \
117   case Stmt::Type##Class:
118 #include "clang/AST/StmtNodes.inc"
119   {
120     // Remember the block we came in on.
121     llvm::BasicBlock *incoming = Builder.GetInsertBlock();
122     assert(incoming && "expression emission must have an insertion point");
123 
124     EmitIgnoredExpr(cast<Expr>(S));
125 
126     llvm::BasicBlock *outgoing = Builder.GetInsertBlock();
127     assert(outgoing && "expression emission cleared block!");
128 
129     // The expression emitters assume (reasonably!) that the insertion
130     // point is always set.  To maintain that, the call-emission code
131     // for noreturn functions has to enter a new block with no
132     // predecessors.  We want to kill that block and mark the current
133     // insertion point unreachable in the common case of a call like
134     // "exit();".  Since expression emission doesn't otherwise create
135     // blocks with no predecessors, we can just test for that.
136     // However, we must be careful not to do this to our incoming
137     // block, because *statement* emission does sometimes create
138     // reachable blocks which will have no predecessors until later in
139     // the function.  This occurs with, e.g., labels that are not
140     // reachable by fallthrough.
141     if (incoming != outgoing && outgoing->use_empty()) {
142       outgoing->eraseFromParent();
143       Builder.ClearInsertionPoint();
144     }
145     break;
146   }
147 
148   case Stmt::IndirectGotoStmtClass:
149     EmitIndirectGotoStmt(cast<IndirectGotoStmt>(*S)); break;
150 
151   case Stmt::IfStmtClass:      EmitIfStmt(cast<IfStmt>(*S));              break;
152   case Stmt::WhileStmtClass:   EmitWhileStmt(cast<WhileStmt>(*S), Attrs); break;
153   case Stmt::DoStmtClass:      EmitDoStmt(cast<DoStmt>(*S), Attrs);       break;
154   case Stmt::ForStmtClass:     EmitForStmt(cast<ForStmt>(*S), Attrs);     break;
155 
156   case Stmt::ReturnStmtClass:  EmitReturnStmt(cast<ReturnStmt>(*S));      break;
157 
158   case Stmt::SwitchStmtClass:  EmitSwitchStmt(cast<SwitchStmt>(*S));      break;
159   case Stmt::GCCAsmStmtClass:  // Intentional fall-through.
160   case Stmt::MSAsmStmtClass:   EmitAsmStmt(cast<AsmStmt>(*S));            break;
161   case Stmt::CoroutineBodyStmtClass:
162     EmitCoroutineBody(cast<CoroutineBodyStmt>(*S));
163     break;
164   case Stmt::CoreturnStmtClass:
165     EmitCoreturnStmt(cast<CoreturnStmt>(*S));
166     break;
167   case Stmt::CapturedStmtClass: {
168     const CapturedStmt *CS = cast<CapturedStmt>(S);
169     EmitCapturedStmt(*CS, CS->getCapturedRegionKind());
170     }
171     break;
172   case Stmt::ObjCAtTryStmtClass:
173     EmitObjCAtTryStmt(cast<ObjCAtTryStmt>(*S));
174     break;
175   case Stmt::ObjCAtCatchStmtClass:
176     llvm_unreachable(
177                     "@catch statements should be handled by EmitObjCAtTryStmt");
178   case Stmt::ObjCAtFinallyStmtClass:
179     llvm_unreachable(
180                   "@finally statements should be handled by EmitObjCAtTryStmt");
181   case Stmt::ObjCAtThrowStmtClass:
182     EmitObjCAtThrowStmt(cast<ObjCAtThrowStmt>(*S));
183     break;
184   case Stmt::ObjCAtSynchronizedStmtClass:
185     EmitObjCAtSynchronizedStmt(cast<ObjCAtSynchronizedStmt>(*S));
186     break;
187   case Stmt::ObjCForCollectionStmtClass:
188     EmitObjCForCollectionStmt(cast<ObjCForCollectionStmt>(*S));
189     break;
190   case Stmt::ObjCAutoreleasePoolStmtClass:
191     EmitObjCAutoreleasePoolStmt(cast<ObjCAutoreleasePoolStmt>(*S));
192     break;
193 
194   case Stmt::CXXTryStmtClass:
195     EmitCXXTryStmt(cast<CXXTryStmt>(*S));
196     break;
197   case Stmt::CXXForRangeStmtClass:
198     EmitCXXForRangeStmt(cast<CXXForRangeStmt>(*S), Attrs);
199     break;
200   case Stmt::SEHTryStmtClass:
201     EmitSEHTryStmt(cast<SEHTryStmt>(*S));
202     break;
203   case Stmt::OMPMetaDirectiveClass:
204     EmitOMPMetaDirective(cast<OMPMetaDirective>(*S));
205     break;
206   case Stmt::OMPCanonicalLoopClass:
207     EmitOMPCanonicalLoop(cast<OMPCanonicalLoop>(S));
208     break;
209   case Stmt::OMPParallelDirectiveClass:
210     EmitOMPParallelDirective(cast<OMPParallelDirective>(*S));
211     break;
212   case Stmt::OMPSimdDirectiveClass:
213     EmitOMPSimdDirective(cast<OMPSimdDirective>(*S));
214     break;
215   case Stmt::OMPTileDirectiveClass:
216     EmitOMPTileDirective(cast<OMPTileDirective>(*S));
217     break;
218   case Stmt::OMPUnrollDirectiveClass:
219     EmitOMPUnrollDirective(cast<OMPUnrollDirective>(*S));
220     break;
221   case Stmt::OMPForDirectiveClass:
222     EmitOMPForDirective(cast<OMPForDirective>(*S));
223     break;
224   case Stmt::OMPForSimdDirectiveClass:
225     EmitOMPForSimdDirective(cast<OMPForSimdDirective>(*S));
226     break;
227   case Stmt::OMPSectionsDirectiveClass:
228     EmitOMPSectionsDirective(cast<OMPSectionsDirective>(*S));
229     break;
230   case Stmt::OMPSectionDirectiveClass:
231     EmitOMPSectionDirective(cast<OMPSectionDirective>(*S));
232     break;
233   case Stmt::OMPSingleDirectiveClass:
234     EmitOMPSingleDirective(cast<OMPSingleDirective>(*S));
235     break;
236   case Stmt::OMPMasterDirectiveClass:
237     EmitOMPMasterDirective(cast<OMPMasterDirective>(*S));
238     break;
239   case Stmt::OMPCriticalDirectiveClass:
240     EmitOMPCriticalDirective(cast<OMPCriticalDirective>(*S));
241     break;
242   case Stmt::OMPParallelForDirectiveClass:
243     EmitOMPParallelForDirective(cast<OMPParallelForDirective>(*S));
244     break;
245   case Stmt::OMPParallelForSimdDirectiveClass:
246     EmitOMPParallelForSimdDirective(cast<OMPParallelForSimdDirective>(*S));
247     break;
248   case Stmt::OMPParallelMasterDirectiveClass:
249     EmitOMPParallelMasterDirective(cast<OMPParallelMasterDirective>(*S));
250     break;
251   case Stmt::OMPParallelSectionsDirectiveClass:
252     EmitOMPParallelSectionsDirective(cast<OMPParallelSectionsDirective>(*S));
253     break;
254   case Stmt::OMPTaskDirectiveClass:
255     EmitOMPTaskDirective(cast<OMPTaskDirective>(*S));
256     break;
257   case Stmt::OMPTaskyieldDirectiveClass:
258     EmitOMPTaskyieldDirective(cast<OMPTaskyieldDirective>(*S));
259     break;
260   case Stmt::OMPErrorDirectiveClass:
261     EmitOMPErrorDirective(cast<OMPErrorDirective>(*S));
262     break;
263   case Stmt::OMPBarrierDirectiveClass:
264     EmitOMPBarrierDirective(cast<OMPBarrierDirective>(*S));
265     break;
266   case Stmt::OMPTaskwaitDirectiveClass:
267     EmitOMPTaskwaitDirective(cast<OMPTaskwaitDirective>(*S));
268     break;
269   case Stmt::OMPTaskgroupDirectiveClass:
270     EmitOMPTaskgroupDirective(cast<OMPTaskgroupDirective>(*S));
271     break;
272   case Stmt::OMPFlushDirectiveClass:
273     EmitOMPFlushDirective(cast<OMPFlushDirective>(*S));
274     break;
275   case Stmt::OMPDepobjDirectiveClass:
276     EmitOMPDepobjDirective(cast<OMPDepobjDirective>(*S));
277     break;
278   case Stmt::OMPScanDirectiveClass:
279     EmitOMPScanDirective(cast<OMPScanDirective>(*S));
280     break;
281   case Stmt::OMPOrderedDirectiveClass:
282     EmitOMPOrderedDirective(cast<OMPOrderedDirective>(*S));
283     break;
284   case Stmt::OMPAtomicDirectiveClass:
285     EmitOMPAtomicDirective(cast<OMPAtomicDirective>(*S));
286     break;
287   case Stmt::OMPTargetDirectiveClass:
288     EmitOMPTargetDirective(cast<OMPTargetDirective>(*S));
289     break;
290   case Stmt::OMPTeamsDirectiveClass:
291     EmitOMPTeamsDirective(cast<OMPTeamsDirective>(*S));
292     break;
293   case Stmt::OMPCancellationPointDirectiveClass:
294     EmitOMPCancellationPointDirective(cast<OMPCancellationPointDirective>(*S));
295     break;
296   case Stmt::OMPCancelDirectiveClass:
297     EmitOMPCancelDirective(cast<OMPCancelDirective>(*S));
298     break;
299   case Stmt::OMPTargetDataDirectiveClass:
300     EmitOMPTargetDataDirective(cast<OMPTargetDataDirective>(*S));
301     break;
302   case Stmt::OMPTargetEnterDataDirectiveClass:
303     EmitOMPTargetEnterDataDirective(cast<OMPTargetEnterDataDirective>(*S));
304     break;
305   case Stmt::OMPTargetExitDataDirectiveClass:
306     EmitOMPTargetExitDataDirective(cast<OMPTargetExitDataDirective>(*S));
307     break;
308   case Stmt::OMPTargetParallelDirectiveClass:
309     EmitOMPTargetParallelDirective(cast<OMPTargetParallelDirective>(*S));
310     break;
311   case Stmt::OMPTargetParallelForDirectiveClass:
312     EmitOMPTargetParallelForDirective(cast<OMPTargetParallelForDirective>(*S));
313     break;
314   case Stmt::OMPTaskLoopDirectiveClass:
315     EmitOMPTaskLoopDirective(cast<OMPTaskLoopDirective>(*S));
316     break;
317   case Stmt::OMPTaskLoopSimdDirectiveClass:
318     EmitOMPTaskLoopSimdDirective(cast<OMPTaskLoopSimdDirective>(*S));
319     break;
320   case Stmt::OMPMasterTaskLoopDirectiveClass:
321     EmitOMPMasterTaskLoopDirective(cast<OMPMasterTaskLoopDirective>(*S));
322     break;
323   case Stmt::OMPMaskedTaskLoopDirectiveClass:
324     llvm_unreachable("masked taskloop directive not supported yet.");
325     break;
326   case Stmt::OMPMasterTaskLoopSimdDirectiveClass:
327     EmitOMPMasterTaskLoopSimdDirective(
328         cast<OMPMasterTaskLoopSimdDirective>(*S));
329     break;
330   case Stmt::OMPMaskedTaskLoopSimdDirectiveClass:
331     llvm_unreachable("masked taskloop simd directive not supported yet.");
332     break;
333   case Stmt::OMPParallelMasterTaskLoopDirectiveClass:
334     EmitOMPParallelMasterTaskLoopDirective(
335         cast<OMPParallelMasterTaskLoopDirective>(*S));
336     break;
337   case Stmt::OMPParallelMaskedTaskLoopDirectiveClass:
338     llvm_unreachable("parallel masked taskloop directive not supported yet.");
339     break;
340   case Stmt::OMPParallelMasterTaskLoopSimdDirectiveClass:
341     EmitOMPParallelMasterTaskLoopSimdDirective(
342         cast<OMPParallelMasterTaskLoopSimdDirective>(*S));
343     break;
344   case Stmt::OMPParallelMaskedTaskLoopSimdDirectiveClass:
345     llvm_unreachable(
346         "parallel masked taskloop simd directive not supported yet.");
347     break;
348   case Stmt::OMPDistributeDirectiveClass:
349     EmitOMPDistributeDirective(cast<OMPDistributeDirective>(*S));
350     break;
351   case Stmt::OMPTargetUpdateDirectiveClass:
352     EmitOMPTargetUpdateDirective(cast<OMPTargetUpdateDirective>(*S));
353     break;
354   case Stmt::OMPDistributeParallelForDirectiveClass:
355     EmitOMPDistributeParallelForDirective(
356         cast<OMPDistributeParallelForDirective>(*S));
357     break;
358   case Stmt::OMPDistributeParallelForSimdDirectiveClass:
359     EmitOMPDistributeParallelForSimdDirective(
360         cast<OMPDistributeParallelForSimdDirective>(*S));
361     break;
362   case Stmt::OMPDistributeSimdDirectiveClass:
363     EmitOMPDistributeSimdDirective(cast<OMPDistributeSimdDirective>(*S));
364     break;
365   case Stmt::OMPTargetParallelForSimdDirectiveClass:
366     EmitOMPTargetParallelForSimdDirective(
367         cast<OMPTargetParallelForSimdDirective>(*S));
368     break;
369   case Stmt::OMPTargetSimdDirectiveClass:
370     EmitOMPTargetSimdDirective(cast<OMPTargetSimdDirective>(*S));
371     break;
372   case Stmt::OMPTeamsDistributeDirectiveClass:
373     EmitOMPTeamsDistributeDirective(cast<OMPTeamsDistributeDirective>(*S));
374     break;
375   case Stmt::OMPTeamsDistributeSimdDirectiveClass:
376     EmitOMPTeamsDistributeSimdDirective(
377         cast<OMPTeamsDistributeSimdDirective>(*S));
378     break;
379   case Stmt::OMPTeamsDistributeParallelForSimdDirectiveClass:
380     EmitOMPTeamsDistributeParallelForSimdDirective(
381         cast<OMPTeamsDistributeParallelForSimdDirective>(*S));
382     break;
383   case Stmt::OMPTeamsDistributeParallelForDirectiveClass:
384     EmitOMPTeamsDistributeParallelForDirective(
385         cast<OMPTeamsDistributeParallelForDirective>(*S));
386     break;
387   case Stmt::OMPTargetTeamsDirectiveClass:
388     EmitOMPTargetTeamsDirective(cast<OMPTargetTeamsDirective>(*S));
389     break;
390   case Stmt::OMPTargetTeamsDistributeDirectiveClass:
391     EmitOMPTargetTeamsDistributeDirective(
392         cast<OMPTargetTeamsDistributeDirective>(*S));
393     break;
394   case Stmt::OMPTargetTeamsDistributeParallelForDirectiveClass:
395     EmitOMPTargetTeamsDistributeParallelForDirective(
396         cast<OMPTargetTeamsDistributeParallelForDirective>(*S));
397     break;
398   case Stmt::OMPTargetTeamsDistributeParallelForSimdDirectiveClass:
399     EmitOMPTargetTeamsDistributeParallelForSimdDirective(
400         cast<OMPTargetTeamsDistributeParallelForSimdDirective>(*S));
401     break;
402   case Stmt::OMPTargetTeamsDistributeSimdDirectiveClass:
403     EmitOMPTargetTeamsDistributeSimdDirective(
404         cast<OMPTargetTeamsDistributeSimdDirective>(*S));
405     break;
406   case Stmt::OMPInteropDirectiveClass:
407     EmitOMPInteropDirective(cast<OMPInteropDirective>(*S));
408     break;
409   case Stmt::OMPDispatchDirectiveClass:
410     llvm_unreachable("Dispatch directive not supported yet.");
411     break;
412   case Stmt::OMPMaskedDirectiveClass:
413     EmitOMPMaskedDirective(cast<OMPMaskedDirective>(*S));
414     break;
415   case Stmt::OMPGenericLoopDirectiveClass:
416     EmitOMPGenericLoopDirective(cast<OMPGenericLoopDirective>(*S));
417     break;
418   case Stmt::OMPTeamsGenericLoopDirectiveClass:
419     EmitOMPTeamsGenericLoopDirective(cast<OMPTeamsGenericLoopDirective>(*S));
420     break;
421   case Stmt::OMPTargetTeamsGenericLoopDirectiveClass:
422     EmitOMPTargetTeamsGenericLoopDirective(
423         cast<OMPTargetTeamsGenericLoopDirective>(*S));
424     break;
425   case Stmt::OMPParallelGenericLoopDirectiveClass:
426     EmitOMPParallelGenericLoopDirective(
427         cast<OMPParallelGenericLoopDirective>(*S));
428     break;
429   case Stmt::OMPTargetParallelGenericLoopDirectiveClass:
430     EmitOMPTargetParallelGenericLoopDirective(
431         cast<OMPTargetParallelGenericLoopDirective>(*S));
432     break;
433   case Stmt::OMPParallelMaskedDirectiveClass:
434     EmitOMPParallelMaskedDirective(cast<OMPParallelMaskedDirective>(*S));
435     break;
436   }
437 }
438 
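/// EmitSimpleStmt - Emit the given statement if it is one of the "simple"
/// statement kinds that handle their own debug locations. Returns true if
/// the statement was handled; false if the caller must emit it generically.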
439 bool CodeGenFunction::EmitSimpleStmt(const Stmt *S,
440                                      ArrayRef<const Attr *> Attrs) {
441   switch (S->getStmtClass()) {
442   default:
443     return false;
444   case Stmt::NullStmtClass:
445     break;
446   case Stmt::CompoundStmtClass:
447     EmitCompoundStmt(cast<CompoundStmt>(*S));
448     break;
449   case Stmt::DeclStmtClass:
450     EmitDeclStmt(cast<DeclStmt>(*S));
451     break;
452   case Stmt::LabelStmtClass:
453     EmitLabelStmt(cast<LabelStmt>(*S));
454     break;
455   case Stmt::AttributedStmtClass:
456     EmitAttributedStmt(cast<AttributedStmt>(*S));
457     break;
458   case Stmt::GotoStmtClass:
459     EmitGotoStmt(cast<GotoStmt>(*S));
460     break;
461   case Stmt::BreakStmtClass:
462     EmitBreakStmt(cast<BreakStmt>(*S));
463     break;
464   case Stmt::ContinueStmtClass:
465     EmitContinueStmt(cast<ContinueStmt>(*S));
466     break;
467   case Stmt::DefaultStmtClass:
468     EmitDefaultStmt(cast<DefaultStmt>(*S), Attrs);
469     break;
470   case Stmt::CaseStmtClass:
471     EmitCaseStmt(cast<CaseStmt>(*S), Attrs);
472     break;
473   case Stmt::SEHLeaveStmtClass:
474     EmitSEHLeaveStmt(cast<SEHLeaveStmt>(*S));
475     break;
476   }
477   return true;
478 }
479 
480 /// EmitCompoundStmt - Emit a compound statement {..} node.  If GetLast is true,
481 /// this captures the expression result of the last sub-statement and returns it
482 /// (for use by the statement expression extension).
483 Address CodeGenFunction::EmitCompoundStmt(const CompoundStmt &S, bool GetLast,
484                                           AggValueSlot AggSlot) {
485   PrettyStackTraceLoc CrashInfo(getContext().getSourceManager(),S.getLBracLoc(),
486                              "LLVM IR generation of compound statement ('{}')");
487 
488   // Keep track of the current cleanup stack depth, including debug scopes.
489   LexicalScope Scope(*this, S.getSourceRange());
490 
491   return EmitCompoundStmtWithoutScope(S, GetLast, AggSlot);
492 }
493 
494 Address
495 CodeGenFunction::EmitCompoundStmtWithoutScope(const CompoundStmt &S,
496                                               bool GetLast,
497                                               AggValueSlot AggSlot) {
498 
499   const Stmt *ExprResult = S.getStmtExprResult();
500   assert((!GetLast || (GetLast && ExprResult)) &&
501          "If GetLast is true then the CompoundStmt must have a StmtExprResult");
502 
503   Address RetAlloca = Address::invalid();
504 
505   for (auto *CurStmt : S.body()) {
506     if (GetLast && ExprResult == CurStmt) {
507       // We have to special case labels here.  They are statements, but when put
508       // at the end of a statement expression, they yield the value of their
509       // subexpression.  Handle this by walking through all labels we encounter,
510       // emitting them before we evaluate the subexpr.
511       // Similar issues arise for attributed statements.
512       while (!isa<Expr>(ExprResult)) {
513         if (const auto *LS = dyn_cast<LabelStmt>(ExprResult)) {
514           EmitLabel(LS->getDecl());
515           ExprResult = LS->getSubStmt();
516         } else if (const auto *AS = dyn_cast<AttributedStmt>(ExprResult)) {
517           // FIXME: Update this if we ever have attributes that affect the
518           // semantics of an expression.
519           ExprResult = AS->getSubStmt();
520         } else {
521           llvm_unreachable("unknown value statement");
522         }
523       }
524 
525       EnsureInsertPoint();
526 
527       const Expr *E = cast<Expr>(ExprResult);
528       QualType ExprTy = E->getType();
529       if (hasAggregateEvaluationKind(ExprTy)) {
530         EmitAggExpr(E, AggSlot);
531       } else {
532         // We can't return an RValue here because there might be cleanups at
533         // the end of the StmtExpr.  Because of that, we have to emit the result
534         // here into a temporary alloca.
535         RetAlloca = CreateMemTemp(ExprTy);
536         EmitAnyExprToMem(E, RetAlloca, Qualifiers(),
537                          /*IsInit*/ false);
538       }
539     } else {
540       EmitStmt(CurStmt);
541     }
542   }
543 
544   return RetAlloca;
545 }
546 
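/// SimplifyForwardingBlocks - If the given block contains nothing but an
/// unconditional branch and no cleanups are in scope, fold the block into
/// its successor and erase it.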
547 void CodeGenFunction::SimplifyForwardingBlocks(llvm::BasicBlock *BB) {
548   llvm::BranchInst *BI = dyn_cast<llvm::BranchInst>(BB->getTerminator());
549 
550   // If there is a cleanup stack, then it isn't worth trying to
551   // simplify this block (we would need to remove it from the scope map
552   // and cleanup entry).
553   if (!EHStack.empty())
554     return;
555 
556   // Can only simplify direct branches.
557   if (!BI || !BI->isUnconditional())
558     return;
559 
560   // Can only simplify empty blocks.
561   if (BI->getIterator() != BB->begin())
562     return;
563 
564   BB->replaceAllUsesWith(BI->getSuccessor(0));
565   BI->eraseFromParent();
566   BB->eraseFromParent();
567 }
568 
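/// EmitBlock - Append the given block to the function, falling through to it
/// from the current block if necessary, and make it the insertion point. If
/// IsFinished is true and the block has no uses, it is deleted instead.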
569 void CodeGenFunction::EmitBlock(llvm::BasicBlock *BB, bool IsFinished) {
570   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
571 
572   // Fall out of the current block (if necessary).
573   EmitBranch(BB);
574 
575   if (IsFinished && BB->use_empty()) {
576     delete BB;
577     return;
578   }
579 
580   // Place the block after the current block, if possible, or else at
581   // the end of the function.
582   if (CurBB && CurBB->getParent())
583     CurFn->insert(std::next(CurBB->getIterator()), BB);
584   else
585     CurFn->insert(CurFn->end(), BB);
586   Builder.SetInsertPoint(BB);
587 }
588 
589 void CodeGenFunction::EmitBranch(llvm::BasicBlock *Target) {
590   // Emit a branch from the current block to the target one if this
591   // was a real block.  If this was just a fall-through block after a
592   // terminator, don't emit it.
593   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
594 
595   if (!CurBB || CurBB->getTerminator()) {
596     // If there is no insert point or the previous block is already
597     // terminated, don't touch it.
598   } else {
599     // Otherwise, create a fall-through branch.
600     Builder.CreateBr(Target);
601   }
602 
603   Builder.ClearInsertionPoint();
604 }
605 
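/// EmitBlockAfterUses - Append the given block to the function after a block
/// that uses it (or at the end of the function if it has no instruction
/// users yet) and make it the insertion point.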
606 void CodeGenFunction::EmitBlockAfterUses(llvm::BasicBlock *block) {
607   bool inserted = false;
608   for (llvm::User *u : block->users()) {
609     if (llvm::Instruction *insn = dyn_cast<llvm::Instruction>(u)) {
610       CurFn->insert(std::next(insn->getParent()->getIterator()), block);
611       inserted = true;
612       break;
613     }
614   }
615 
616   if (!inserted)
617     CurFn->insert(CurFn->end(), block);
618 
619   Builder.SetInsertPoint(block);
620 }
621 
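/// getJumpDestForLabel - Return the jump destination for the given label,
/// creating a new (not yet inserted) block for it on first use.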
622 CodeGenFunction::JumpDest
623 CodeGenFunction::getJumpDestForLabel(const LabelDecl *D) {
624   JumpDest &Dest = LabelMap[D];
625   if (Dest.isValid()) return Dest;
626 
627   // Create, but don't insert, the new block.
628   Dest = JumpDest(createBasicBlock(D->getName()),
629                   EHScopeStack::stable_iterator::invalid(),
630                   NextCleanupDestIndex++);
631   return Dest;
632 }
633 
634 void CodeGenFunction::EmitLabel(const LabelDecl *D) {
635   // Add this label to the current lexical scope if we're within any
636   // normal cleanups.  Jumps "in" to this label --- when permitted by
637   // the language --- may need to be routed around such cleanups.
638   if (EHStack.hasNormalCleanups() && CurLexicalScope)
639     CurLexicalScope->addLabel(D);
640 
641   JumpDest &Dest = LabelMap[D];
642 
643   // If we didn't need a forward reference to this label, just go
644   // ahead and create a destination at the current scope.
645   if (!Dest.isValid()) {
646     Dest = getJumpDestInCurrentScope(D->getName());
647 
648   // Otherwise, we need to give this label a target depth and remove
649   // it from the branch-fixups list.
650   } else {
651     assert(!Dest.getScopeDepth().isValid() && "already emitted label!");
652     Dest.setScopeDepth(EHStack.stable_begin());
653     ResolveBranchFixups(Dest.getBlock());
654   }
655 
656   EmitBlock(Dest.getBlock());
657 
658   // Emit debug info for labels.
659   if (CGDebugInfo *DI = getDebugInfo()) {
660     if (CGM.getCodeGenOpts().hasReducedDebugInfo()) {
661       DI->setLocation(D->getLocation());
662       DI->EmitLabel(D, Builder);
663     }
664   }
665 
666   incrementProfileCounter(D->getStmt());
667 }
668 
669 /// Change the cleanup scope of the labels in this lexical scope to
670 /// match the scope of the enclosing context.
671 void CodeGenFunction::LexicalScope::rescopeLabels() {
672   assert(!Labels.empty());
673   EHScopeStack::stable_iterator innermostScope
674     = CGF.EHStack.getInnermostNormalCleanup();
675 
676   // Change the scope depth of all the labels.
677   for (SmallVectorImpl<const LabelDecl*>::const_iterator
678          i = Labels.begin(), e = Labels.end(); i != e; ++i) {
679     assert(CGF.LabelMap.count(*i));
680     JumpDest &dest = CGF.LabelMap.find(*i)->second;
681     assert(dest.getScopeDepth().isValid());
682     assert(innermostScope.encloses(dest.getScopeDepth()));
683     dest.setScopeDepth(innermostScope);
684   }
685 
686   // Reparent the labels if the new scope also has cleanups.
687   if (innermostScope != EHScopeStack::stable_end() && ParentScope) {
688     ParentScope->Labels.append(Labels.begin(), Labels.end());
689   }
690 }
691 
692 
693 void CodeGenFunction::EmitLabelStmt(const LabelStmt &S) {
694   EmitLabel(S.getDecl());
695 
696   // IsEHa - emit eha.scope.begin if it's a side entry of a scope
697   if (getLangOpts().EHAsynch && S.isSideEntry())
698     EmitSehCppScopeBegin();
699 
700   EmitStmt(S.getSubStmt());
701 }
702 
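/// EmitAttributedStmt - Emit an attributed statement, recording attributes
/// such as nomerge, noinline, always_inline, and musttail so that they can
/// affect the calls emitted within the sub-statement.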
703 void CodeGenFunction::EmitAttributedStmt(const AttributedStmt &S) {
704   bool nomerge = false;
705   bool noinline = false;
706   bool alwaysinline = false;
707   const CallExpr *musttail = nullptr;
708 
709   for (const auto *A : S.getAttrs()) {
710     switch (A->getKind()) {
711     default:
712       break;
713     case attr::NoMerge:
714       nomerge = true;
715       break;
716     case attr::NoInline:
717       noinline = true;
718       break;
719     case attr::AlwaysInline:
720       alwaysinline = true;
721       break;
722     case attr::MustTail:
723       const Stmt *Sub = S.getSubStmt();
724       const ReturnStmt *R = cast<ReturnStmt>(Sub);
725       musttail = cast<CallExpr>(R->getRetValue()->IgnoreParens());
726       break;
727     }
728   }
729   SaveAndRestore save_nomerge(InNoMergeAttributedStmt, nomerge);
730   SaveAndRestore save_noinline(InNoInlineAttributedStmt, noinline);
731   SaveAndRestore save_alwaysinline(InAlwaysInlineAttributedStmt, alwaysinline);
732   SaveAndRestore save_musttail(MustTailCall, musttail);
733   EmitStmt(S.getSubStmt(), S.getAttrs());
734 }
735 
736 void CodeGenFunction::EmitGotoStmt(const GotoStmt &S) {
737   // If this code is reachable then emit a stop point (if generating
738   // debug info). We have to do this ourselves because we are on the
739   // "simple" statement path.
740   if (HaveInsertPoint())
741     EmitStopPoint(&S);
742 
743   EmitBranchThroughCleanup(getJumpDestForLabel(S.getLabel()));
744 }
745 
746 
747 void CodeGenFunction::EmitIndirectGotoStmt(const IndirectGotoStmt &S) {
748   if (const LabelDecl *Target = S.getConstantTarget()) {
749     EmitBranchThroughCleanup(getJumpDestForLabel(Target));
750     return;
751   }
752 
753   // Ensure that we have an i8* for our PHI node.
754   llvm::Value *V = Builder.CreateBitCast(EmitScalarExpr(S.getTarget()),
755                                          Int8PtrTy, "addr");
756   llvm::BasicBlock *CurBB = Builder.GetInsertBlock();
757 
758   // Get the basic block for the indirect goto.
759   llvm::BasicBlock *IndGotoBB = GetIndirectGotoBlock();
760 
761   // The first instruction in the block has to be the PHI for the switch dest;
762   // add an entry for this branch.
763   cast<llvm::PHINode>(IndGotoBB->begin())->addIncoming(V, CurBB);
764 
765   EmitBranch(IndGotoBB);
766 }
767 
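/// EmitIfStmt - Emit an if statement. For 'if consteval' and for conditions
/// that constant fold, the dead arm is skipped entirely where possible.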
768 void CodeGenFunction::EmitIfStmt(const IfStmt &S) {
769   // The else branch of a consteval if statement is always the only branch that
770   // can be runtime evaluated.
771   if (S.isConsteval()) {
772     const Stmt *Executed = S.isNegatedConsteval() ? S.getThen() : S.getElse();
773     if (Executed) {
774       RunCleanupsScope ExecutedScope(*this);
775       EmitStmt(Executed);
776     }
777     return;
778   }
779 
780   // C99 6.8.4.1: The first substatement is executed if the expression compares
781   // unequal to 0.  The condition must be a scalar type.
782   LexicalScope ConditionScope(*this, S.getCond()->getSourceRange());
783 
784   if (S.getInit())
785     EmitStmt(S.getInit());
786 
787   if (S.getConditionVariable())
788     EmitDecl(*S.getConditionVariable());
789 
790   // If the condition constant folds and can be elided, try to avoid emitting
791   // the condition and the dead arm of the if/else.
792   bool CondConstant;
793   if (ConstantFoldsToSimpleInteger(S.getCond(), CondConstant,
794                                    S.isConstexpr())) {
795     // Figure out which block (then or else) is executed.
796     const Stmt *Executed = S.getThen();
797     const Stmt *Skipped  = S.getElse();
798     if (!CondConstant)  // Condition false?
799       std::swap(Executed, Skipped);
800 
801     // If the skipped block has no labels in it, just emit the executed block.
802     // This avoids emitting dead code and simplifies the CFG substantially.
803     if (S.isConstexpr() || !ContainsLabel(Skipped)) {
804       if (CondConstant)
805         incrementProfileCounter(&S);
806       if (Executed) {
807         RunCleanupsScope ExecutedScope(*this);
808         EmitStmt(Executed);
809       }
810       return;
811     }
812   }
813 
814   // Otherwise, the condition did not fold, or we couldn't elide it.  Just emit
815   // the conditional branch.
816   llvm::BasicBlock *ThenBlock = createBasicBlock("if.then");
817   llvm::BasicBlock *ContBlock = createBasicBlock("if.end");
818   llvm::BasicBlock *ElseBlock = ContBlock;
819   if (S.getElse())
820     ElseBlock = createBasicBlock("if.else");
821 
822   // Prefer the PGO based weights over the likelihood attribute.
823   // When the build isn't optimized the metadata isn't used, so don't generate
824   // it.
825   // Also, differentiate between disabled PGO and a never executed branch with
826   // PGO. Assuming PGO is in use:
827   // - we want to ignore the [[likely]] attribute if the branch is never
828   // executed,
829   // - assuming the profile is poor, preserving the attribute may still be
830   // beneficial.
831   // As an approximation, preserve the attribute only if both the branch and the
832   // parent context were not executed.
833   Stmt::Likelihood LH = Stmt::LH_None;
834   uint64_t ThenCount = getProfileCount(S.getThen());
835   if (!ThenCount && !getCurrentProfileCount() &&
836       CGM.getCodeGenOpts().OptimizationLevel)
837     LH = Stmt::getLikelihood(S.getThen(), S.getElse());
838   EmitBranchOnBoolExpr(S.getCond(), ThenBlock, ElseBlock, ThenCount, LH);
839 
840   // Emit the 'then' code.
841   EmitBlock(ThenBlock);
842   incrementProfileCounter(&S);
843   {
844     RunCleanupsScope ThenScope(*this);
845     EmitStmt(S.getThen());
846   }
847   EmitBranch(ContBlock);
848 
849   // Emit the 'else' code if present.
850   if (const Stmt *Else = S.getElse()) {
851     {
852       // There is no need to emit line number for an unconditional branch.
853       auto NL = ApplyDebugLocation::CreateEmpty(*this);
854       EmitBlock(ElseBlock);
855     }
856     {
857       RunCleanupsScope ElseScope(*this);
858       EmitStmt(Else);
859     }
860     {
861       // There is no need to emit line number for an unconditional branch.
862       auto NL = ApplyDebugLocation::CreateEmpty(*this);
863       EmitBranch(ContBlock);
864     }
865   }
866 
867   // Emit the continuation block for code after the if.
868   EmitBlock(ContBlock, true);
869 }
870 
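/// EmitWhileStmt - Emit a while statement. The loop header doubles as the
/// 'continue' target and the exit block as the 'break' target.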
871 void CodeGenFunction::EmitWhileStmt(const WhileStmt &S,
872                                     ArrayRef<const Attr *> WhileAttrs) {
873   // Emit the header for the loop, which will also become
874   // the continue target.
875   JumpDest LoopHeader = getJumpDestInCurrentScope("while.cond");
876   EmitBlock(LoopHeader.getBlock());
877 
878   // Create an exit block for when the condition fails, which will
879   // also become the break target.
880   JumpDest LoopExit = getJumpDestInCurrentScope("while.end");
881 
882   // Store the blocks to use for break and continue.
883   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopHeader));
884 
885   // C++ [stmt.while]p2:
886   //   When the condition of a while statement is a declaration, the
887   //   scope of the variable that is declared extends from its point
888   //   of declaration (3.3.2) to the end of the while statement.
889   //   [...]
890   //   The object created in a condition is destroyed and created
891   //   with each iteration of the loop.
892   RunCleanupsScope ConditionScope(*this);
893 
894   if (S.getConditionVariable())
895     EmitDecl(*S.getConditionVariable());
896 
897   // Evaluate the conditional in the while header.  C99 6.8.5.1: The
898   // evaluation of the controlling expression takes place before each
899   // execution of the loop body.
900   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
901 
902   // while(1) is common, avoid extra exit blocks.  Be sure
903   // to correctly handle break/continue though.
904   llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
905   bool CondIsConstInt = C != nullptr;
906   bool EmitBoolCondBranch = !CondIsConstInt || !C->isOne();
907   const SourceRange &R = S.getSourceRange();
908   LoopStack.push(LoopHeader.getBlock(), CGM.getContext(), CGM.getCodeGenOpts(),
909                  WhileAttrs, SourceLocToDebugLoc(R.getBegin()),
910                  SourceLocToDebugLoc(R.getEnd()),
911                  checkIfLoopMustProgress(CondIsConstInt));
912 
913   // As long as the condition is true, go to the loop body.
914   llvm::BasicBlock *LoopBody = createBasicBlock("while.body");
915   if (EmitBoolCondBranch) {
916     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
917     if (ConditionScope.requiresCleanups())
918       ExitBlock = createBasicBlock("while.exit");
919     llvm::MDNode *Weights =
920         createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
921     if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
922       BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
923           BoolCondVal, Stmt::getLikelihood(S.getBody()));
924     Builder.CreateCondBr(BoolCondVal, LoopBody, ExitBlock, Weights);
925 
926     if (ExitBlock != LoopExit.getBlock()) {
927       EmitBlock(ExitBlock);
928       EmitBranchThroughCleanup(LoopExit);
929     }
930   } else if (const Attr *A = Stmt::getLikelihoodAttr(S.getBody())) {
931     CGM.getDiags().Report(A->getLocation(),
932                           diag::warn_attribute_has_no_effect_on_infinite_loop)
933         << A << A->getRange();
934     CGM.getDiags().Report(
935         S.getWhileLoc(),
936         diag::note_attribute_has_no_effect_on_infinite_loop_here)
937         << SourceRange(S.getWhileLoc(), S.getRParenLoc());
938   }
939 
940   // Emit the loop body.  We have to emit this in a cleanup scope
941   // because it might be a singleton DeclStmt.
942   {
943     RunCleanupsScope BodyScope(*this);
944     EmitBlock(LoopBody);
945     incrementProfileCounter(&S);
946     EmitStmt(S.getBody());
947   }
948 
949   BreakContinueStack.pop_back();
950 
951   // Immediately force cleanup.
952   ConditionScope.ForceCleanup();
953 
954   EmitStopPoint(&S);
955   // Branch to the loop header again.
956   EmitBranch(LoopHeader.getBlock());
957 
958   LoopStack.pop();
959 
960   // Emit the exit block.
961   EmitBlock(LoopExit.getBlock(), true);
962 
963   // If we skipped emitting the conditional branch, the LoopHeader is
964   // typically just an unconditional branch; try to erase it.
965   if (!EmitBoolCondBranch)
966     SimplifyForwardingBlocks(LoopHeader.getBlock());
967 }
968 
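/// EmitDoStmt - Emit a do..while statement. The condition block is the
/// 'continue' target and the exit block is the 'break' target.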
969 void CodeGenFunction::EmitDoStmt(const DoStmt &S,
970                                  ArrayRef<const Attr *> DoAttrs) {
971   JumpDest LoopExit = getJumpDestInCurrentScope("do.end");
972   JumpDest LoopCond = getJumpDestInCurrentScope("do.cond");
973 
974   uint64_t ParentCount = getCurrentProfileCount();
975 
976   // Store the blocks to use for break and continue.
977   BreakContinueStack.push_back(BreakContinue(LoopExit, LoopCond));
978 
979   // Emit the body of the loop.
980   llvm::BasicBlock *LoopBody = createBasicBlock("do.body");
981 
982   EmitBlockWithFallThrough(LoopBody, &S);
983   {
984     RunCleanupsScope BodyScope(*this);
985     EmitStmt(S.getBody());
986   }
987 
988   EmitBlock(LoopCond.getBlock());
989 
990   // C99 6.8.5.2: "The evaluation of the controlling expression takes place
991   // after each execution of the loop body."
992 
993   // Evaluate the controlling expression in the do..while condition block.
994   // C99 6.8.5p2/p4: The first substatement is executed if the expression
995   // compares unequal to 0.  The condition must be a scalar type.
996   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
997 
998   BreakContinueStack.pop_back();
999 
1000   // "do {} while (0)" is common in macros, avoid extra blocks.  Be sure
1001   // to correctly handle break/continue though.
1002   llvm::ConstantInt *C = dyn_cast<llvm::ConstantInt>(BoolCondVal);
1003   bool CondIsConstInt = C;
1004   bool EmitBoolCondBranch = !C || !C->isZero();
1005 
1006   const SourceRange &R = S.getSourceRange();
1007   LoopStack.push(LoopBody, CGM.getContext(), CGM.getCodeGenOpts(), DoAttrs,
1008                  SourceLocToDebugLoc(R.getBegin()),
1009                  SourceLocToDebugLoc(R.getEnd()),
1010                  checkIfLoopMustProgress(CondIsConstInt));
1011 
1012   // As long as the condition is true, iterate the loop.
1013   if (EmitBoolCondBranch) {
1014     uint64_t BackedgeCount = getProfileCount(S.getBody()) - ParentCount;
1015     Builder.CreateCondBr(
1016         BoolCondVal, LoopBody, LoopExit.getBlock(),
1017         createProfileWeightsForLoop(S.getCond(), BackedgeCount));
1018   }
1019 
1020   LoopStack.pop();
1021 
1022   // Emit the exit block.
1023   EmitBlock(LoopExit.getBlock());
1024 
1025   // If we skipped emitting the conditional branch, the DoCond block is
1026   // typically just an unconditional branch; try to erase it.
1027   if (!EmitBoolCondBranch)
1028     SimplifyForwardingBlocks(LoopCond.getBlock());
1029 }
1030 
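/// EmitForStmt - Emit a for statement. Depending on whether an increment and
/// a condition variable are present, 'continue' targets either the condition
/// block or a separate increment block.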
1031 void CodeGenFunction::EmitForStmt(const ForStmt &S,
1032                                   ArrayRef<const Attr *> ForAttrs) {
1033   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1034 
1035   LexicalScope ForScope(*this, S.getSourceRange());
1036 
1037   // Evaluate the first part before the loop.
1038   if (S.getInit())
1039     EmitStmt(S.getInit());
1040 
1041   // Start the loop with a block that tests the condition.
1042   // If there's an increment, the continue scope will be overwritten
1043   // later.
1044   JumpDest CondDest = getJumpDestInCurrentScope("for.cond");
1045   llvm::BasicBlock *CondBlock = CondDest.getBlock();
1046   EmitBlock(CondBlock);
1047 
1048   Expr::EvalResult Result;
1049   bool CondIsConstInt =
1050       !S.getCond() || S.getCond()->EvaluateAsInt(Result, getContext());
1051 
1052   const SourceRange &R = S.getSourceRange();
1053   LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1054                  SourceLocToDebugLoc(R.getBegin()),
1055                  SourceLocToDebugLoc(R.getEnd()),
1056                  checkIfLoopMustProgress(CondIsConstInt));
1057 
1058   // Create a cleanup scope for the condition variable cleanups.
1059   LexicalScope ConditionScope(*this, S.getSourceRange());
1060 
1061   // If the for loop doesn't have an increment we can just use the condition as
1062   // the continue block. Otherwise, if there is no condition variable, we can
1063   // form the continue block now. If there is a condition variable, we can't
1064   // form the continue block until after we've emitted the condition, because
1065   // the condition is in scope in the increment, but Sema's jump diagnostics
1066   // ensure that there are no continues from the condition variable that jump
1067   // to the loop increment.
1068   JumpDest Continue;
1069   if (!S.getInc())
1070     Continue = CondDest;
1071   else if (!S.getConditionVariable())
1072     Continue = getJumpDestInCurrentScope("for.inc");
1073   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1074 
1075   if (S.getCond()) {
1076     // If the for statement has a condition scope, emit the local variable
1077     // declaration.
1078     if (S.getConditionVariable()) {
1079       EmitDecl(*S.getConditionVariable());
1080 
1081       // We have entered the condition variable's scope, so we're now able to
1082       // jump to the continue block.
1083       Continue = S.getInc() ? getJumpDestInCurrentScope("for.inc") : CondDest;
1084       BreakContinueStack.back().ContinueBlock = Continue;
1085     }
1086 
1087     llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1088     // If there are any cleanups between here and the loop-exit scope,
1089     // create a block to stage a loop exit along.
1090     if (ForScope.requiresCleanups())
1091       ExitBlock = createBasicBlock("for.cond.cleanup");
1092 
1093     // As long as the condition is true, iterate the loop.
1094     llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1095 
1096     // C99 6.8.5p2/p4: The first substatement is executed if the expression
1097     // compares unequal to 0.  The condition must be a scalar type.
1098     llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1099     llvm::MDNode *Weights =
1100         createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1101     if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1102       BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1103           BoolCondVal, Stmt::getLikelihood(S.getBody()));
1104 
1105     Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1106 
1107     if (ExitBlock != LoopExit.getBlock()) {
1108       EmitBlock(ExitBlock);
1109       EmitBranchThroughCleanup(LoopExit);
1110     }
1111 
1112     EmitBlock(ForBody);
1113   } else {
1114     // Treat it as a non-zero constant.  Don't even create a new block for the
1115     // body, just fall into it.
1116   }
1117   incrementProfileCounter(&S);
1118 
1119   {
1120     // Create a separate cleanup scope for the body, in case it is not
1121     // a compound statement.
1122     RunCleanupsScope BodyScope(*this);
1123     EmitStmt(S.getBody());
1124   }
1125 
1126   // If there is an increment, emit it next.
1127   if (S.getInc()) {
1128     EmitBlock(Continue.getBlock());
1129     EmitStmt(S.getInc());
1130   }
1131 
1132   BreakContinueStack.pop_back();
1133 
1134   ConditionScope.ForceCleanup();
1135 
1136   EmitStopPoint(&S);
1137   EmitBranch(CondBlock);
1138 
1139   ForScope.ForceCleanup();
1140 
1141   LoopStack.pop();
1142 
1143   // Emit the fall-through block.
1144   EmitBlock(LoopExit.getBlock(), true);
1145 }
1146 
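/// EmitCXXForRangeStmt - Emit a C++11 range-based for statement. The range,
/// begin, and end variables are emitted once before the loop; the loop
/// variable is initialized at the top of each iteration.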
1147 void
1148 CodeGenFunction::EmitCXXForRangeStmt(const CXXForRangeStmt &S,
1149                                      ArrayRef<const Attr *> ForAttrs) {
1150   JumpDest LoopExit = getJumpDestInCurrentScope("for.end");
1151 
1152   LexicalScope ForScope(*this, S.getSourceRange());
1153 
1154   // Evaluate the first pieces before the loop.
1155   if (S.getInit())
1156     EmitStmt(S.getInit());
1157   EmitStmt(S.getRangeStmt());
1158   EmitStmt(S.getBeginStmt());
1159   EmitStmt(S.getEndStmt());
1160 
1161   // Start the loop with a block that tests the condition.
1162   // If there's an increment, the continue scope will be overwritten
1163   // later.
1164   llvm::BasicBlock *CondBlock = createBasicBlock("for.cond");
1165   EmitBlock(CondBlock);
1166 
1167   const SourceRange &R = S.getSourceRange();
1168   LoopStack.push(CondBlock, CGM.getContext(), CGM.getCodeGenOpts(), ForAttrs,
1169                  SourceLocToDebugLoc(R.getBegin()),
1170                  SourceLocToDebugLoc(R.getEnd()));
1171 
1172   // If there are any cleanups between here and the loop-exit scope,
1173   // create a block to stage a loop exit along.
1174   llvm::BasicBlock *ExitBlock = LoopExit.getBlock();
1175   if (ForScope.requiresCleanups())
1176     ExitBlock = createBasicBlock("for.cond.cleanup");
1177 
1178   // The loop body, consisting of the specified body and the loop variable.
1179   llvm::BasicBlock *ForBody = createBasicBlock("for.body");
1180 
1181   // The body is executed if the expression, contextually converted
1182   // to bool, is true.
1183   llvm::Value *BoolCondVal = EvaluateExprAsBool(S.getCond());
1184   llvm::MDNode *Weights =
1185       createProfileWeightsForLoop(S.getCond(), getProfileCount(S.getBody()));
1186   if (!Weights && CGM.getCodeGenOpts().OptimizationLevel)
1187     BoolCondVal = emitCondLikelihoodViaExpectIntrinsic(
1188         BoolCondVal, Stmt::getLikelihood(S.getBody()));
1189   Builder.CreateCondBr(BoolCondVal, ForBody, ExitBlock, Weights);
1190 
1191   if (ExitBlock != LoopExit.getBlock()) {
1192     EmitBlock(ExitBlock);
1193     EmitBranchThroughCleanup(LoopExit);
1194   }
1195 
1196   EmitBlock(ForBody);
1197   incrementProfileCounter(&S);
1198 
1199   // Create a block for the increment. In case of a 'continue', we jump there.
1200   JumpDest Continue = getJumpDestInCurrentScope("for.inc");
1201 
1202   // Store the blocks to use for break and continue.
1203   BreakContinueStack.push_back(BreakContinue(LoopExit, Continue));
1204 
1205   {
1206     // Create a separate cleanup scope for the loop variable and body.
1207     LexicalScope BodyScope(*this, S.getSourceRange());
1208     EmitStmt(S.getLoopVarStmt());
1209     EmitStmt(S.getBody());
1210   }
1211 
1212   EmitStopPoint(&S);
1213   // If there is an increment, emit it next.
1214   EmitBlock(Continue.getBlock());
1215   EmitStmt(S.getInc());
1216 
1217   BreakContinueStack.pop_back();
1218 
1219   EmitBranch(CondBlock);
1220 
1221   ForScope.ForceCleanup();
1222 
1223   LoopStack.pop();
1224 
1225   // Emit the fall-through block.
1226   EmitBlock(LoopExit.getBlock(), true);
1227 }
1228 
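/// EmitReturnOfRValue - Store the given RValue into the return slot and
/// branch through any cleanups to the return block.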
1229 void CodeGenFunction::EmitReturnOfRValue(RValue RV, QualType Ty) {
1230   if (RV.isScalar()) {
1231     Builder.CreateStore(RV.getScalarVal(), ReturnValue);
1232   } else if (RV.isAggregate()) {
1233     LValue Dest = MakeAddrLValue(ReturnValue, Ty);
1234     LValue Src = MakeAddrLValue(RV.getAggregateAddress(), Ty);
1235     EmitAggregateCopy(Dest, Src, Ty, getOverlapForReturnValue());
1236   } else {
1237     EmitStoreOfComplex(RV.getComplexVal(), MakeAddrLValue(ReturnValue, Ty),
1238                        /*init*/ true);
1239   }
1240   EmitBranchThroughCleanup(ReturnBlock);
1241 }
1242 
1243 namespace {
1244 // RAII struct used to save and restore a return statement's result expression.
1245 struct SaveRetExprRAII {
1246   SaveRetExprRAII(const Expr *RetExpr, CodeGenFunction &CGF)
1247       : OldRetExpr(CGF.RetExpr), CGF(CGF) {
1248     CGF.RetExpr = RetExpr;
1249   }
1250   ~SaveRetExprRAII() { CGF.RetExpr = OldRetExpr; }
1251   const Expr *OldRetExpr;
1252   CodeGenFunction &CGF;
1253 };
1254 } // namespace
1255 
1256 /// If we have 'return f(...);', where both caller and callee are SwiftAsync,
1257 /// codegen it as 'tail call ...; ret void;'.
1258 static void makeTailCallIfSwiftAsync(const CallExpr *CE, CGBuilderTy &Builder,
1259                                      const CGFunctionInfo *CurFnInfo) {
1260   auto calleeQualType = CE->getCallee()->getType();
1261   const FunctionType *calleeType = nullptr;
1262   if (calleeQualType->isFunctionPointerType() ||
1263       calleeQualType->isFunctionReferenceType() ||
1264       calleeQualType->isBlockPointerType() ||
1265       calleeQualType->isMemberFunctionPointerType()) {
1266     calleeType = calleeQualType->getPointeeType()->castAs<FunctionType>();
1267   } else if (auto *ty = dyn_cast<FunctionType>(calleeQualType)) {
1268     calleeType = ty;
1269   } else if (auto CMCE = dyn_cast<CXXMemberCallExpr>(CE)) {
1270     if (auto methodDecl = CMCE->getMethodDecl()) {
1271       // getMethodDecl() doesn't handle member pointers at the moment.
1272       calleeType = methodDecl->getType()->castAs<FunctionType>();
1273     } else {
1274       return;
1275     }
1276   } else {
1277     return;
1278   }
1279   if (calleeType->getCallConv() == CallingConv::CC_SwiftAsync &&
1280       (CurFnInfo->getASTCallingConvention() == CallingConv::CC_SwiftAsync)) {
1281     auto CI = cast<llvm::CallInst>(&Builder.GetInsertBlock()->back());
1282     CI->setTailCallKind(llvm::CallInst::TCK_MustTail);
1283     Builder.CreateRetVoid();
1284     Builder.ClearInsertionPoint();
1285   }
1286 }
1287 
1288 /// EmitReturnStmt - Note that due to GCC extensions, this can have an operand
1289 /// if the function returns void, or may be missing one if the function returns
1290 /// non-void.  Fun stuff :).
1291 void CodeGenFunction::EmitReturnStmt(const ReturnStmt &S) {
1292   if (requiresReturnValueCheck()) {
1293     llvm::Constant *SLoc = EmitCheckSourceLocation(S.getBeginLoc());
1294     auto *SLocPtr =
1295         new llvm::GlobalVariable(CGM.getModule(), SLoc->getType(), false,
1296                                  llvm::GlobalVariable::PrivateLinkage, SLoc);
1297     SLocPtr->setUnnamedAddr(llvm::GlobalValue::UnnamedAddr::Global);
1298     CGM.getSanitizerMetadata()->disableSanitizerForGlobal(SLocPtr);
1299     assert(ReturnLocation.isValid() && "No valid return location");
1300     Builder.CreateStore(Builder.CreateBitCast(SLocPtr, Int8PtrTy),
1301                         ReturnLocation);
1302   }
1303 
1304   // Returning from an outlined SEH helper is UB, and we already warn on it.
1305   if (IsOutlinedSEHHelper) {
1306     Builder.CreateUnreachable();
1307     Builder.ClearInsertionPoint();
1308   }
1309 
1310   // Emit the result value, even if unused, to evaluate the side effects.
1311   const Expr *RV = S.getRetValue();
1312 
1313   // Record the result expression of the return statement. The recorded
1314   // expression is used to determine whether a block capture's lifetime should
1315   // end at the end of the full expression as opposed to the end of the scope
1316   // enclosing the block expression.
1317   //
1318   // This permits a small, easily-implemented exception to our over-conservative
1319   // rules about not jumping to statements following block literals with
1320   // non-trivial cleanups.
1321   SaveRetExprRAII SaveRetExpr(RV, *this);
1322 
1323   RunCleanupsScope cleanupScope(*this);
1324   if (const auto *EWC = dyn_cast_or_null<ExprWithCleanups>(RV))
1325     RV = EWC->getSubExpr();
1326   // FIXME: Clean this up by using an LValue for ReturnTemp,
1327   // EmitStoreThroughLValue, and EmitAnyExpr.
1328   // Check if the NRVO candidate was not globalized in OpenMP mode.
1329   if (getLangOpts().ElideConstructors && S.getNRVOCandidate() &&
1330       S.getNRVOCandidate()->isNRVOVariable() &&
1331       (!getLangOpts().OpenMP ||
1332        !CGM.getOpenMPRuntime()
1333             .getAddressOfLocalVariable(*this, S.getNRVOCandidate())
1334             .isValid())) {
1335     // Apply the named return value optimization for this return statement,
1336     // which means doing nothing: the appropriate result has already been
1337     // constructed into the NRVO variable.
1338 
1339     // If there is an NRVO flag for this variable, set it to 1 to indicate
1340     // that the cleanup code should not destroy the variable.
1341     if (llvm::Value *NRVOFlag = NRVOFlags[S.getNRVOCandidate()])
1342       Builder.CreateFlagStore(Builder.getTrue(), NRVOFlag);
1343   } else if (!ReturnValue.isValid() || (RV && RV->getType()->isVoidType())) {
1344     // Make sure not to return anything, but evaluate the expression
1345     // for side effects.
1346     if (RV) {
1347       EmitAnyExpr(RV);
1348       if (auto *CE = dyn_cast<CallExpr>(RV))
1349         makeTailCallIfSwiftAsync(CE, Builder, CurFnInfo);
1350     }
1351   } else if (!RV) {
1352     // Do nothing (return value is left uninitialized)
1353   } else if (FnRetTy->isReferenceType()) {
1354     // If this function returns a reference, take the address of the expression
1355     // rather than the value.
1356     RValue Result = EmitReferenceBindingToExpr(RV);
1357     Builder.CreateStore(Result.getScalarVal(), ReturnValue);
1358   } else {
1359     switch (getEvaluationKind(RV->getType())) {
1360     case TEK_Scalar:
1361       Builder.CreateStore(EmitScalarExpr(RV), ReturnValue);
1362       break;
1363     case TEK_Complex:
1364       EmitComplexExprIntoLValue(RV, MakeAddrLValue(ReturnValue, RV->getType()),
1365                                 /*isInit*/ true);
1366       break;
1367     case TEK_Aggregate:
1368       EmitAggExpr(RV, AggValueSlot::forAddr(
1369                           ReturnValue, Qualifiers(),
1370                           AggValueSlot::IsDestructed,
1371                           AggValueSlot::DoesNotNeedGCBarriers,
1372                           AggValueSlot::IsNotAliased,
1373                           getOverlapForReturnValue()));
1374       break;
1375     }
1376   }
1377 
1378   ++NumReturnExprs;
1379   if (!RV || RV->isEvaluatable(getContext()))
1380     ++NumSimpleReturnExprs;
1381 
1382   cleanupScope.ForceCleanup();
1383   EmitBranchThroughCleanup(ReturnBlock);
1384 }
1385 
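/// EmitDeclStmt - Emit a declaration statement by emitting each declaration
/// it contains, after emitting a stop point if this code is reachable.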
1386 void CodeGenFunction::EmitDeclStmt(const DeclStmt &S) {
1387   // As long as debug info is modeled with instructions, we have to ensure we
1388   // have a place to insert here and write the stop point here.
1389   if (HaveInsertPoint())
1390     EmitStopPoint(&S);
1391 
1392   for (const auto *I : S.decls())
1393     EmitDecl(*I);
1394 }
1395 
1396 void CodeGenFunction::EmitBreakStmt(const BreakStmt &S) {
1397   assert(!BreakContinueStack.empty() && "break stmt not in a loop or switch!");
1398 
1399   // If this code is reachable then emit a stop point (if generating
1400   // debug info). We have to do this ourselves because we are on the
1401   // "simple" statement path.
1402   if (HaveInsertPoint())
1403     EmitStopPoint(&S);
1404 
1405   EmitBranchThroughCleanup(BreakContinueStack.back().BreakBlock);
1406 }
1407 
1408 void CodeGenFunction::EmitContinueStmt(const ContinueStmt &S) {
1409   assert(!BreakContinueStack.empty() && "continue stmt not in a loop!");
1410 
1411   // If this code is reachable then emit a stop point (if generating
1412   // debug info). We have to do this ourselves because we are on the
1413   // "simple" statement path.
1414   if (HaveInsertPoint())
1415     EmitStopPoint(&S);
1416 
1417   EmitBranchThroughCleanup(BreakContinueStack.back().ContinueBlock);
1418 }
1419 
1420 /// EmitCaseStmtRange - If the case statement range is not too big, add one
1421 /// switch case for each value within the range. If the range is too big,
1422 /// emit an "if" condition check instead.
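     /// For example, the GNU case-range extension (e.g. "case 1 ... 100:") is
     /// lowered here: small ranges become individual switch cases, large ones a
     /// single unsigned range test chained in front of the default block.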
1423 void CodeGenFunction::EmitCaseStmtRange(const CaseStmt &S,
1424                                         ArrayRef<const Attr *> Attrs) {
1425   assert(S.getRHS() && "Expected RHS value in CaseStmt");
1426 
1427   llvm::APSInt LHS = S.getLHS()->EvaluateKnownConstInt(getContext());
1428   llvm::APSInt RHS = S.getRHS()->EvaluateKnownConstInt(getContext());
1429 
1430   // Emit the code for this case. We do this first to make sure it is
1431   // properly chained from our predecessor before generating the
1432   // switch machinery to enter this block.
1433   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1434   EmitBlockWithFallThrough(CaseDest, &S);
1435   EmitStmt(S.getSubStmt());
1436 
1437   // If range is empty, do nothing.
1438   if (LHS.isSigned() ? RHS.slt(LHS) : RHS.ult(LHS))
1439     return;
1440 
1441   Stmt::Likelihood LH = Stmt::getLikelihood(Attrs);
1442   llvm::APInt Range = RHS - LHS;
1443   // FIXME: parameters such as this should not be hardcoded.
1444   if (Range.ult(llvm::APInt(Range.getBitWidth(), 64))) {
1445     // Range is small enough to add multiple switch instruction cases.
1446     uint64_t Total = getProfileCount(&S);
1447     unsigned NCases = Range.getZExtValue() + 1;
1448     // We only have one region counter for the entire set of cases here, so we
1449     // need to divide the weights evenly between the generated cases, ensuring
1450     // that the total weight is preserved. E.g., a weight of 5 over three cases
1451     // will be distributed as weights of 2, 2, and 1.
1452     uint64_t Weight = Total / NCases, Rem = Total % NCases;
1453     for (unsigned I = 0; I != NCases; ++I) {
1454       if (SwitchWeights)
1455         SwitchWeights->push_back(Weight + (Rem ? 1 : 0));
1456       else if (SwitchLikelihood)
1457         SwitchLikelihood->push_back(LH);
1458 
1459       if (Rem)
1460         Rem--;
1461       SwitchInsn->addCase(Builder.getInt(LHS), CaseDest);
1462       ++LHS;
1463     }
1464     return;
1465   }
1466 
1467   // The range is too big. Emit "if" condition into a new block,
1468   // making sure to save and restore the current insertion point.
1469   llvm::BasicBlock *RestoreBB = Builder.GetInsertBlock();
1470 
1471   // Push this test onto the chain of range checks (which terminates
1472   // in the default basic block). The switch's default will be changed
1473   // to the top of this chain after switch emission is complete.
1474   llvm::BasicBlock *FalseDest = CaseRangeBlock;
1475   CaseRangeBlock = createBasicBlock("sw.caserange");
1476 
1477   CurFn->insert(CurFn->end(), CaseRangeBlock);
1478   Builder.SetInsertPoint(CaseRangeBlock);
1479 
1480   // Emit range check.
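       // The bounds check LHS <= V && V <= RHS folds into a single unsigned
       // comparison: (V - LHS) <= (RHS - LHS).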
1481   llvm::Value *Diff =
1482     Builder.CreateSub(SwitchInsn->getCondition(), Builder.getInt(LHS));
1483   llvm::Value *Cond =
1484     Builder.CreateICmpULE(Diff, Builder.getInt(Range), "inbounds");
1485 
1486   llvm::MDNode *Weights = nullptr;
1487   if (SwitchWeights) {
1488     uint64_t ThisCount = getProfileCount(&S);
1489     uint64_t DefaultCount = (*SwitchWeights)[0];
1490     Weights = createProfileWeights(ThisCount, DefaultCount);
1491 
1492     // Since we're chaining the switch default through each large case range, we
1493     // need to update the weight for the default, i.e., the first case, to
1494     // include this case.
1495     (*SwitchWeights)[0] += ThisCount;
1496   } else if (SwitchLikelihood)
1497     Cond = emitCondLikelihoodViaExpectIntrinsic(Cond, LH);
1498 
1499   Builder.CreateCondBr(Cond, CaseDest, FalseDest, Weights);
1500 
1501   // Restore the appropriate insertion point.
1502   if (RestoreBB)
1503     Builder.SetInsertPoint(RestoreBB);
1504   else
1505     Builder.ClearInsertionPoint();
1506 }
1507 
1508 void CodeGenFunction::EmitCaseStmt(const CaseStmt &S,
1509                                    ArrayRef<const Attr *> Attrs) {
1510   // If there is no enclosing switch instance that we're aware of, then this
1511   // case statement and its block can be elided.  This situation only happens
1512   // when we've constant-folded the switch, are emitting the constant case,
1513   // and part of the constant case includes another case statement.  For
1514   // instance: switch (4) { case 4: do { case 5: } while (1); }
1515   if (!SwitchInsn) {
1516     EmitStmt(S.getSubStmt());
1517     return;
1518   }
1519 
1520   // Handle case ranges.
1521   if (S.getRHS()) {
1522     EmitCaseStmtRange(S, Attrs);
1523     return;
1524   }
1525 
1526   llvm::ConstantInt *CaseVal =
1527     Builder.getInt(S.getLHS()->EvaluateKnownConstInt(getContext()));
1528 
1529   // Emit debuginfo for the case value if it is an enum value.
1530   const ConstantExpr *CE;
1531   if (auto ICE = dyn_cast<ImplicitCastExpr>(S.getLHS()))
1532     CE = dyn_cast<ConstantExpr>(ICE->getSubExpr());
1533   else
1534     CE = dyn_cast<ConstantExpr>(S.getLHS());
1535   if (CE) {
1536     if (auto DE = dyn_cast<DeclRefExpr>(CE->getSubExpr()))
1537       if (CGDebugInfo *Dbg = getDebugInfo())
1538         if (CGM.getCodeGenOpts().hasReducedDebugInfo())
1539           Dbg->EmitGlobalVariable(DE->getDecl(),
1540               APValue(llvm::APSInt(CaseVal->getValue())));
1541   }
1542 
1543   if (SwitchLikelihood)
1544     SwitchLikelihood->push_back(Stmt::getLikelihood(Attrs));
1545 
1546   // If the body of the case is just a 'break', try not to emit an empty block.
1547   // If we're profiling or we're not optimizing, leave the block in for better
1548   // debug and coverage analysis.
1549   if (!CGM.getCodeGenOpts().hasProfileClangInstr() &&
1550       CGM.getCodeGenOpts().OptimizationLevel > 0 &&
1551       isa<BreakStmt>(S.getSubStmt())) {
1552     JumpDest Block = BreakContinueStack.back().BreakBlock;
1553 
1554     // Only do this optimization if there are no cleanups that need emitting.
1555     if (isObviouslyBranchWithoutCleanups(Block)) {
1556       if (SwitchWeights)
1557         SwitchWeights->push_back(getProfileCount(&S));
1558       SwitchInsn->addCase(CaseVal, Block.getBlock());
1559 
1560       // If there was a fallthrough into this case, make sure to redirect it to
1561       // the end of the switch as well.
1562       if (Builder.GetInsertBlock()) {
1563         Builder.CreateBr(Block.getBlock());
1564         Builder.ClearInsertionPoint();
1565       }
1566       return;
1567     }
1568   }
1569 
1570   llvm::BasicBlock *CaseDest = createBasicBlock("sw.bb");
1571   EmitBlockWithFallThrough(CaseDest, &S);
1572   if (SwitchWeights)
1573     SwitchWeights->push_back(getProfileCount(&S));
1574   SwitchInsn->addCase(CaseVal, CaseDest);
1575 
1576   // Recursively emitting the statement is acceptable, but is not wonderful for
1577   // code where we have many case statements nested together, i.e.:
1578   //  case 1:
1579   //    case 2:
1580   //      case 3: etc.
1581   // Handling this recursively will create a new block for each case statement
1582   // that falls through to the next case which is IR intensive.  It also causes
1583   // deep recursion which can run into stack depth limitations.  Handle
1584   // sequential non-range case statements specially.
1585   //
1586   // TODO When the next case has a likelihood attribute the code returns to the
1587   // recursive algorithm. Maybe improve this case if it becomes common practice
1588   // to use a lot of attributes.
1589   const CaseStmt *CurCase = &S;
1590   const CaseStmt *NextCase = dyn_cast<CaseStmt>(S.getSubStmt());
1591 
1592   // Otherwise, iteratively add consecutive cases to this switch stmt.
1593   while (NextCase && NextCase->getRHS() == nullptr) {
1594     CurCase = NextCase;
1595     llvm::ConstantInt *CaseVal =
1596       Builder.getInt(CurCase->getLHS()->EvaluateKnownConstInt(getContext()));
1597 
1598     if (SwitchWeights)
1599       SwitchWeights->push_back(getProfileCount(NextCase));
1600     if (CGM.getCodeGenOpts().hasProfileClangInstr()) {
1601       CaseDest = createBasicBlock("sw.bb");
1602       EmitBlockWithFallThrough(CaseDest, CurCase);
1603     }
1604     // Since this loop is only executed when the CaseStmt has no attributes,
1605     // use a hard-coded value.
1606     if (SwitchLikelihood)
1607       SwitchLikelihood->push_back(Stmt::LH_None);
1608 
1609     SwitchInsn->addCase(CaseVal, CaseDest);
1610     NextCase = dyn_cast<CaseStmt>(CurCase->getSubStmt());
1611   }
1612 
1613   // Generate a stop point for debug info if the case statement is
1614   // followed by a default statement. A fallthrough case before a
1615   // default case gets its own branch target.
1616   if (CurCase->getSubStmt()->getStmtClass() == Stmt::DefaultStmtClass)
1617     EmitStopPoint(CurCase);
1618 
1619   // Normal default recursion for non-cases.
1620   EmitStmt(CurCase->getSubStmt());
1621 }
1622 
1623 void CodeGenFunction::EmitDefaultStmt(const DefaultStmt &S,
1624                                       ArrayRef<const Attr *> Attrs) {
1625   // If there is no enclosing switch instance that we're aware of, then this
1626   // default statement can be elided. This situation only happens when we've
1627   // constant-folded the switch.
1628   if (!SwitchInsn) {
1629     EmitStmt(S.getSubStmt());
1630     return;
1631   }
1632 
1633   llvm::BasicBlock *DefaultBlock = SwitchInsn->getDefaultDest();
1634   assert(DefaultBlock->empty() &&
1635          "EmitDefaultStmt: Default block already defined?");
1636 
1637   if (SwitchLikelihood)
1638     SwitchLikelihood->front() = Stmt::getLikelihood(Attrs);
1639 
1640   EmitBlockWithFallThrough(DefaultBlock, &S);
1641 
1642   EmitStmt(S.getSubStmt());
1643 }
1644 
1645 /// CollectStatementsForCase - Given the body of a 'switch' statement and a
1646 /// constant value that is being switched on, see if we can dead code eliminate
1647 /// the body of the switch to a simple series of statements to emit.  Basically,
1648 /// on a switch (5) we want to find these statements:
1649 ///    case 5:
1650 ///      printf(...);    <--
1651 ///      ++i;            <--
1652 ///      break;
1653 ///
1654 /// and add them to the ResultStmts vector.  If it is unsafe to do this
1655 /// transformation (for example, one of the elided statements contains a label
1656 /// that might be jumped to), return CSFC_Failure.  If we handled it and 'S'
1657 /// should include statements after it (e.g. the printf() line is a substmt of
1658 /// the case) then return CSFC_FallThrough.  If we handled it and found a break
1659 /// statement, then return CSFC_Success.
1660 ///
1661 /// If Case is non-null, then we are looking for the specified case, checking
1662 /// that nothing we jump over contains labels.  If Case is null, then we found
1663 /// the case and are looking for the break.
1664 ///
1665 /// If the recursive walk actually finds our Case, then we set FoundCase to
1666 /// true.
1667 ///
1668 enum CSFC_Result { CSFC_Failure, CSFC_FallThrough, CSFC_Success };
1669 static CSFC_Result CollectStatementsForCase(const Stmt *S,
1670                                             const SwitchCase *Case,
1671                                             bool &FoundCase,
1672                               SmallVectorImpl<const Stmt*> &ResultStmts) {
1673   // If this is a null statement, just succeed.
1674   if (!S)
1675     return Case ? CSFC_Success : CSFC_FallThrough;
1676 
1677   // If this is the switchcase (case 4: or default) that we're looking for, then
1678   // we're in business.  Just add the substatement.
1679   if (const SwitchCase *SC = dyn_cast<SwitchCase>(S)) {
1680     if (S == Case) {
1681       FoundCase = true;
1682       return CollectStatementsForCase(SC->getSubStmt(), nullptr, FoundCase,
1683                                       ResultStmts);
1684     }
1685 
1686     // Otherwise, this is some other case or default statement, just ignore it.
1687     return CollectStatementsForCase(SC->getSubStmt(), Case, FoundCase,
1688                                     ResultStmts);
1689   }
1690 
1691   // If we are in the live part of the code and we found our break statement,
1692   // return a success!
1693   if (!Case && isa<BreakStmt>(S))
1694     return CSFC_Success;
1695 
1696   // If this is a compound statement, then it might contain the SwitchCase, the
1697   // break, or neither.
1698   if (const CompoundStmt *CS = dyn_cast<CompoundStmt>(S)) {
1699     // Handle this as two cases: we might be looking for the SwitchCase (if so,
1700     // the skipped statements must be skippable), or we might already have it.
1701     CompoundStmt::const_body_iterator I = CS->body_begin(), E = CS->body_end();
1702     bool StartedInLiveCode = FoundCase;
1703     unsigned StartSize = ResultStmts.size();
1704 
1705     // If we've not found the case yet, scan through looking for it.
1706     if (Case) {
1707       // Keep track of whether we see a skipped declaration.  The code could be
1708       // using the declaration even if it is skipped, so we can't optimize out
1709       // the decl if the kept statements might refer to it.
1710       bool HadSkippedDecl = false;
1711 
1712       // If we're looking for the case, just see if we can skip each of the
1713       // substatements.
1714       for (; Case && I != E; ++I) {
1715         HadSkippedDecl |= CodeGenFunction::mightAddDeclToScope(*I);
1716 
1717         switch (CollectStatementsForCase(*I, Case, FoundCase, ResultStmts)) {
1718         case CSFC_Failure: return CSFC_Failure;
1719         case CSFC_Success:
1720           // A successful result means that either 1) the statement doesn't
1721           // have the case and is skippable, or 2) it does contain the case value
1722           // and also contains the break to exit the switch.  In the latter case,
1723           // we just verify the rest of the statements are elidable.
1724           if (FoundCase) {
1725             // If we found the case and skipped declarations, we can't do the
1726             // optimization.
1727             if (HadSkippedDecl)
1728               return CSFC_Failure;
1729 
1730             for (++I; I != E; ++I)
1731               if (CodeGenFunction::ContainsLabel(*I, true))
1732                 return CSFC_Failure;
1733             return CSFC_Success;
1734           }
1735           break;
1736         case CSFC_FallThrough:
1737           // If we have a fallthrough condition, then we must have found the case
1738           // and started to include statements.  Consider the rest of the
1739           // statements in the compound statement as candidates for inclusion.
1740           assert(FoundCase && "Didn't find case but returned fallthrough?");
1741           // We recursively found Case, so we're not looking for it anymore.
1742           Case = nullptr;
1743 
1744           // If we found the case and skipped declarations, we can't do the
1745           // optimization.
1746           if (HadSkippedDecl)
1747             return CSFC_Failure;
1748           break;
1749         }
1750       }
1751 
1752       if (!FoundCase)
1753         return CSFC_Success;
1754 
1755       assert(!HadSkippedDecl && "fallthrough after skipping decl");
1756     }
1757 
1758     // If we have statements in our range, then we know that the statements are
1759     // live and need to be added to the set of statements we're tracking.
1760     bool AnyDecls = false;
1761     for (; I != E; ++I) {
1762       AnyDecls |= CodeGenFunction::mightAddDeclToScope(*I);
1763 
1764       switch (CollectStatementsForCase(*I, nullptr, FoundCase, ResultStmts)) {
1765       case CSFC_Failure: return CSFC_Failure;
1766       case CSFC_FallThrough:
1767         // A fallthrough result means that the statement was simple and was just
1768         // added to ResultStmts; keep adding the statements that follow.
1769         break;
1770       case CSFC_Success:
1771         // A successful result means that we found the break statement and
1772         // stopped statement inclusion.  We just ensure that any leftover stmts
1773         // are skippable and return success ourselves.
1774         for (++I; I != E; ++I)
1775           if (CodeGenFunction::ContainsLabel(*I, true))
1776             return CSFC_Failure;
1777         return CSFC_Success;
1778       }
1779     }
1780 
1781     // If we're about to fall out of a scope without hitting a 'break;', we
1782     // can't perform the optimization if there were any decls in that scope
1783     // (we'd lose their end-of-lifetime).
1784     if (AnyDecls) {
1785       // If the entire compound statement was live, there's one more thing we
1786       // can try before giving up: emit the whole thing as a single statement.
1787       // We can do that unless the statement contains a 'break;'.
1788       // FIXME: Such a break must be at the end of a construct within this one.
1789       // We could emit this by just ignoring the BreakStmts entirely.
1790       if (StartedInLiveCode && !CodeGenFunction::containsBreak(S)) {
1791         ResultStmts.resize(StartSize);
1792         ResultStmts.push_back(S);
1793       } else {
1794         return CSFC_Failure;
1795       }
1796     }
1797 
1798     return CSFC_FallThrough;
1799   }
1800 
1801   // Okay, this is some other statement that we don't handle explicitly, like a
1802   // for statement or increment etc.  If we are skipping over this statement,
1803   // just verify it doesn't have labels, which would make it invalid to elide.
1804   if (Case) {
1805     if (CodeGenFunction::ContainsLabel(S, true))
1806       return CSFC_Failure;
1807     return CSFC_Success;
1808   }
1809 
1810   // Otherwise, we want to include this statement.  Everything is cool with that
1811   // so long as it doesn't contain a break out of the switch we're in.
1812   if (CodeGenFunction::containsBreak(S)) return CSFC_Failure;
1813 
1814   // Otherwise, everything is great.  Include the statement and tell the caller
1815   // that we fall through and include the next statement as well.
1816   ResultStmts.push_back(S);
1817   return CSFC_FallThrough;
1818 }
1819 
1820 /// FindCaseStatementsForValue - Find the case statement being jumped to and
1821 /// then invoke CollectStatementsForCase to find the list of statements to emit
1822 /// for a switch on constant.  See the comment above CollectStatementsForCase
1823 /// for more details.
1824 static bool FindCaseStatementsForValue(const SwitchStmt &S,
1825                                        const llvm::APSInt &ConstantCondValue,
1826                                 SmallVectorImpl<const Stmt*> &ResultStmts,
1827                                        ASTContext &C,
1828                                        const SwitchCase *&ResultCase) {
1829   // First step, find the switch case that is being branched to.  We can do this
1830   // efficiently by scanning the SwitchCase list.
1831   const SwitchCase *Case = S.getSwitchCaseList();
1832   const DefaultStmt *DefaultCase = nullptr;
1833 
1834   for (; Case; Case = Case->getNextSwitchCase()) {
1835     // It's either a default or case.  Just remember the default statement in
1836     // case we're not jumping to any numbered cases.
1837     if (const DefaultStmt *DS = dyn_cast<DefaultStmt>(Case)) {
1838       DefaultCase = DS;
1839       continue;
1840     }
1841 
1842     // Check to see if this case is the one we're looking for.
1843     const CaseStmt *CS = cast<CaseStmt>(Case);
1844     // Don't handle case ranges yet.
1845     if (CS->getRHS()) return false;
1846 
1847     // If we found our case, remember it as 'case'.
1848     if (CS->getLHS()->EvaluateKnownConstInt(C) == ConstantCondValue)
1849       break;
1850   }
1851 
1852   // If we didn't find a matching case, we use a default if it exists, or we
1853   // elide the whole switch body!
1854   if (!Case) {
1855     // It is safe to elide the body of the switch if it doesn't contain labels
1856     // etc.  If it is safe, return successfully with an empty ResultStmts list.
1857     if (!DefaultCase)
1858       return !CodeGenFunction::ContainsLabel(&S);
1859     Case = DefaultCase;
1860   }
1861 
1862   // Ok, we know which case is being jumped to, try to collect all the
1863   // statements that follow it.  This can fail for a variety of reasons.  Also,
1864   // check to see that the recursive walk actually found our case statement.
1865   // Insane cases like this can fail to find it in the recursive walk since we
1866   // don't handle every stmt kind:
1867   // switch (4) {
1868   //   while (1) {
1869   //     case 4: ...
1870   bool FoundCase = false;
1871   ResultCase = Case;
1872   return CollectStatementsForCase(S.getBody(), Case, FoundCase,
1873                                   ResultStmts) != CSFC_Failure &&
1874          FoundCase;
1875 }
1876 
1877 static std::optional<SmallVector<uint64_t, 16>>
1878 getLikelihoodWeights(ArrayRef<Stmt::Likelihood> Likelihoods) {
1879   // Are there enough branches to weight them?
1880   if (Likelihoods.size() <= 1)
1881     return std::nullopt;
1882 
1883   uint64_t NumUnlikely = 0;
1884   uint64_t NumNone = 0;
1885   uint64_t NumLikely = 0;
1886   for (const auto LH : Likelihoods) {
1887     switch (LH) {
1888     case Stmt::LH_Unlikely:
1889       ++NumUnlikely;
1890       break;
1891     case Stmt::LH_None:
1892       ++NumNone;
1893       break;
1894     case Stmt::LH_Likely:
1895       ++NumLikely;
1896       break;
1897     }
1898   }
1899 
1900   // Is there a likelihood attribute used?
1901   if (NumUnlikely == 0 && NumLikely == 0)
1902     return std::nullopt;
1903 
1904   // When multiple cases share the same code they can be combined during
1905   // optimization. In that case the weights of the branch will be the sum of
1906   // the individual weights. Make sure the combined sum of all neutral cases
1907   // doesn't exceed the value of a single likely attribute.
1908   // The additions both avoid divisions by 0 and make sure the weights of None
1909   // don't exceed the weight of Likely.
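       // For example, with one LH_Likely and two LH_None cases: Likely =
       // INT32_MAX / 3 and None = Likely / 3, so the two None weights together
       // stay below a single Likely weight.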
1910   const uint64_t Likely = INT32_MAX / (NumLikely + 2);
1911   const uint64_t None = Likely / (NumNone + 1);
1912   const uint64_t Unlikely = 0;
1913 
1914   SmallVector<uint64_t, 16> Result;
1915   Result.reserve(Likelihoods.size());
1916   for (const auto LH : Likelihoods) {
1917     switch (LH) {
1918     case Stmt::LH_Unlikely:
1919       Result.push_back(Unlikely);
1920       break;
1921     case Stmt::LH_None:
1922       Result.push_back(None);
1923       break;
1924     case Stmt::LH_Likely:
1925       Result.push_back(Likely);
1926       break;
1927     }
1928   }
1929 
1930   return Result;
1931 }
1932 
1933 void CodeGenFunction::EmitSwitchStmt(const SwitchStmt &S) {
1934   // Handle nested switch statements.
1935   llvm::SwitchInst *SavedSwitchInsn = SwitchInsn;
1936   SmallVector<uint64_t, 16> *SavedSwitchWeights = SwitchWeights;
1937   SmallVector<Stmt::Likelihood, 16> *SavedSwitchLikelihood = SwitchLikelihood;
1938   llvm::BasicBlock *SavedCRBlock = CaseRangeBlock;
1939 
1940   // See if we can constant fold the condition of the switch and therefore only
1941   // emit the live case statement (if any) of the switch.
1942   llvm::APSInt ConstantCondValue;
1943   if (ConstantFoldsToSimpleInteger(S.getCond(), ConstantCondValue)) {
1944     SmallVector<const Stmt*, 4> CaseStmts;
1945     const SwitchCase *Case = nullptr;
1946     if (FindCaseStatementsForValue(S, ConstantCondValue, CaseStmts,
1947                                    getContext(), Case)) {
1948       if (Case)
1949         incrementProfileCounter(Case);
1950       RunCleanupsScope ExecutedScope(*this);
1951 
1952       if (S.getInit())
1953         EmitStmt(S.getInit());
1954 
1955       // Emit the condition variable if needed inside the entire cleanup scope
1956       // used by this special case for constant folded switches.
1957       if (S.getConditionVariable())
1958         EmitDecl(*S.getConditionVariable());
1959 
1960       // At this point, we are no longer "within" a switch instance, so we
1961       // temporarily clear SwitchInsn to ensure that any embedded case
1962       // statements are not emitted.
1963       SwitchInsn = nullptr;
1964 
1965       // Okay, we can dead code eliminate everything except this case.  Emit the
1966       // specified series of statements and we're good.
1967       for (unsigned i = 0, e = CaseStmts.size(); i != e; ++i)
1968         EmitStmt(CaseStmts[i]);
1969       incrementProfileCounter(&S);
1970 
1971       // Now we want to restore the saved switch instance so that nested
1972       // switches continue to function properly
1973       SwitchInsn = SavedSwitchInsn;
1974 
1975       return;
1976     }
1977   }
1978 
1979   JumpDest SwitchExit = getJumpDestInCurrentScope("sw.epilog");
1980 
1981   RunCleanupsScope ConditionScope(*this);
1982 
1983   if (S.getInit())
1984     EmitStmt(S.getInit());
1985 
1986   if (S.getConditionVariable())
1987     EmitDecl(*S.getConditionVariable());
1988   llvm::Value *CondV = EmitScalarExpr(S.getCond());
1989 
1990   // Create a basic block to hold the code that comes after the switch
1991   // statement. We also need to create a default block now so that
1992   // explicit case range tests can have a place to jump to on
1993   // failure.
1994   llvm::BasicBlock *DefaultBlock = createBasicBlock("sw.default");
1995   SwitchInsn = Builder.CreateSwitch(CondV, DefaultBlock);
1996   if (PGO.haveRegionCounts()) {
1997     // Walk the SwitchCase list to find how many there are.
1998     uint64_t DefaultCount = 0;
1999     unsigned NumCases = 0;
2000     for (const SwitchCase *Case = S.getSwitchCaseList();
2001          Case;
2002          Case = Case->getNextSwitchCase()) {
2003       if (isa<DefaultStmt>(Case))
2004         DefaultCount = getProfileCount(Case);
2005       NumCases += 1;
2006     }
2007     SwitchWeights = new SmallVector<uint64_t, 16>();
2008     SwitchWeights->reserve(NumCases);
2009     // The default needs to be first. We store the edge count, so we already
2010     // know the right weight.
2011     SwitchWeights->push_back(DefaultCount);
2012   } else if (CGM.getCodeGenOpts().OptimizationLevel) {
2013     SwitchLikelihood = new SmallVector<Stmt::Likelihood, 16>();
2014     // Initialize the default case.
2015     SwitchLikelihood->push_back(Stmt::LH_None);
2016   }
2017 
2018   CaseRangeBlock = DefaultBlock;
2019 
2020   // Clear the insertion point to indicate we are in unreachable code.
2021   Builder.ClearInsertionPoint();
2022 
2023   // All break statements jump to SwitchExit. If BreakContinueStack is non-empty
2024   // then reuse the last ContinueBlock.
2025   JumpDest OuterContinue;
2026   if (!BreakContinueStack.empty())
2027     OuterContinue = BreakContinueStack.back().ContinueBlock;
2028 
2029   BreakContinueStack.push_back(BreakContinue(SwitchExit, OuterContinue));
2030 
2031   // Emit switch body.
2032   EmitStmt(S.getBody());
2033 
2034   BreakContinueStack.pop_back();
2035 
2036   // Update the default block in case explicit case range tests have
2037   // been chained on top.
2038   SwitchInsn->setDefaultDest(CaseRangeBlock);
2039 
2040   // If a default was never emitted:
2041   if (!DefaultBlock->getParent()) {
2042     // If we have cleanups, emit the default block so that there's a
2043     // place to jump through the cleanups from.
2044     if (ConditionScope.requiresCleanups()) {
2045       EmitBlock(DefaultBlock);
2046 
2047     // Otherwise, just forward the default block to the switch end.
2048     } else {
2049       DefaultBlock->replaceAllUsesWith(SwitchExit.getBlock());
2050       delete DefaultBlock;
2051     }
2052   }
2053 
2054   ConditionScope.ForceCleanup();
2055 
2056   // Emit continuation.
2057   EmitBlock(SwitchExit.getBlock(), true);
2058   incrementProfileCounter(&S);
2059 
2060   // If the switch has a condition wrapped by __builtin_unpredictable,
2061   // create metadata that specifies that the switch is unpredictable.
2062   // Don't bother if not optimizing because that metadata would not be used.
2063   auto *Call = dyn_cast<CallExpr>(S.getCond());
2064   if (Call && CGM.getCodeGenOpts().OptimizationLevel != 0) {
2065     auto *FD = dyn_cast_or_null<FunctionDecl>(Call->getCalleeDecl());
2066     if (FD && FD->getBuiltinID() == Builtin::BI__builtin_unpredictable) {
2067       llvm::MDBuilder MDHelper(getLLVMContext());
2068       SwitchInsn->setMetadata(llvm::LLVMContext::MD_unpredictable,
2069                               MDHelper.createUnpredictable());
2070     }
2071   }
2072 
2073   if (SwitchWeights) {
2074     assert(SwitchWeights->size() == 1 + SwitchInsn->getNumCases() &&
2075            "switch weights do not match switch cases");
2076     // If there's only one jump destination there's no sense weighting it.
2077     if (SwitchWeights->size() > 1)
2078       SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2079                               createProfileWeights(*SwitchWeights));
2080     delete SwitchWeights;
2081   } else if (SwitchLikelihood) {
2082     assert(SwitchLikelihood->size() == 1 + SwitchInsn->getNumCases() &&
2083            "switch likelihoods do not match switch cases");
2084     std::optional<SmallVector<uint64_t, 16>> LHW =
2085         getLikelihoodWeights(*SwitchLikelihood);
2086     if (LHW) {
2087       llvm::MDBuilder MDHelper(CGM.getLLVMContext());
2088       SwitchInsn->setMetadata(llvm::LLVMContext::MD_prof,
2089                               createProfileWeights(*LHW));
2090     }
2091     delete SwitchLikelihood;
2092   }
2093   SwitchInsn = SavedSwitchInsn;
2094   SwitchWeights = SavedSwitchWeights;
2095   SwitchLikelihood = SavedSwitchLikelihood;
2096   CaseRangeBlock = SavedCRBlock;
2097 }
2098 
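     /// SimplifyConstraint - Convert a GCC inline-asm constraint string into the
     /// form LLVM IR expects: alternatives are separated by '|' instead of ',',
     /// 'g' expands to "imr", modifiers such as '=' and '+' are dropped, and
     /// symbolic operand names like "[foo]" are resolved to operand indices.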
2099 static std::string
2100 SimplifyConstraint(const char *Constraint, const TargetInfo &Target,
2101                  SmallVectorImpl<TargetInfo::ConstraintInfo> *OutCons=nullptr) {
2102   std::string Result;
2103 
2104   while (*Constraint) {
2105     switch (*Constraint) {
2106     default:
2107       Result += Target.convertConstraint(Constraint);
2108       break;
2109     // Ignore these
2110     case '*':
2111     case '?':
2112     case '!':
2113     case '=': // Will see this and the following in multiple-alternative constraints.
2114     case '+':
2115       break;
2116     case '#': // Ignore the rest of the constraint alternative.
2117       while (Constraint[1] && Constraint[1] != ',')
2118         Constraint++;
2119       break;
2120     case '&':
2121     case '%':
2122       Result += *Constraint;
2123       while (Constraint[1] && Constraint[1] == *Constraint)
2124         Constraint++;
2125       break;
2126     case ',':
2127       Result += "|";
2128       break;
2129     case 'g':
2130       Result += "imr";
2131       break;
2132     case '[': {
2133       assert(OutCons &&
2134              "Must pass output names to constraints with a symbolic name");
2135       unsigned Index;
2136       bool result = Target.resolveSymbolicName(Constraint, *OutCons, Index);
2137       assert(result && "Could not resolve symbolic name"); (void)result;
2138       Result += llvm::utostr(Index);
2139       break;
2140     }
2141     }
2142 
2143     Constraint++;
2144   }
2145 
2146   return Result;
2147 }
2148 
2149 /// AddVariableConstraints - Look at AsmExpr and, if it is a variable declared
2150 /// as using a particular register, add that register as a constraint that will
2151 /// be used in this asm stmt.
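     /// For example, with "register int x asm("eax");" as an output operand, the
     /// plain register constraint is replaced by "{eax}" (or "&{eax}" if
     /// early-clobber).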
2152 static std::string
2153 AddVariableConstraints(const std::string &Constraint, const Expr &AsmExpr,
2154                        const TargetInfo &Target, CodeGenModule &CGM,
2155                        const AsmStmt &Stmt, const bool EarlyClobber,
2156                        std::string *GCCReg = nullptr) {
2157   const DeclRefExpr *AsmDeclRef = dyn_cast<DeclRefExpr>(&AsmExpr);
2158   if (!AsmDeclRef)
2159     return Constraint;
2160   const ValueDecl &Value = *AsmDeclRef->getDecl();
2161   const VarDecl *Variable = dyn_cast<VarDecl>(&Value);
2162   if (!Variable)
2163     return Constraint;
2164   if (Variable->getStorageClass() != SC_Register)
2165     return Constraint;
2166   AsmLabelAttr *Attr = Variable->getAttr<AsmLabelAttr>();
2167   if (!Attr)
2168     return Constraint;
2169   StringRef Register = Attr->getLabel();
2170   assert(Target.isValidGCCRegisterName(Register));
2171   // We're using validateOutputConstraint here because we only care if
2172   // this is a register constraint.
2173   TargetInfo::ConstraintInfo Info(Constraint, "");
2174   if (Target.validateOutputConstraint(Info) &&
2175       !Info.allowsRegister()) {
2176     CGM.ErrorUnsupported(&Stmt, "__asm__");
2177     return Constraint;
2178   }
2179   // Canonicalize the register here before returning it.
2180   Register = Target.getNormalizedGCCRegisterName(Register);
2181   if (GCCReg != nullptr)
2182     *GCCReg = Register.str();
2183   return (EarlyClobber ? "&{" : "{") + Register.str() + "}";
2184 }
2185 
2186 std::pair<llvm::Value*, llvm::Type *> CodeGenFunction::EmitAsmInputLValue(
2187     const TargetInfo::ConstraintInfo &Info, LValue InputValue,
2188     QualType InputType, std::string &ConstraintStr, SourceLocation Loc) {
2189   if (Info.allowsRegister() || !Info.allowsMemory()) {
2190     if (CodeGenFunction::hasScalarEvaluationKind(InputType))
2191       return {EmitLoadOfLValue(InputValue, Loc).getScalarVal(), nullptr};
2192 
2193     llvm::Type *Ty = ConvertType(InputType);
2194     uint64_t Size = CGM.getDataLayout().getTypeSizeInBits(Ty);
2195     if ((Size <= 64 && llvm::isPowerOf2_64(Size)) ||
2196         getTargetHooks().isScalarizableAsmOperand(*this, Ty)) {
2197       Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2198 
2199       return {
2200           Builder.CreateLoad(InputValue.getAddress(*this).withElementType(Ty)),
2201           nullptr};
2202     }
2203   }
2204 
2205   Address Addr = InputValue.getAddress(*this);
2206   ConstraintStr += '*';
2207   return {Addr.getPointer(), Addr.getElementType()};
2208 }
2209 
2210 std::pair<llvm::Value *, llvm::Type *>
2211 CodeGenFunction::EmitAsmInput(const TargetInfo::ConstraintInfo &Info,
2212                               const Expr *InputExpr,
2213                               std::string &ConstraintStr) {
2214   // If this can't be a register or memory, i.e., has to be a constant
2215   // (immediate or symbolic), try to emit it as such.
2216   if (!Info.allowsRegister() && !Info.allowsMemory()) {
2217     if (Info.requiresImmediateConstant()) {
2218       Expr::EvalResult EVResult;
2219       InputExpr->EvaluateAsRValue(EVResult, getContext(), true);
2220 
2221       llvm::APSInt IntResult;
2222       if (EVResult.Val.toIntegralConstant(IntResult, InputExpr->getType(),
2223                                           getContext()))
2224         return {llvm::ConstantInt::get(getLLVMContext(), IntResult), nullptr};
2225     }
2226 
2227     Expr::EvalResult Result;
2228     if (InputExpr->EvaluateAsInt(Result, getContext()))
2229       return {llvm::ConstantInt::get(getLLVMContext(), Result.Val.getInt()),
2230               nullptr};
2231   }
2232 
2233   if (Info.allowsRegister() || !Info.allowsMemory())
2234     if (CodeGenFunction::hasScalarEvaluationKind(InputExpr->getType()))
2235       return {EmitScalarExpr(InputExpr), nullptr};
2236   if (InputExpr->getStmtClass() == Expr::CXXThisExprClass)
2237     return {EmitScalarExpr(InputExpr), nullptr};
2238   InputExpr = InputExpr->IgnoreParenNoopCasts(getContext());
2239   LValue Dest = EmitLValue(InputExpr);
2240   return EmitAsmInputLValue(Info, Dest, InputExpr->getType(), ConstraintStr,
2241                             InputExpr->getExprLoc());
2242 }
2243 
2244 /// getAsmSrcLocInfo - Return the !srcloc metadata node to attach to an inline
2245 /// asm call instruction.  The !srcloc MDNode contains a list of constant
2246 /// integers which are the source locations of the start of each line in the
2247 /// asm.
2248 static llvm::MDNode *getAsmSrcLocInfo(const StringLiteral *Str,
2249                                       CodeGenFunction &CGF) {
2250   SmallVector<llvm::Metadata *, 8> Locs;
2251   // Add the location of the first line to the MDNode.
2252   Locs.push_back(llvm::ConstantAsMetadata::get(llvm::ConstantInt::get(
2253       CGF.Int64Ty, Str->getBeginLoc().getRawEncoding())));
2254   StringRef StrVal = Str->getString();
2255   if (!StrVal.empty()) {
2256     const SourceManager &SM = CGF.CGM.getContext().getSourceManager();
2257     const LangOptions &LangOpts = CGF.CGM.getLangOpts();
2258     unsigned StartToken = 0;
2259     unsigned ByteOffset = 0;
2260 
2261     // Add the location of the start of each subsequent line of the asm to the
2262     // MDNode.
2263     for (unsigned i = 0, e = StrVal.size() - 1; i != e; ++i) {
2264       if (StrVal[i] != '\n') continue;
2265       SourceLocation LineLoc = Str->getLocationOfByte(
2266           i + 1, SM, LangOpts, CGF.getTarget(), &StartToken, &ByteOffset);
2267       Locs.push_back(llvm::ConstantAsMetadata::get(
2268           llvm::ConstantInt::get(CGF.Int64Ty, LineLoc.getRawEncoding())));
2269     }
2270   }
2271 
2272   return llvm::MDNode::get(CGF.getLLVMContext(), Locs);
2273 }
2274 
2275 static void UpdateAsmCallInst(llvm::CallBase &Result, bool HasSideEffect,
2276                               bool HasUnwindClobber, bool ReadOnly,
2277                               bool ReadNone, bool NoMerge, const AsmStmt &S,
2278                               const std::vector<llvm::Type *> &ResultRegTypes,
2279                               const std::vector<llvm::Type *> &ArgElemTypes,
2280                               CodeGenFunction &CGF,
2281                               std::vector<llvm::Value *> &RegResults) {
2282   if (!HasUnwindClobber)
2283     Result.addFnAttr(llvm::Attribute::NoUnwind);
2284 
2285   if (NoMerge)
2286     Result.addFnAttr(llvm::Attribute::NoMerge);
2287   // Attach readnone and readonly attributes.
2288   if (!HasSideEffect) {
2289     if (ReadNone)
2290       Result.setDoesNotAccessMemory();
2291     else if (ReadOnly)
2292       Result.setOnlyReadsMemory();
2293   }
2294 
2295   // Add elementtype attribute for indirect constraints.
2296   for (auto Pair : llvm::enumerate(ArgElemTypes)) {
2297     if (Pair.value()) {
2298       auto Attr = llvm::Attribute::get(
2299           CGF.getLLVMContext(), llvm::Attribute::ElementType, Pair.value());
2300       Result.addParamAttr(Pair.index(), Attr);
2301     }
2302   }
2303 
2304   // Slap the source location of the inline asm into a !srcloc metadata on the
2305   // call.
2306   if (const auto *gccAsmStmt = dyn_cast<GCCAsmStmt>(&S))
2307     Result.setMetadata("srcloc",
2308                        getAsmSrcLocInfo(gccAsmStmt->getAsmString(), CGF));
2309   else {
2310     // At least put the line number on MS inline asm blobs.
2311     llvm::Constant *Loc =
2312         llvm::ConstantInt::get(CGF.Int64Ty, S.getAsmLoc().getRawEncoding());
2313     Result.setMetadata("srcloc",
2314                        llvm::MDNode::get(CGF.getLLVMContext(),
2315                                          llvm::ConstantAsMetadata::get(Loc)));
2316   }
2317 
2318   if (CGF.getLangOpts().assumeFunctionsAreConvergent())
2319     // Conservatively, mark all inline asm blocks in CUDA or OpenCL as
2320     // convergent (meaning, they may call an intrinsically convergent op, such
2321     // as bar.sync, and so can't have certain optimizations applied around
2322     // them).
2323     Result.addFnAttr(llvm::Attribute::Convergent);
2324   // Extract all of the register value results from the asm.
2325   if (ResultRegTypes.size() == 1) {
2326     RegResults.push_back(&Result);
2327   } else {
2328     for (unsigned i = 0, e = ResultRegTypes.size(); i != e; ++i) {
2329       llvm::Value *Tmp = CGF.Builder.CreateExtractValue(&Result, i, "asmresult");
2330       RegResults.push_back(Tmp);
2331     }
2332   }
2333 }
2334 
2335 static void
2336 EmitAsmStores(CodeGenFunction &CGF, const AsmStmt &S,
2337               const llvm::ArrayRef<llvm::Value *> RegResults,
2338               const llvm::ArrayRef<llvm::Type *> ResultRegTypes,
2339               const llvm::ArrayRef<llvm::Type *> ResultTruncRegTypes,
2340               const llvm::ArrayRef<LValue> ResultRegDests,
2341               const llvm::ArrayRef<QualType> ResultRegQualTys,
2342               const llvm::BitVector &ResultTypeRequiresCast,
2343               const llvm::BitVector &ResultRegIsFlagReg) {
2344   CGBuilderTy &Builder = CGF.Builder;
2345   CodeGenModule &CGM = CGF.CGM;
2346   llvm::LLVMContext &CTX = CGF.getLLVMContext();
2347 
2348   assert(RegResults.size() == ResultRegTypes.size());
2349   assert(RegResults.size() == ResultTruncRegTypes.size());
2350   assert(RegResults.size() == ResultRegDests.size());
2351   // ResultRegDests can also be populated by addReturnRegisterOutputs() above,
2352   // in which case its size may grow.
2353   assert(ResultTypeRequiresCast.size() <= ResultRegDests.size());
2354   assert(ResultRegIsFlagReg.size() <= ResultRegDests.size());
2355 
2356   for (unsigned i = 0, e = RegResults.size(); i != e; ++i) {
2357     llvm::Value *Tmp = RegResults[i];
2358     llvm::Type *TruncTy = ResultTruncRegTypes[i];
2359 
2360     if ((i < ResultRegIsFlagReg.size()) && ResultRegIsFlagReg[i]) {
2361       // Target must guarantee the Value `Tmp` here is lowered to a boolean
2362       // value.
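           // Emit llvm.assume(Tmp < 2) so later passes can rely on the result
           // being 0 or 1.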
2363       llvm::Constant *Two = llvm::ConstantInt::get(Tmp->getType(), 2);
2364       llvm::Value *IsBooleanValue =
2365           Builder.CreateCmp(llvm::CmpInst::ICMP_ULT, Tmp, Two);
2366       llvm::Function *FnAssume = CGM.getIntrinsic(llvm::Intrinsic::assume);
2367       Builder.CreateCall(FnAssume, IsBooleanValue);
2368     }
2369 
2370     // If the result type of the LLVM IR asm doesn't match the result type of
2371     // the expression, do the conversion.
2372     if (ResultRegTypes[i] != TruncTy) {
2373 
2374       // Truncate the integer result to the right size; note that TruncTy can be
2375       // a pointer.
2376       if (TruncTy->isFloatingPointTy())
2377         Tmp = Builder.CreateFPTrunc(Tmp, TruncTy);
2378       else if (TruncTy->isPointerTy() && Tmp->getType()->isIntegerTy()) {
2379         uint64_t ResSize = CGM.getDataLayout().getTypeSizeInBits(TruncTy);
2380         Tmp = Builder.CreateTrunc(
2381             Tmp, llvm::IntegerType::get(CTX, (unsigned)ResSize));
2382         Tmp = Builder.CreateIntToPtr(Tmp, TruncTy);
2383       } else if (Tmp->getType()->isPointerTy() && TruncTy->isIntegerTy()) {
2384         uint64_t TmpSize =
2385             CGM.getDataLayout().getTypeSizeInBits(Tmp->getType());
2386         Tmp = Builder.CreatePtrToInt(
2387             Tmp, llvm::IntegerType::get(CTX, (unsigned)TmpSize));
2388         Tmp = Builder.CreateTrunc(Tmp, TruncTy);
2389       } else if (TruncTy->isIntegerTy()) {
2390         Tmp = Builder.CreateZExtOrTrunc(Tmp, TruncTy);
2391       } else if (TruncTy->isVectorTy()) {
2392         Tmp = Builder.CreateBitCast(Tmp, TruncTy);
2393       }
2394     }
2395 
2396     LValue Dest = ResultRegDests[i];
2397     // ResultTypeRequiresCast elements correspond to the first
2398     // ResultTypeRequiresCast.size() elements of RegResults.
2399     if ((i < ResultTypeRequiresCast.size()) && ResultTypeRequiresCast[i]) {
2400       unsigned Size = CGF.getContext().getTypeSize(ResultRegQualTys[i]);
2401       Address A = Dest.getAddress(CGF).withElementType(ResultRegTypes[i]);
2402       if (CGF.getTargetHooks().isScalarizableAsmOperand(CGF, TruncTy)) {
2403         Builder.CreateStore(Tmp, A);
2404         continue;
2405       }
2406 
2407       QualType Ty =
2408           CGF.getContext().getIntTypeForBitwidth(Size, /*Signed=*/false);
2409       if (Ty.isNull()) {
2410         const Expr *OutExpr = S.getOutputExpr(i);
2411         CGM.getDiags().Report(OutExpr->getExprLoc(),
2412                               diag::err_store_value_to_reg);
2413         return;
2414       }
2415       Dest = CGF.MakeAddrLValue(A, Ty);
2416     }
2417     CGF.EmitStoreThroughLValue(RValue::get(Tmp), Dest);
2418   }
2419 }
2420 
2421 void CodeGenFunction::EmitAsmStmt(const AsmStmt &S) {
2422   // Pop all cleanup blocks at the end of the asm statement.
2423   CodeGenFunction::RunCleanupsScope Cleanups(*this);
2424 
2425   // Assemble the final asm string.
2426   std::string AsmString = S.generateAsmString(getContext());
2427 
2428   // Get all the output and input constraints together.
2429   SmallVector<TargetInfo::ConstraintInfo, 4> OutputConstraintInfos;
2430   SmallVector<TargetInfo::ConstraintInfo, 4> InputConstraintInfos;
2431 
2432   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2433     StringRef Name;
2434     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2435       Name = GAS->getOutputName(i);
2436     TargetInfo::ConstraintInfo Info(S.getOutputConstraint(i), Name);
2437     bool IsValid = getTarget().validateOutputConstraint(Info); (void)IsValid;
2438     assert(IsValid && "Failed to parse output constraint");
2439     OutputConstraintInfos.push_back(Info);
2440   }
2441 
2442   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2443     StringRef Name;
2444     if (const GCCAsmStmt *GAS = dyn_cast<GCCAsmStmt>(&S))
2445       Name = GAS->getInputName(i);
2446     TargetInfo::ConstraintInfo Info(S.getInputConstraint(i), Name);
2447     bool IsValid =
2448       getTarget().validateInputConstraint(OutputConstraintInfos, Info);
2449     assert(IsValid && "Failed to parse input constraint"); (void)IsValid;
2450     InputConstraintInfos.push_back(Info);
2451   }
2452 
2453   std::string Constraints;
2454 
2455   std::vector<LValue> ResultRegDests;
2456   std::vector<QualType> ResultRegQualTys;
2457   std::vector<llvm::Type *> ResultRegTypes;
2458   std::vector<llvm::Type *> ResultTruncRegTypes;
2459   std::vector<llvm::Type *> ArgTypes;
2460   std::vector<llvm::Type *> ArgElemTypes;
2461   std::vector<llvm::Value*> Args;
2462   llvm::BitVector ResultTypeRequiresCast;
2463   llvm::BitVector ResultRegIsFlagReg;
2464 
2465   // Keep track of inout constraints.
2466   std::string InOutConstraints;
2467   std::vector<llvm::Value*> InOutArgs;
2468   std::vector<llvm::Type*> InOutArgTypes;
2469   std::vector<llvm::Type*> InOutArgElemTypes;
2470 
2471   // Keep track of out constraints for tied input operand.
2472   std::vector<std::string> OutputConstraints;
2473 
2474   // Keep track of defined physregs.
2475   llvm::SmallSet<std::string, 8> PhysRegOutputs;
2476 
2477   // An inline asm can be marked readonly if it meets the following conditions:
2478   //  - it doesn't have any side effects
2479   //  - it doesn't clobber memory
2480   //  - it doesn't return a value by-reference
2481   // It can be marked readnone if it doesn't have any input memory constraints
2482   // in addition to meeting the conditions listed above.
2483   bool ReadOnly = true, ReadNone = true;
2484 
2485   for (unsigned i = 0, e = S.getNumOutputs(); i != e; i++) {
2486     TargetInfo::ConstraintInfo &Info = OutputConstraintInfos[i];
2487 
2488     // Simplify the output constraint.
2489     std::string OutputConstraint(S.getOutputConstraint(i));
2490     OutputConstraint = SimplifyConstraint(OutputConstraint.c_str() + 1,
2491                                           getTarget(), &OutputConstraintInfos);
2492 
2493     const Expr *OutExpr = S.getOutputExpr(i);
2494     OutExpr = OutExpr->IgnoreParenNoopCasts(getContext());
2495 
2496     std::string GCCReg;
2497     OutputConstraint = AddVariableConstraints(OutputConstraint, *OutExpr,
2498                                               getTarget(), CGM, S,
2499                                               Info.earlyClobber(),
2500                                               &GCCReg);
2501     // Give an error on multiple outputs to the same physreg.
2502     if (!GCCReg.empty() && !PhysRegOutputs.insert(GCCReg).second)
2503       CGM.Error(S.getAsmLoc(), "multiple outputs to hard register: " + GCCReg);
2504 
2505     OutputConstraints.push_back(OutputConstraint);
2506     LValue Dest = EmitLValue(OutExpr);
2507     if (!Constraints.empty())
2508       Constraints += ',';
2509 
2510     // If this is a register output, then make the inline asm return it
2511     // by-value.  If this is a memory result, return the value by-reference.
2512     QualType QTy = OutExpr->getType();
2513     const bool IsScalarOrAggregate = hasScalarEvaluationKind(QTy) ||
2514                                      hasAggregateEvaluationKind(QTy);
2515     if (!Info.allowsMemory() && IsScalarOrAggregate) {
2516 
2517       Constraints += "=" + OutputConstraint;
2518       ResultRegQualTys.push_back(QTy);
2519       ResultRegDests.push_back(Dest);
2520 
2521       bool IsFlagReg = llvm::StringRef(OutputConstraint).startswith("{@cc");
2522       ResultRegIsFlagReg.push_back(IsFlagReg);
2523 
2524       llvm::Type *Ty = ConvertTypeForMem(QTy);
2525       const bool RequiresCast = Info.allowsRegister() &&
2526           (getTargetHooks().isScalarizableAsmOperand(*this, Ty) ||
2527            Ty->isAggregateType());
2528 
2529       ResultTruncRegTypes.push_back(Ty);
2530       ResultTypeRequiresCast.push_back(RequiresCast);
2531 
2532       if (RequiresCast) {
2533         unsigned Size = getContext().getTypeSize(QTy);
2534         Ty = llvm::IntegerType::get(getLLVMContext(), Size);
2535       }
2536       ResultRegTypes.push_back(Ty);
2537       // If this output is tied to an input, and if the input is larger, then
2538       // we need to set the actual result type of the inline asm node to be the
2539       // same as the input type.
2540       if (Info.hasMatchingInput()) {
2541         unsigned InputNo;
2542         for (InputNo = 0; InputNo != S.getNumInputs(); ++InputNo) {
2543           TargetInfo::ConstraintInfo &Input = InputConstraintInfos[InputNo];
2544           if (Input.hasTiedOperand() && Input.getTiedOperand() == i)
2545             break;
2546         }
2547         assert(InputNo != S.getNumInputs() && "Didn't find matching input!");
2548 
2549         QualType InputTy = S.getInputExpr(InputNo)->getType();
2550         QualType OutputType = OutExpr->getType();
2551 
2552         uint64_t InputSize = getContext().getTypeSize(InputTy);
2553         if (getContext().getTypeSize(OutputType) < InputSize) {
2554           // Form the asm to return the value as a larger integer or fp type.
2555           ResultRegTypes.back() = ConvertType(InputTy);
2556         }
2557       }
2558       if (llvm::Type* AdjTy =
2559             getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2560                                                  ResultRegTypes.back()))
2561         ResultRegTypes.back() = AdjTy;
2562       else {
2563         CGM.getDiags().Report(S.getAsmLoc(),
2564                               diag::err_asm_invalid_type_in_input)
2565             << OutExpr->getType() << OutputConstraint;
2566       }
2567 
2568       // Update largest vector width for any vector types.
2569       if (auto *VT = dyn_cast<llvm::VectorType>(ResultRegTypes.back()))
2570         LargestVectorWidth =
2571             std::max((uint64_t)LargestVectorWidth,
2572                      VT->getPrimitiveSizeInBits().getKnownMinValue());
2573     } else {
2574       Address DestAddr = Dest.getAddress(*this);
2575       // Matrix types in memory are represented by arrays, but accessed through
2576       // vector pointers, with the alignment specified on the access operation.
2577       // For inline assembly, update pointer arguments to use vector pointers.
2578       // Otherwise there will be a mismatch if the matrix is also an
2579       // input argument, which is represented as a vector.
2580       if (isa<MatrixType>(OutExpr->getType().getCanonicalType()))
2581         DestAddr = DestAddr.withElementType(ConvertType(OutExpr->getType()));
2582 
2583       ArgTypes.push_back(DestAddr.getType());
2584       ArgElemTypes.push_back(DestAddr.getElementType());
2585       Args.push_back(DestAddr.getPointer());
2586       Constraints += "=*";
2587       Constraints += OutputConstraint;
2588       ReadOnly = ReadNone = false;
2589     }
2590 
2591     if (Info.isReadWrite()) {
2592       InOutConstraints += ',';
2593 
2594       const Expr *InputExpr = S.getOutputExpr(i);
2595       llvm::Value *Arg;
2596       llvm::Type *ArgElemType;
2597       std::tie(Arg, ArgElemType) = EmitAsmInputLValue(
2598           Info, Dest, InputExpr->getType(), InOutConstraints,
2599           InputExpr->getExprLoc());
2600 
2601       if (llvm::Type* AdjTy =
2602           getTargetHooks().adjustInlineAsmType(*this, OutputConstraint,
2603                                                Arg->getType()))
2604         Arg = Builder.CreateBitCast(Arg, AdjTy);
2605 
2606       // Update largest vector width for any vector types.
2607       if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2608         LargestVectorWidth =
2609             std::max((uint64_t)LargestVectorWidth,
2610                      VT->getPrimitiveSizeInBits().getKnownMinValue());
2611       // Only tie earlyclobber physregs.
2612       if (Info.allowsRegister() && (GCCReg.empty() || Info.earlyClobber()))
2613         InOutConstraints += llvm::utostr(i);
2614       else
2615         InOutConstraints += OutputConstraint;
2616 
2617       InOutArgTypes.push_back(Arg->getType());
2618       InOutArgElemTypes.push_back(ArgElemType);
2619       InOutArgs.push_back(Arg);
2620     }
2621   }
2622 
2623   // If this is a Microsoft-style asm blob, store the return registers (EAX:EDX)
2624   // to the return value slot. Only do this when returning in registers.
2625   if (isa<MSAsmStmt>(&S)) {
2626     const ABIArgInfo &RetAI = CurFnInfo->getReturnInfo();
2627     if (RetAI.isDirect() || RetAI.isExtend()) {
2628       // Make a fake lvalue for the return value slot.
2629       LValue ReturnSlot = MakeAddrLValueWithoutTBAA(ReturnValue, FnRetTy);
2630       CGM.getTargetCodeGenInfo().addReturnRegisterOutputs(
2631           *this, ReturnSlot, Constraints, ResultRegTypes, ResultTruncRegTypes,
2632           ResultRegDests, AsmString, S.getNumOutputs());
2633       SawAsmBlock = true;
2634     }
2635   }
2636 
2637   for (unsigned i = 0, e = S.getNumInputs(); i != e; i++) {
2638     const Expr *InputExpr = S.getInputExpr(i);
2639 
2640     TargetInfo::ConstraintInfo &Info = InputConstraintInfos[i];
2641 
2642     if (Info.allowsMemory())
2643       ReadNone = false;
2644 
2645     if (!Constraints.empty())
2646       Constraints += ',';
2647 
2648     // Simplify the input constraint.
2649     std::string InputConstraint(S.getInputConstraint(i));
2650     InputConstraint = SimplifyConstraint(InputConstraint.c_str(), getTarget(),
2651                                          &OutputConstraintInfos);
2652 
2653     InputConstraint = AddVariableConstraints(
2654         InputConstraint, *InputExpr->IgnoreParenNoopCasts(getContext()),
2655         getTarget(), CGM, S, false /* No EarlyClobber */);
2656 
2657     std::string ReplaceConstraint (InputConstraint);
2658     llvm::Value *Arg;
2659     llvm::Type *ArgElemType;
2660     std::tie(Arg, ArgElemType) = EmitAsmInput(Info, InputExpr, Constraints);
2661 
2662     // If this input argument is tied to a larger output result, extend the
2663     // input to be the same size as the output.  The LLVM backend wants to see
2664     // the input and output of a matching constraint be the same size.  Note
2665     // that GCC does not define what the top bits are here.  We use zext because
2666     // that is usually cheaper, but LLVM IR should really get an anyext someday.
2667     if (Info.hasTiedOperand()) {
2668       unsigned Output = Info.getTiedOperand();
2669       QualType OutputType = S.getOutputExpr(Output)->getType();
2670       QualType InputTy = InputExpr->getType();
2671 
2672       if (getContext().getTypeSize(OutputType) >
2673           getContext().getTypeSize(InputTy)) {
2674         // Use ptrtoint as appropriate so that we can do our extension.
2675         if (isa<llvm::PointerType>(Arg->getType()))
2676           Arg = Builder.CreatePtrToInt(Arg, IntPtrTy);
2677         llvm::Type *OutputTy = ConvertType(OutputType);
2678         if (isa<llvm::IntegerType>(OutputTy))
2679           Arg = Builder.CreateZExt(Arg, OutputTy);
2680         else if (isa<llvm::PointerType>(OutputTy))
2681           Arg = Builder.CreateZExt(Arg, IntPtrTy);
2682         else if (OutputTy->isFloatingPointTy())
2683           Arg = Builder.CreateFPExt(Arg, OutputTy);
2684       }
2685       // Deal with the tied operands' constraint code in adjustInlineAsmType.
2686       ReplaceConstraint = OutputConstraints[Output];
2687     }
    if (llvm::Type *AdjTy = getTargetHooks().adjustInlineAsmType(
            *this, ReplaceConstraint, Arg->getType()))
2691       Arg = Builder.CreateBitCast(Arg, AdjTy);
2692     else
2693       CGM.getDiags().Report(S.getAsmLoc(), diag::err_asm_invalid_type_in_input)
2694           << InputExpr->getType() << InputConstraint;
2695 
2696     // Update largest vector width for any vector types.
2697     if (auto *VT = dyn_cast<llvm::VectorType>(Arg->getType()))
2698       LargestVectorWidth =
2699           std::max((uint64_t)LargestVectorWidth,
2700                    VT->getPrimitiveSizeInBits().getKnownMinValue());
2701 
2702     ArgTypes.push_back(Arg->getType());
2703     ArgElemTypes.push_back(ArgElemType);
2704     Args.push_back(Arg);
2705     Constraints += InputConstraint;
2706   }
2707 
2708   // Append the "input" part of inout constraints.
2709   for (unsigned i = 0, e = InOutArgs.size(); i != e; i++) {
2710     ArgTypes.push_back(InOutArgTypes[i]);
2711     ArgElemTypes.push_back(InOutArgElemTypes[i]);
2712     Args.push_back(InOutArgs[i]);
2713   }
2714   Constraints += InOutConstraints;
2715 
2716   // Labels
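  // For an asm goto such as (illustrative)
  //   asm goto("..." : : : : err);
  // each label becomes an "!i" constraint and an indirect destination of the
  // callbr emitted below, with "asm.fallthrough" as the normal successor.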
2717   SmallVector<llvm::BasicBlock *, 16> Transfer;
2718   llvm::BasicBlock *Fallthrough = nullptr;
2719   bool IsGCCAsmGoto = false;
2720   if (const auto *GS = dyn_cast<GCCAsmStmt>(&S)) {
2721     IsGCCAsmGoto = GS->isAsmGoto();
2722     if (IsGCCAsmGoto) {
2723       for (const auto *E : GS->labels()) {
2724         JumpDest Dest = getJumpDestForLabel(E->getLabel());
2725         Transfer.push_back(Dest.getBlock());
2726         if (!Constraints.empty())
2727           Constraints += ',';
2728         Constraints += "!i";
2729       }
2730       Fallthrough = createBasicBlock("asm.fallthrough");
2731     }
2732   }
2733 
2734   bool HasUnwindClobber = false;
2735 
2736   // Clobbers
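  // For example (illustrative), asm volatile("" ::: "memory", "cc") appends
  // "~{memory},~{cc}" to the constraint string; "unwind" is handled separately
  // since it marks the asm as potentially unwinding rather than clobbering a
  // register.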
2737   for (unsigned i = 0, e = S.getNumClobbers(); i != e; i++) {
2738     StringRef Clobber = S.getClobber(i);
2739 
2740     if (Clobber == "memory")
2741       ReadOnly = ReadNone = false;
2742     else if (Clobber == "unwind") {
2743       HasUnwindClobber = true;
2744       continue;
2745     } else if (Clobber != "cc") {
2746       Clobber = getTarget().getNormalizedGCCRegisterName(Clobber);
2747       if (CGM.getCodeGenOpts().StackClashProtector &&
2748           getTarget().isSPRegName(Clobber)) {
2749         CGM.getDiags().Report(S.getAsmLoc(),
2750                               diag::warn_stack_clash_protection_inline_asm);
2751       }
2752     }
2753 
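    // For MS-style asm, turn an EAX/EDX return-register output into an
    // early-clobber constraint ("={eax}" -> "=&{eax}", "=A" -> "=&A") instead
    // of re-listing the register as a clobber.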
2754     if (isa<MSAsmStmt>(&S)) {
2755       if (Clobber == "eax" || Clobber == "edx") {
2756         if (Constraints.find("=&A") != std::string::npos)
2757           continue;
2758         std::string::size_type position1 =
2759             Constraints.find("={" + Clobber.str() + "}");
2760         if (position1 != std::string::npos) {
2761           Constraints.insert(position1 + 1, "&");
2762           continue;
2763         }
2764         std::string::size_type position2 = Constraints.find("=A");
2765         if (position2 != std::string::npos) {
2766           Constraints.insert(position2 + 1, "&");
2767           continue;
2768         }
2769       }
2770     }
2771     if (!Constraints.empty())
2772       Constraints += ',';
2773 
2774     Constraints += "~{";
2775     Constraints += Clobber;
2776     Constraints += '}';
2777   }
2778 
2779   assert(!(HasUnwindClobber && IsGCCAsmGoto) &&
2780          "unwind clobber can't be used with asm goto");
2781 
  // Add machine-specific clobbers.
2783   std::string_view MachineClobbers = getTarget().getClobbers();
2784   if (!MachineClobbers.empty()) {
2785     if (!Constraints.empty())
2786       Constraints += ',';
2787     Constraints += MachineClobbers;
2788   }
2789 
2790   llvm::Type *ResultType;
2791   if (ResultRegTypes.empty())
2792     ResultType = VoidTy;
2793   else if (ResultRegTypes.size() == 1)
2794     ResultType = ResultRegTypes[0];
2795   else
2796     ResultType = llvm::StructType::get(getLLVMContext(), ResultRegTypes);
2797 
  llvm::FunctionType *FTy =
      llvm::FunctionType::get(ResultType, ArgTypes, false);
2800 
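  // An asm with no outputs is conservatively treated as having side effects so
  // that it is not removed even when it is not marked volatile.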
2801   bool HasSideEffect = S.isVolatile() || S.getNumOutputs() == 0;
2802 
2803   llvm::InlineAsm::AsmDialect GnuAsmDialect =
2804       CGM.getCodeGenOpts().getInlineAsmDialect() == CodeGenOptions::IAD_ATT
2805           ? llvm::InlineAsm::AD_ATT
2806           : llvm::InlineAsm::AD_Intel;
  llvm::InlineAsm::AsmDialect AsmDialect =
      isa<MSAsmStmt>(&S) ? llvm::InlineAsm::AD_Intel : GnuAsmDialect;
2809 
2810   llvm::InlineAsm *IA = llvm::InlineAsm::get(
2811       FTy, AsmString, Constraints, HasSideEffect,
2812       /* IsAlignStack */ false, AsmDialect, HasUnwindClobber);
  std::vector<llvm::Value *> RegResults;
  llvm::CallBrInst *CBR = nullptr;
2815   llvm::DenseMap<llvm::BasicBlock *, SmallVector<llvm::Value *, 4>>
2816       CBRRegResults;
2817   if (IsGCCAsmGoto) {
2818     CBR = Builder.CreateCallBr(IA, Fallthrough, Transfer, Args);
2819     EmitBlock(Fallthrough);
2820     UpdateAsmCallInst(*CBR, HasSideEffect, false, ReadOnly, ReadNone,
2821                       InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2822                       *this, RegResults);
2823     // Because we are emitting code top to bottom, we don't have enough
2824     // information at this point to know precisely whether we have a critical
2825     // edge. If we have outputs, split all indirect destinations.
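    // Each indirect destination gets a synthesized "<dest>.split" block that
    // extracts the asm results and then branches to the original destination,
    // giving the stores emitted later an insertion point on that edge.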
2826     if (!RegResults.empty()) {
2827       unsigned i = 0;
2828       for (llvm::BasicBlock *Dest : CBR->getIndirectDests()) {
2829         llvm::Twine SynthName = Dest->getName() + ".split";
2830         llvm::BasicBlock *SynthBB = createBasicBlock(SynthName);
2831         llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2832         Builder.SetInsertPoint(SynthBB);
2833 
2834         if (ResultRegTypes.size() == 1) {
2835           CBRRegResults[SynthBB].push_back(CBR);
2836         } else {
2837           for (unsigned j = 0, e = ResultRegTypes.size(); j != e; ++j) {
2838             llvm::Value *Tmp = Builder.CreateExtractValue(CBR, j, "asmresult");
2839             CBRRegResults[SynthBB].push_back(Tmp);
2840           }
2841         }
2842 
2843         EmitBranch(Dest);
2844         EmitBlock(SynthBB);
2845         CBR->setIndirectDest(i++, SynthBB);
2846       }
2847     }
2848   } else if (HasUnwindClobber) {
2849     llvm::CallBase *Result = EmitCallOrInvoke(IA, Args, "");
2850     UpdateAsmCallInst(*Result, HasSideEffect, true, ReadOnly, ReadNone,
2851                       InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2852                       *this, RegResults);
2853   } else {
2854     llvm::CallInst *Result =
2855         Builder.CreateCall(IA, Args, getBundlesForFunclet(IA));
2856     UpdateAsmCallInst(*Result, HasSideEffect, false, ReadOnly, ReadNone,
2857                       InNoMergeAttributedStmt, S, ResultRegTypes, ArgElemTypes,
2858                       *this, RegResults);
2859   }
2860 
2861   EmitAsmStores(*this, S, RegResults, ResultRegTypes, ResultTruncRegTypes,
2862                 ResultRegDests, ResultRegQualTys, ResultTypeRequiresCast,
2863                 ResultRegIsFlagReg);
2864 
2865   // If this is an asm goto with outputs, repeat EmitAsmStores, but with a
2866   // different insertion point; one for each indirect destination and with
2867   // CBRRegResults rather than RegResults.
2868   if (IsGCCAsmGoto && !CBRRegResults.empty()) {
2869     for (llvm::BasicBlock *Succ : CBR->getIndirectDests()) {
2870       llvm::IRBuilderBase::InsertPointGuard IPG(Builder);
2871       Builder.SetInsertPoint(Succ, --(Succ->end()));
2872       EmitAsmStores(*this, S, CBRRegResults[Succ], ResultRegTypes,
2873                     ResultTruncRegTypes, ResultRegDests, ResultRegQualTys,
2874                     ResultTypeRequiresCast, ResultRegIsFlagReg);
2875     }
2876   }
2877 }
2878 
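/// Build and initialize the temporary record that holds the captures of a
/// CapturedStmt, returning an lvalue for the aggregate.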
2879 LValue CodeGenFunction::InitCapturedStruct(const CapturedStmt &S) {
2880   const RecordDecl *RD = S.getCapturedRecordDecl();
2881   QualType RecordTy = getContext().getRecordType(RD);
2882 
2883   // Initialize the captured struct.
2884   LValue SlotLV =
2885     MakeAddrLValue(CreateMemTemp(RecordTy, "agg.captured"), RecordTy);
2886 
2887   RecordDecl::field_iterator CurField = RD->field_begin();
2888   for (CapturedStmt::const_capture_init_iterator I = S.capture_init_begin(),
2889                                                  E = S.capture_init_end();
2890        I != E; ++I, ++CurField) {
2891     LValue LV = EmitLValueForFieldInitialization(SlotLV, *CurField);
2892     if (CurField->hasCapturedVLAType()) {
2893       EmitLambdaVLACapture(CurField->getCapturedVLAType(), LV);
2894     } else {
2895       EmitInitializerForField(*CurField, LV, *I);
2896     }
2897   }
2898 
2899   return SlotLV;
2900 }
2901 
2902 /// Generate an outlined function for the body of a CapturedStmt, store any
2903 /// captured variables into the captured struct, and call the outlined function.
2904 llvm::Function *
2905 CodeGenFunction::EmitCapturedStmt(const CapturedStmt &S, CapturedRegionKind K) {
2906   LValue CapStruct = InitCapturedStruct(S);
2907 
  // Emit the CapturedDecl as an outlined helper function.
2909   CodeGenFunction CGF(CGM, true);
2910   CGCapturedStmtRAII CapInfoRAII(CGF, new CGCapturedStmtInfo(S, K));
2911   llvm::Function *F = CGF.GenerateCapturedStmtFunction(S);
2912   delete CGF.CapturedStmtInfo;
2913 
2914   // Emit call to the helper function.
2915   EmitCallOrInvoke(F, CapStruct.getPointer(*this));
2916 
2917   return F;
2918 }
2919 
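/// Initialize the captured struct for a CapturedStmt and return its address,
/// suitable for passing to the outlined helper function.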
2920 Address CodeGenFunction::GenerateCapturedStmtArgument(const CapturedStmt &S) {
2921   LValue CapStruct = InitCapturedStruct(S);
2922   return CapStruct.getAddress(*this);
2923 }
2924 
2925 /// Creates the outlined function for a CapturedStmt.
2926 llvm::Function *
2927 CodeGenFunction::GenerateCapturedStmtFunction(const CapturedStmt &S) {
2928   assert(CapturedStmtInfo &&
2929     "CapturedStmtInfo should be set when generating the captured function");
2930   const CapturedDecl *CD = S.getCapturedDecl();
2931   const RecordDecl *RD = S.getCapturedRecordDecl();
2932   SourceLocation Loc = S.getBeginLoc();
2933   assert(CD->hasBody() && "missing CapturedDecl body");
2934 
2935   // Build the argument list.
2936   ASTContext &Ctx = CGM.getContext();
2937   FunctionArgList Args;
2938   Args.append(CD->param_begin(), CD->param_end());
2939 
2940   // Create the function declaration.
2941   const CGFunctionInfo &FuncInfo =
2942     CGM.getTypes().arrangeBuiltinFunctionDeclaration(Ctx.VoidTy, Args);
2943   llvm::FunctionType *FuncLLVMTy = CGM.getTypes().GetFunctionType(FuncInfo);
2944 
2945   llvm::Function *F =
2946     llvm::Function::Create(FuncLLVMTy, llvm::GlobalValue::InternalLinkage,
2947                            CapturedStmtInfo->getHelperName(), &CGM.getModule());
2948   CGM.SetInternalFunctionAttributes(CD, F, FuncInfo);
2949   if (CD->isNothrow())
2950     F->addFnAttr(llvm::Attribute::NoUnwind);
2951 
2952   // Generate the function.
2953   StartFunction(CD, Ctx.VoidTy, F, FuncInfo, Args, CD->getLocation(),
2954                 CD->getBody()->getBeginLoc());
2955   // Set the context parameter in CapturedStmtInfo.
2956   Address DeclPtr = GetAddrOfLocalVar(CD->getContextParam());
2957   CapturedStmtInfo->setContextValue(Builder.CreateLoad(DeclPtr));
2958 
2959   // Initialize variable-length arrays.
2960   LValue Base = MakeNaturalAlignAddrLValue(CapturedStmtInfo->getContextValue(),
2961                                            Ctx.getTagDeclType(RD));
2962   for (auto *FD : RD->fields()) {
2963     if (FD->hasCapturedVLAType()) {
2964       auto *ExprArg =
2965           EmitLoadOfLValue(EmitLValueForField(Base, FD), S.getBeginLoc())
2966               .getScalarVal();
      auto *VAT = FD->getCapturedVLAType();
2968       VLASizeMap[VAT->getSizeExpr()] = ExprArg;
2969     }
2970   }
2971 
2972   // If 'this' is captured, load it into CXXThisValue.
2973   if (CapturedStmtInfo->isCXXThisExprCaptured()) {
2974     FieldDecl *FD = CapturedStmtInfo->getThisFieldDecl();
2975     LValue ThisLValue = EmitLValueForField(Base, FD);
2976     CXXThisValue = EmitLoadOfLValue(ThisLValue, Loc).getScalarVal();
2977   }
2978 
2979   PGO.assignRegionCounters(GlobalDecl(CD), F);
2980   CapturedStmtInfo->EmitBody(*this, CD->getBody());
2981   FinishFunction(CD->getBodyRBrace());
2982 
2983   return F;
2984 }
2985