1 //===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
9 /// This file implements the IRTranslator class.
10 //===----------------------------------------------------------------------===//
11 
12 #include "llvm/CodeGen/GlobalISel/IRTranslator.h"
13 #include "llvm/ADT/PostOrderIterator.h"
14 #include "llvm/ADT/STLExtras.h"
15 #include "llvm/ADT/ScopeExit.h"
16 #include "llvm/ADT/SmallSet.h"
17 #include "llvm/ADT/SmallVector.h"
18 #include "llvm/Analysis/AliasAnalysis.h"
19 #include "llvm/Analysis/BranchProbabilityInfo.h"
20 #include "llvm/Analysis/OptimizationRemarkEmitter.h"
21 #include "llvm/Analysis/ValueTracking.h"
22 #include "llvm/CodeGen/Analysis.h"
23 #include "llvm/CodeGen/GlobalISel/CSEInfo.h"
24 #include "llvm/CodeGen/GlobalISel/CSEMIRBuilder.h"
25 #include "llvm/CodeGen/GlobalISel/CallLowering.h"
26 #include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
27 #include "llvm/CodeGen/GlobalISel/InlineAsmLowering.h"
28 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
29 #include "llvm/CodeGen/LowLevelType.h"
30 #include "llvm/CodeGen/MachineBasicBlock.h"
31 #include "llvm/CodeGen/MachineFrameInfo.h"
32 #include "llvm/CodeGen/MachineFunction.h"
33 #include "llvm/CodeGen/MachineInstrBuilder.h"
34 #include "llvm/CodeGen/MachineMemOperand.h"
35 #include "llvm/CodeGen/MachineModuleInfo.h"
36 #include "llvm/CodeGen/MachineOperand.h"
37 #include "llvm/CodeGen/MachineRegisterInfo.h"
38 #include "llvm/CodeGen/RuntimeLibcalls.h"
39 #include "llvm/CodeGen/StackProtector.h"
40 #include "llvm/CodeGen/SwitchLoweringUtils.h"
41 #include "llvm/CodeGen/TargetFrameLowering.h"
42 #include "llvm/CodeGen/TargetInstrInfo.h"
43 #include "llvm/CodeGen/TargetLowering.h"
44 #include "llvm/CodeGen/TargetPassConfig.h"
45 #include "llvm/CodeGen/TargetRegisterInfo.h"
46 #include "llvm/CodeGen/TargetSubtargetInfo.h"
47 #include "llvm/IR/BasicBlock.h"
48 #include "llvm/IR/CFG.h"
49 #include "llvm/IR/Constant.h"
50 #include "llvm/IR/Constants.h"
51 #include "llvm/IR/DataLayout.h"
52 #include "llvm/IR/DerivedTypes.h"
53 #include "llvm/IR/DiagnosticInfo.h"
54 #include "llvm/IR/Function.h"
55 #include "llvm/IR/GetElementPtrTypeIterator.h"
56 #include "llvm/IR/InlineAsm.h"
57 #include "llvm/IR/InstrTypes.h"
58 #include "llvm/IR/Instructions.h"
59 #include "llvm/IR/IntrinsicInst.h"
60 #include "llvm/IR/Intrinsics.h"
61 #include "llvm/IR/LLVMContext.h"
62 #include "llvm/IR/Metadata.h"
63 #include "llvm/IR/PatternMatch.h"
64 #include "llvm/IR/Type.h"
65 #include "llvm/IR/User.h"
66 #include "llvm/IR/Value.h"
67 #include "llvm/InitializePasses.h"
68 #include "llvm/MC/MCContext.h"
69 #include "llvm/Pass.h"
70 #include "llvm/Support/Casting.h"
71 #include "llvm/Support/CodeGen.h"
72 #include "llvm/Support/Debug.h"
73 #include "llvm/Support/ErrorHandling.h"
74 #include "llvm/Support/LowLevelTypeImpl.h"
75 #include "llvm/Support/MathExtras.h"
76 #include "llvm/Support/raw_ostream.h"
77 #include "llvm/Target/TargetIntrinsicInfo.h"
78 #include "llvm/Target/TargetMachine.h"
79 #include "llvm/Transforms/Utils/MemoryOpRemark.h"
80 #include <algorithm>
81 #include <cassert>
82 #include <cstdint>
83 #include <iterator>
84 #include <string>
85 #include <utility>
86 #include <vector>
87 
88 #define DEBUG_TYPE "irtranslator"
89 
90 using namespace llvm;
91 
92 static cl::opt<bool>
93     EnableCSEInIRTranslator("enable-cse-in-irtranslator",
94                             cl::desc("Should enable CSE in irtranslator"),
95                             cl::Optional, cl::init(false));
96 char IRTranslator::ID = 0;
97 
98 INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
99                 false, false)
100 INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
101 INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
102 INITIALIZE_PASS_DEPENDENCY(BlockFrequencyInfoWrapperPass)
103 INITIALIZE_PASS_DEPENDENCY(StackProtector)
104 INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
105 INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
106                 false, false)
107 
108 static void reportTranslationError(MachineFunction &MF,
109                                    const TargetPassConfig &TPC,
110                                    OptimizationRemarkEmitter &ORE,
111                                    OptimizationRemarkMissed &R) {
112   MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);
113 
114   // Print the function name explicitly if we don't have a debug location (which
115   // makes the diagnostic less useful) or if we're going to emit a raw error.
116   if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
117     R << (" (in function: " + MF.getName() + ")").str();
118 
119   if (TPC.isGlobalISelAbortEnabled())
120     report_fatal_error(Twine(R.getMsg()));
121   else
122     ORE.emit(R);
123 }
124 
125 IRTranslator::IRTranslator(CodeGenOpt::Level optlevel)
126     : MachineFunctionPass(ID), OptLevel(optlevel) {}
127 
128 #ifndef NDEBUG
129 namespace {
130 /// Verify that every instruction created has the same DILocation as the
131 /// instruction being translated.
132 class DILocationVerifier : public GISelChangeObserver {
133   const Instruction *CurrInst = nullptr;
134 
135 public:
136   DILocationVerifier() = default;
137   ~DILocationVerifier() = default;
138 
139   const Instruction *getCurrentInst() const { return CurrInst; }
140   void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }
141 
142   void erasingInstr(MachineInstr &MI) override {}
143   void changingInstr(MachineInstr &MI) override {}
144   void changedInstr(MachineInstr &MI) override {}
145 
146   void createdInstr(MachineInstr &MI) override {
147     assert(getCurrentInst() && "Inserted instruction without a current MI");
148 
149     // Only print the check message if we're actually checking it.
150 #ifndef NDEBUG
151     LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
152                       << " was copied to " << MI);
153 #endif
154     // We allow insts in the entry block to have a debug loc line of 0 because
155     // they could have originated from constants, and we don't want a jumpy
156     // debug experience.
157     assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
158             MI.getDebugLoc().getLine() == 0) &&
159            "Line info was not transferred to all instructions");
160   }
161 };
162 } // namespace
163 #endif // ifndef NDEBUG
164 
165 
166 void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
167   AU.addRequired<StackProtector>();
168   AU.addRequired<TargetPassConfig>();
169   AU.addRequired<GISelCSEAnalysisWrapperPass>();
170   if (OptLevel != CodeGenOpt::None) {
171     AU.addRequired<BranchProbabilityInfoWrapperPass>();
172     AU.addRequired<AAResultsWrapperPass>();
173   }
174   AU.addRequired<TargetLibraryInfoWrapperPass>();
175   AU.addPreserved<TargetLibraryInfoWrapperPass>();
176   getSelectionDAGFallbackAnalysisUsage(AU);
177   MachineFunctionPass::getAnalysisUsage(AU);
178 }
179 
180 IRTranslator::ValueToVRegInfo::VRegListT &
181 IRTranslator::allocateVRegs(const Value &Val) {
182   auto VRegsIt = VMap.findVRegs(Val);
183   if (VRegsIt != VMap.vregs_end())
184     return *VRegsIt->second;
185   auto *Regs = VMap.getVRegs(Val);
186   auto *Offsets = VMap.getOffsets(Val);
187   SmallVector<LLT, 4> SplitTys;
188   computeValueLLTs(*DL, *Val.getType(), SplitTys,
189                    Offsets->empty() ? Offsets : nullptr);
190   for (unsigned i = 0; i < SplitTys.size(); ++i)
191     Regs->push_back(0);
192   return *Regs;
193 }
194 
195 ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
196   auto VRegsIt = VMap.findVRegs(Val);
197   if (VRegsIt != VMap.vregs_end())
198     return *VRegsIt->second;
199 
200   if (Val.getType()->isVoidTy())
201     return *VMap.getVRegs(Val);
202 
203   // Create entry for this type.
204   auto *VRegs = VMap.getVRegs(Val);
205   auto *Offsets = VMap.getOffsets(Val);
206 
207   assert(Val.getType()->isSized() &&
208          "Don't know how to create an empty vreg");
209 
210   SmallVector<LLT, 4> SplitTys;
211   computeValueLLTs(*DL, *Val.getType(), SplitTys,
212                    Offsets->empty() ? Offsets : nullptr);
213 
214   if (!isa<Constant>(Val)) {
215     for (auto Ty : SplitTys)
216       VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
217     return *VRegs;
218   }
219 
220   if (Val.getType()->isAggregateType()) {
221     // UndefValue, ConstantAggregateZero
222     auto &C = cast<Constant>(Val);
223     unsigned Idx = 0;
224     while (auto Elt = C.getAggregateElement(Idx++)) {
225       auto EltRegs = getOrCreateVRegs(*Elt);
226       llvm::copy(EltRegs, std::back_inserter(*VRegs));
227     }
228   } else {
229     assert(SplitTys.size() == 1 && "unexpectedly split LLT");
230     VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
231     bool Success = translate(cast<Constant>(Val), VRegs->front());
232     if (!Success) {
233       OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
234                                  MF->getFunction().getSubprogram(),
235                                  &MF->getFunction().getEntryBlock());
236       R << "unable to translate constant: " << ore::NV("Type", Val.getType());
237       reportTranslationError(*MF, *TPC, *ORE, R);
238       return *VRegs;
239     }
240   }
241 
242   return *VRegs;
243 }
244 
245 int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
246   auto MapEntry = FrameIndices.find(&AI);
247   if (MapEntry != FrameIndices.end())
248     return MapEntry->second;
249 
250   uint64_t ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
251   uint64_t Size =
252       ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();
253 
254   // Always allocate at least one byte.
255   Size = std::max<uint64_t>(Size, 1u);
256 
257   int &FI = FrameIndices[&AI];
258   FI = MF->getFrameInfo().CreateStackObject(Size, AI.getAlign(), false, &AI);
259   return FI;
260 }
261 
262 Align IRTranslator::getMemOpAlign(const Instruction &I) {
263   if (const StoreInst *SI = dyn_cast<StoreInst>(&I))
264     return SI->getAlign();
265   if (const LoadInst *LI = dyn_cast<LoadInst>(&I))
266     return LI->getAlign();
267   if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I))
268     return AI->getAlign();
269   if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I))
270     return AI->getAlign();
271 
272   OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
273   R << "unable to translate memop: " << ore::NV("Opcode", &I);
274   reportTranslationError(*MF, *TPC, *ORE, R);
275   return Align(1);
276 }
277 
278 MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
279   MachineBasicBlock *&MBB = BBToMBB[&BB];
280   assert(MBB && "BasicBlock was not encountered before");
281   return *MBB;
282 }
283 
284 void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
285   assert(NewPred && "new predecessor must be a real MachineBasicBlock");
286   MachinePreds[Edge].push_back(NewPred);
287 }
288 
289 bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
290                                      MachineIRBuilder &MIRBuilder) {
291   // Get or create a virtual register for each value.
292   // Unless the value is a Constant => loadimm cst?
293   // or inline constant each time?
294   // Creation of a virtual register needs to have a size.
295   Register Op0 = getOrCreateVReg(*U.getOperand(0));
296   Register Op1 = getOrCreateVReg(*U.getOperand(1));
297   Register Res = getOrCreateVReg(U);
298   uint16_t Flags = 0;
299   if (isa<Instruction>(U)) {
300     const Instruction &I = cast<Instruction>(U);
301     Flags = MachineInstr::copyFlagsFromInstruction(I);
302   }
303 
304   MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
305   return true;
306 }
307 
308 bool IRTranslator::translateUnaryOp(unsigned Opcode, const User &U,
309                                     MachineIRBuilder &MIRBuilder) {
310   Register Op0 = getOrCreateVReg(*U.getOperand(0));
311   Register Res = getOrCreateVReg(U);
312   uint16_t Flags = 0;
313   if (isa<Instruction>(U)) {
314     const Instruction &I = cast<Instruction>(U);
315     Flags = MachineInstr::copyFlagsFromInstruction(I);
316   }
317   MIRBuilder.buildInstr(Opcode, {Res}, {Op0}, Flags);
318   return true;
319 }
320 
321 bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
322   return translateUnaryOp(TargetOpcode::G_FNEG, U, MIRBuilder);
323 }
324 
325 bool IRTranslator::translateCompare(const User &U,
326                                     MachineIRBuilder &MIRBuilder) {
327   auto *CI = dyn_cast<CmpInst>(&U);
328   Register Op0 = getOrCreateVReg(*U.getOperand(0));
329   Register Op1 = getOrCreateVReg(*U.getOperand(1));
330   Register Res = getOrCreateVReg(U);
331   CmpInst::Predicate Pred =
332       CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
333                                     cast<ConstantExpr>(U).getPredicate());
334   if (CmpInst::isIntPredicate(Pred))
335     MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
336   else if (Pred == CmpInst::FCMP_FALSE)
337     MIRBuilder.buildCopy(
338         Res, getOrCreateVReg(*Constant::getNullValue(U.getType())));
339   else if (Pred == CmpInst::FCMP_TRUE)
340     MIRBuilder.buildCopy(
341         Res, getOrCreateVReg(*Constant::getAllOnesValue(U.getType())));
342   else {
343     uint16_t Flags = 0;
344     if (CI)
345       Flags = MachineInstr::copyFlagsFromInstruction(*CI);
346     MIRBuilder.buildFCmp(Pred, Res, Op0, Op1, Flags);
347   }
348 
349   return true;
350 }
351 
352 bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
353   const ReturnInst &RI = cast<ReturnInst>(U);
354   const Value *Ret = RI.getReturnValue();
355   if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
356     Ret = nullptr;
357 
358   ArrayRef<Register> VRegs;
359   if (Ret)
360     VRegs = getOrCreateVRegs(*Ret);
361 
362   Register SwiftErrorVReg = 0;
363   if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
364     SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
365         &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
366   }
367 
368   // The target may mess up with the insertion point, but
369   // this is not important as a return is the last instruction
370   // of the block anyway.
371   return CLI->lowerReturn(MIRBuilder, Ret, VRegs, FuncInfo, SwiftErrorVReg);
372 }
373 
374 void IRTranslator::emitBranchForMergedCondition(
375     const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
376     MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
377     BranchProbability TProb, BranchProbability FProb, bool InvertCond) {
378   // If the leaf of the tree is a comparison, merge the condition into
379   // the caseblock.
380   if (const CmpInst *BOp = dyn_cast<CmpInst>(Cond)) {
381     CmpInst::Predicate Condition;
382     if (const ICmpInst *IC = dyn_cast<ICmpInst>(Cond)) {
383       Condition = InvertCond ? IC->getInversePredicate() : IC->getPredicate();
384     } else {
385       const FCmpInst *FC = cast<FCmpInst>(Cond);
386       Condition = InvertCond ? FC->getInversePredicate() : FC->getPredicate();
387     }
388 
389     SwitchCG::CaseBlock CB(Condition, false, BOp->getOperand(0),
390                            BOp->getOperand(1), nullptr, TBB, FBB, CurBB,
391                            CurBuilder->getDebugLoc(), TProb, FProb);
392     SL->SwitchCases.push_back(CB);
393     return;
394   }
395 
396   // Create a CaseBlock record representing this branch.
397   CmpInst::Predicate Pred = InvertCond ? CmpInst::ICMP_NE : CmpInst::ICMP_EQ;
398   SwitchCG::CaseBlock CB(
399       Pred, false, Cond, ConstantInt::getTrue(MF->getFunction().getContext()),
400       nullptr, TBB, FBB, CurBB, CurBuilder->getDebugLoc(), TProb, FProb);
401   SL->SwitchCases.push_back(CB);
402 }
403 
404 static bool isValInBlock(const Value *V, const BasicBlock *BB) {
405   if (const Instruction *I = dyn_cast<Instruction>(V))
406     return I->getParent() == BB;
407   return true;
408 }
409 
410 void IRTranslator::findMergedConditions(
411     const Value *Cond, MachineBasicBlock *TBB, MachineBasicBlock *FBB,
412     MachineBasicBlock *CurBB, MachineBasicBlock *SwitchBB,
413     Instruction::BinaryOps Opc, BranchProbability TProb,
414     BranchProbability FProb, bool InvertCond) {
415   using namespace PatternMatch;
416   assert((Opc == Instruction::And || Opc == Instruction::Or) &&
417          "Expected Opc to be AND/OR");
418   // Skip over not part of the tree and remember to invert op and operands at
419   // next level.
420   Value *NotCond;
421   if (match(Cond, m_OneUse(m_Not(m_Value(NotCond)))) &&
422       isValInBlock(NotCond, CurBB->getBasicBlock())) {
423     findMergedConditions(NotCond, TBB, FBB, CurBB, SwitchBB, Opc, TProb, FProb,
424                          !InvertCond);
425     return;
426   }
427 
428   const Instruction *BOp = dyn_cast<Instruction>(Cond);
429   const Value *BOpOp0, *BOpOp1;
430   // Compute the effective opcode for Cond, taking into account whether it needs
431   // to be inverted, e.g.
432   //   and (not (or A, B)), C
433   // gets lowered as
434   //   and (and (not A, not B), C)
435   Instruction::BinaryOps BOpc = (Instruction::BinaryOps)0;
436   if (BOp) {
437     BOpc = match(BOp, m_LogicalAnd(m_Value(BOpOp0), m_Value(BOpOp1)))
438                ? Instruction::And
439                : (match(BOp, m_LogicalOr(m_Value(BOpOp0), m_Value(BOpOp1)))
440                       ? Instruction::Or
441                       : (Instruction::BinaryOps)0);
442     if (InvertCond) {
443       if (BOpc == Instruction::And)
444         BOpc = Instruction::Or;
445       else if (BOpc == Instruction::Or)
446         BOpc = Instruction::And;
447     }
448   }
449 
450   // If this node is not part of the or/and tree, emit it as a branch.
451   // Note that all nodes in the tree should have same opcode.
452   bool BOpIsInOrAndTree = BOpc && BOpc == Opc && BOp->hasOneUse();
453   if (!BOpIsInOrAndTree || BOp->getParent() != CurBB->getBasicBlock() ||
454       !isValInBlock(BOpOp0, CurBB->getBasicBlock()) ||
455       !isValInBlock(BOpOp1, CurBB->getBasicBlock())) {
456     emitBranchForMergedCondition(Cond, TBB, FBB, CurBB, SwitchBB, TProb, FProb,
457                                  InvertCond);
458     return;
459   }
460 
461   //  Create TmpBB after CurBB.
462   MachineFunction::iterator BBI(CurBB);
463   MachineBasicBlock *TmpBB =
464       MF->CreateMachineBasicBlock(CurBB->getBasicBlock());
465   CurBB->getParent()->insert(++BBI, TmpBB);
466 
467   if (Opc == Instruction::Or) {
468     // Codegen X | Y as:
469     // BB1:
470     //   jmp_if_X TBB
471     //   jmp TmpBB
472     // TmpBB:
473     //   jmp_if_Y TBB
474     //   jmp FBB
475     //
476 
477     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
478     // The requirement is that
479     //   TrueProb for BB1 + (FalseProb for BB1 * TrueProb for TmpBB)
480     //     = TrueProb for original BB.
481     // Assuming the original probabilities are A and B, one choice is to set
482     // BB1's probabilities to A/2 and A/2+B, and set TmpBB's probabilities to
483     // A/(1+B) and 2B/(1+B). This choice assumes that
484     //   TrueProb for BB1 == FalseProb for BB1 * TrueProb for TmpBB.
485     // Another choice is to assume TrueProb for BB1 equals to TrueProb for
486     // TmpBB, but the math is more complicated.
487 
488     auto NewTrueProb = TProb / 2;
489     auto NewFalseProb = TProb / 2 + FProb;
490     // Emit the LHS condition.
491     findMergedConditions(BOpOp0, TBB, TmpBB, CurBB, SwitchBB, Opc, NewTrueProb,
492                          NewFalseProb, InvertCond);
493 
494     // Normalize A/2 and B to get A/(1+B) and 2B/(1+B).
495     SmallVector<BranchProbability, 2> Probs{TProb / 2, FProb};
496     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
497     // Emit the RHS condition into TmpBB.
498     findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
499                          Probs[1], InvertCond);
500   } else {
501     assert(Opc == Instruction::And && "Unknown merge op!");
502     // Codegen X & Y as:
503     // BB1:
504     //   jmp_if_X TmpBB
505     //   jmp FBB
506     // TmpBB:
507     //   jmp_if_Y TBB
508     //   jmp FBB
509     //
510     //  This requires creation of TmpBB after CurBB.
511 
512     // We have flexibility in setting Prob for BB1 and Prob for TmpBB.
513     // The requirement is that
514     //   FalseProb for BB1 + (TrueProb for BB1 * FalseProb for TmpBB)
515     //     = FalseProb for original BB.
516     // Assuming the original probabilities are A and B, one choice is to set
517     // BB1's probabilities to A+B/2 and B/2, and set TmpBB's probabilities to
518     // 2A/(1+A) and B/(1+A). This choice assumes that FalseProb for BB1 ==
519     // TrueProb for BB1 * FalseProb for TmpBB.
520 
521     auto NewTrueProb = TProb + FProb / 2;
522     auto NewFalseProb = FProb / 2;
523     // Emit the LHS condition.
524     findMergedConditions(BOpOp0, TmpBB, FBB, CurBB, SwitchBB, Opc, NewTrueProb,
525                          NewFalseProb, InvertCond);
526 
527     // Normalize A and B/2 to get 2A/(1+A) and B/(1+A).
528     SmallVector<BranchProbability, 2> Probs{TProb, FProb / 2};
529     BranchProbability::normalizeProbabilities(Probs.begin(), Probs.end());
530     // Emit the RHS condition into TmpBB.
531     findMergedConditions(BOpOp1, TBB, FBB, TmpBB, SwitchBB, Opc, Probs[0],
532                          Probs[1], InvertCond);
533   }
534 }
535 
536 bool IRTranslator::shouldEmitAsBranches(
537     const std::vector<SwitchCG::CaseBlock> &Cases) {
538   // For multiple cases, it's better to emit as branches.
539   if (Cases.size() != 2)
540     return true;
541 
542   // If this is two comparisons of the same values or'd or and'd together, they
543   // will get folded into a single comparison, so don't emit two blocks.
544   if ((Cases[0].CmpLHS == Cases[1].CmpLHS &&
545        Cases[0].CmpRHS == Cases[1].CmpRHS) ||
546       (Cases[0].CmpRHS == Cases[1].CmpLHS &&
547        Cases[0].CmpLHS == Cases[1].CmpRHS)) {
548     return false;
549   }
550 
551   // Handle: (X != null) | (Y != null) --> (X|Y) != 0
552   // Handle: (X == null) & (Y == null) --> (X|Y) == 0
553   if (Cases[0].CmpRHS == Cases[1].CmpRHS &&
554       Cases[0].PredInfo.Pred == Cases[1].PredInfo.Pred &&
555       isa<Constant>(Cases[0].CmpRHS) &&
556       cast<Constant>(Cases[0].CmpRHS)->isNullValue()) {
557     if (Cases[0].PredInfo.Pred == CmpInst::ICMP_EQ &&
558         Cases[0].TrueBB == Cases[1].ThisBB)
559       return false;
560     if (Cases[0].PredInfo.Pred == CmpInst::ICMP_NE &&
561         Cases[0].FalseBB == Cases[1].ThisBB)
562       return false;
563   }
564 
565   return true;
566 }
567 
568 bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
569   const BranchInst &BrInst = cast<BranchInst>(U);
570   auto &CurMBB = MIRBuilder.getMBB();
571   auto *Succ0MBB = &getMBB(*BrInst.getSuccessor(0));
572 
573   if (BrInst.isUnconditional()) {
574     // If the unconditional target is the layout successor, fallthrough.
575     if (OptLevel == CodeGenOpt::None || !CurMBB.isLayoutSuccessor(Succ0MBB))
576       MIRBuilder.buildBr(*Succ0MBB);
577 
578     // Link successors.
579     for (const BasicBlock *Succ : successors(&BrInst))
580       CurMBB.addSuccessor(&getMBB(*Succ));
581     return true;
582   }
583 
584   // If this condition is one of the special cases we handle, do special stuff
585   // now.
586   const Value *CondVal = BrInst.getCondition();
587   MachineBasicBlock *Succ1MBB = &getMBB(*BrInst.getSuccessor(1));
588 
589   const auto &TLI = *MF->getSubtarget().getTargetLowering();
590 
591   // If this is a series of conditions that are or'd or and'd together, emit
592   // this as a sequence of branches instead of setcc's with and/or operations.
593   // As long as jumps are not expensive (exceptions for multi-use logic ops,
594   // unpredictable branches, and vector extracts because those jumps are likely
595   // expensive for any target), this should improve performance.
596   // For example, instead of something like:
597   //     cmp A, B
598   //     C = seteq
599   //     cmp D, E
600   //     F = setle
601   //     or C, F
602   //     jnz foo
603   // Emit:
604   //     cmp A, B
605   //     je foo
606   //     cmp D, E
607   //     jle foo
608   using namespace PatternMatch;
609   const Instruction *CondI = dyn_cast<Instruction>(CondVal);
610   if (!TLI.isJumpExpensive() && CondI && CondI->hasOneUse() &&
611       !BrInst.hasMetadata(LLVMContext::MD_unpredictable)) {
612     Instruction::BinaryOps Opcode = (Instruction::BinaryOps)0;
613     Value *Vec;
614     const Value *BOp0, *BOp1;
615     if (match(CondI, m_LogicalAnd(m_Value(BOp0), m_Value(BOp1))))
616       Opcode = Instruction::And;
617     else if (match(CondI, m_LogicalOr(m_Value(BOp0), m_Value(BOp1))))
618       Opcode = Instruction::Or;
619 
620     if (Opcode && !(match(BOp0, m_ExtractElt(m_Value(Vec), m_Value())) &&
621                     match(BOp1, m_ExtractElt(m_Specific(Vec), m_Value())))) {
622       findMergedConditions(CondI, Succ0MBB, Succ1MBB, &CurMBB, &CurMBB, Opcode,
623                            getEdgeProbability(&CurMBB, Succ0MBB),
624                            getEdgeProbability(&CurMBB, Succ1MBB),
625                            /*InvertCond=*/false);
626       assert(SL->SwitchCases[0].ThisBB == &CurMBB && "Unexpected lowering!");
627 
628       // Allow some cases to be rejected.
629       if (shouldEmitAsBranches(SL->SwitchCases)) {
630         // Emit the branch for this block.
631         emitSwitchCase(SL->SwitchCases[0], &CurMBB, *CurBuilder);
632         SL->SwitchCases.erase(SL->SwitchCases.begin());
633         return true;
634       }
635 
636       // Okay, we decided not to do this, remove any inserted MBB's and clear
637       // SwitchCases.
638       for (unsigned I = 1, E = SL->SwitchCases.size(); I != E; ++I)
639         MF->erase(SL->SwitchCases[I].ThisBB);
640 
641       SL->SwitchCases.clear();
642     }
643   }
644 
645   // Create a CaseBlock record representing this branch.
646   SwitchCG::CaseBlock CB(CmpInst::ICMP_EQ, false, CondVal,
647                          ConstantInt::getTrue(MF->getFunction().getContext()),
648                          nullptr, Succ0MBB, Succ1MBB, &CurMBB,
649                          CurBuilder->getDebugLoc());
650 
651   // Use emitSwitchCase to actually insert the fast branch sequence for this
652   // cond branch.
653   emitSwitchCase(CB, &CurMBB, *CurBuilder);
654   return true;
655 }
656 
657 void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
658                                         MachineBasicBlock *Dst,
659                                         BranchProbability Prob) {
660   if (!FuncInfo.BPI) {
661     Src->addSuccessorWithoutProb(Dst);
662     return;
663   }
664   if (Prob.isUnknown())
665     Prob = getEdgeProbability(Src, Dst);
666   Src->addSuccessor(Dst, Prob);
667 }
668 
669 BranchProbability
670 IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
671                                  const MachineBasicBlock *Dst) const {
672   const BasicBlock *SrcBB = Src->getBasicBlock();
673   const BasicBlock *DstBB = Dst->getBasicBlock();
674   if (!FuncInfo.BPI) {
675     // If BPI is not available, set the default probability as 1 / N, where N is
676     // the number of successors.
677     auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
678     return BranchProbability(1, SuccSize);
679   }
680   return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
681 }
682 
683 bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
684   using namespace SwitchCG;
685   // Extract cases from the switch.
686   const SwitchInst &SI = cast<SwitchInst>(U);
687   BranchProbabilityInfo *BPI = FuncInfo.BPI;
688   CaseClusterVector Clusters;
689   Clusters.reserve(SI.getNumCases());
690   for (const auto &I : SI.cases()) {
691     MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
692     assert(Succ && "Could not find successor mbb in mapping");
693     const ConstantInt *CaseVal = I.getCaseValue();
694     BranchProbability Prob =
695         BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
696             : BranchProbability(1, SI.getNumCases() + 1);
697     Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
698   }
699 
700   MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());
701 
702   // Cluster adjacent cases with the same destination. We do this at all
703   // optimization levels because it's cheap to do and will make codegen faster
704   // if there are many clusters.
705   sortAndRangeify(Clusters);
706 
707   MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());
708 
709   // If there is only the default destination, jump there directly.
710   if (Clusters.empty()) {
711     SwitchMBB->addSuccessor(DefaultMBB);
712     if (DefaultMBB != SwitchMBB->getNextNode())
713       MIB.buildBr(*DefaultMBB);
714     return true;
715   }
716 
717   SL->findJumpTables(Clusters, &SI, DefaultMBB, nullptr, nullptr);
718   SL->findBitTestClusters(Clusters, &SI);
719 
720   LLVM_DEBUG({
721     dbgs() << "Case clusters: ";
722     for (const CaseCluster &C : Clusters) {
723       if (C.Kind == CC_JumpTable)
724         dbgs() << "JT:";
725       if (C.Kind == CC_BitTests)
726         dbgs() << "BT:";
727 
728       C.Low->getValue().print(dbgs(), true);
729       if (C.Low != C.High) {
730         dbgs() << '-';
731         C.High->getValue().print(dbgs(), true);
732       }
733       dbgs() << ' ';
734     }
735     dbgs() << '\n';
736   });
737 
738   assert(!Clusters.empty());
739   SwitchWorkList WorkList;
740   CaseClusterIt First = Clusters.begin();
741   CaseClusterIt Last = Clusters.end() - 1;
742   auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
743   WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});
744 
745   // FIXME: At the moment we don't do any splitting optimizations here like
746   // SelectionDAG does, so this worklist only has one entry.
747   while (!WorkList.empty()) {
748     SwitchWorkListItem W = WorkList.pop_back_val();
749     if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
750       return false;
751   }
752   return true;
753 }
754 
755 void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
756                                  MachineBasicBlock *MBB) {
757   // Emit the code for the jump table
758   assert(JT.Reg != -1U && "Should lower JT Header first!");
759   MachineIRBuilder MIB(*MBB->getParent());
760   MIB.setMBB(*MBB);
761   MIB.setDebugLoc(CurBuilder->getDebugLoc());
762 
763   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
764   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
765 
766   auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
767   MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
768 }
769 
770 bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
771                                        SwitchCG::JumpTableHeader &JTH,
772                                        MachineBasicBlock *HeaderBB) {
773   MachineIRBuilder MIB(*HeaderBB->getParent());
774   MIB.setMBB(*HeaderBB);
775   MIB.setDebugLoc(CurBuilder->getDebugLoc());
776 
777   const Value &SValue = *JTH.SValue;
778   // Subtract the lowest switch case value from the value being switched on.
779   const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
780   Register SwitchOpReg = getOrCreateVReg(SValue);
781   auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
782   auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);
783 
784   // This value may be smaller or larger than the target's pointer type, and
785   // therefore require extension or truncating.
786   Type *PtrIRTy = SValue.getType()->getPointerTo();
787   const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
788   Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);
789 
790   JT.Reg = Sub.getReg(0);
791 
792   if (JTH.FallthroughUnreachable) {
793     if (JT.MBB != HeaderBB->getNextNode())
794       MIB.buildBr(*JT.MBB);
795     return true;
796   }
797 
798   // Emit the range check for the jump table, and branch to the default block
799   // for the switch statement if the value being switched on exceeds the
800   // largest case in the switch.
801   auto Cst = getOrCreateVReg(
802       *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
803   Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
804   auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);
805 
806   auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);
807 
808   // Avoid emitting unnecessary branches to the next block.
809   if (JT.MBB != HeaderBB->getNextNode())
810     BrCond = MIB.buildBr(*JT.MBB);
811   return true;
812 }
813 
814 void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
815                                   MachineBasicBlock *SwitchBB,
816                                   MachineIRBuilder &MIB) {
817   Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
818   Register Cond;
819   DebugLoc OldDbgLoc = MIB.getDebugLoc();
820   MIB.setDebugLoc(CB.DbgLoc);
821   MIB.setMBB(*CB.ThisBB);
822 
823   if (CB.PredInfo.NoCmp) {
824     // Branch or fall through to TrueBB.
825     addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
826     addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
827                       CB.ThisBB);
828     CB.ThisBB->normalizeSuccProbs();
829     if (CB.TrueBB != CB.ThisBB->getNextNode())
830       MIB.buildBr(*CB.TrueBB);
831     MIB.setDebugLoc(OldDbgLoc);
832     return;
833   }
834 
835   const LLT i1Ty = LLT::scalar(1);
836   // Build the compare.
837   if (!CB.CmpMHS) {
838     const auto *CI = dyn_cast<ConstantInt>(CB.CmpRHS);
839     // For conditional branch lowering, we might try to do something silly like
840     // emit an G_ICMP to compare an existing G_ICMP i1 result with true. If so,
841     // just re-use the existing condition vreg.
842     if (MRI->getType(CondLHS).getSizeInBits() == 1 && CI &&
843         CI->getZExtValue() == 1 && CB.PredInfo.Pred == CmpInst::ICMP_EQ) {
844       Cond = CondLHS;
845     } else {
846       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
847       if (CmpInst::isFPPredicate(CB.PredInfo.Pred))
848         Cond =
849             MIB.buildFCmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
850       else
851         Cond =
852             MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
853     }
854   } else {
855     assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
856            "Can only handle SLE ranges");
857 
858     const APInt& Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
859     const APInt& High = cast<ConstantInt>(CB.CmpRHS)->getValue();
860 
861     Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
862     if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
863       Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
864       Cond =
865           MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
866     } else {
867       const LLT CmpTy = MRI->getType(CmpOpReg);
868       auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
869       auto Diff = MIB.buildConstant(CmpTy, High - Low);
870       Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
871     }
872   }
873 
874   // Update successor info
875   addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
876 
877   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
878                     CB.ThisBB);
879 
880   // TrueBB and FalseBB are always different unless the incoming IR is
881   // degenerate. This only happens when running llc on weird IR.
882   if (CB.TrueBB != CB.FalseBB)
883     addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
884   CB.ThisBB->normalizeSuccProbs();
885 
886   addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
887                     CB.ThisBB);
888 
889   MIB.buildBrCond(Cond, *CB.TrueBB);
890   MIB.buildBr(*CB.FalseBB);
891   MIB.setDebugLoc(OldDbgLoc);
892 }
893 
894 bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
895                                           MachineBasicBlock *SwitchMBB,
896                                           MachineBasicBlock *CurMBB,
897                                           MachineBasicBlock *DefaultMBB,
898                                           MachineIRBuilder &MIB,
899                                           MachineFunction::iterator BBI,
900                                           BranchProbability UnhandledProbs,
901                                           SwitchCG::CaseClusterIt I,
902                                           MachineBasicBlock *Fallthrough,
903                                           bool FallthroughUnreachable) {
904   using namespace SwitchCG;
905   MachineFunction *CurMF = SwitchMBB->getParent();
906   // FIXME: Optimize away range check based on pivot comparisons.
907   JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
908   SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
909   BranchProbability DefaultProb = W.DefaultProb;
910 
911   // The jump block hasn't been inserted yet; insert it here.
912   MachineBasicBlock *JumpMBB = JT->MBB;
913   CurMF->insert(BBI, JumpMBB);
914 
915   // Since the jump table block is separate from the switch block, we need
916   // to keep track of it as a machine predecessor to the default block,
917   // otherwise we lose the phi edges.
918   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
919                     CurMBB);
920   addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
921                     JumpMBB);
922 
923   auto JumpProb = I->Prob;
924   auto FallthroughProb = UnhandledProbs;
925 
926   // If the default statement is a target of the jump table, we evenly
927   // distribute the default probability to successors of CurMBB. Also
928   // update the probability on the edge from JumpMBB to Fallthrough.
929   for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
930                                         SE = JumpMBB->succ_end();
931        SI != SE; ++SI) {
932     if (*SI == DefaultMBB) {
933       JumpProb += DefaultProb / 2;
934       FallthroughProb -= DefaultProb / 2;
935       JumpMBB->setSuccProbability(SI, DefaultProb / 2);
936       JumpMBB->normalizeSuccProbs();
937     } else {
938       // Also record edges from the jump table block to it's successors.
939       addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
940                         JumpMBB);
941     }
942   }
943 
944   if (FallthroughUnreachable)
945     JTH->FallthroughUnreachable = true;
946 
947   if (!JTH->FallthroughUnreachable)
948     addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
949   addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
950   CurMBB->normalizeSuccProbs();
951 
952   // The jump table header will be inserted in our current block, do the
953   // range check, and fall through to our fallthrough block.
954   JTH->HeaderBB = CurMBB;
955   JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.
956 
957   // If we're in the right place, emit the jump table header right now.
958   if (CurMBB == SwitchMBB) {
959     if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
960       return false;
961     JTH->Emitted = true;
962   }
963   return true;
964 }
965 bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
966                                             Value *Cond,
967                                             MachineBasicBlock *Fallthrough,
968                                             bool FallthroughUnreachable,
969                                             BranchProbability UnhandledProbs,
970                                             MachineBasicBlock *CurMBB,
971                                             MachineIRBuilder &MIB,
972                                             MachineBasicBlock *SwitchMBB) {
973   using namespace SwitchCG;
974   const Value *RHS, *LHS, *MHS;
975   CmpInst::Predicate Pred;
976   if (I->Low == I->High) {
977     // Check Cond == I->Low.
978     Pred = CmpInst::ICMP_EQ;
979     LHS = Cond;
980     RHS = I->Low;
981     MHS = nullptr;
982   } else {
983     // Check I->Low <= Cond <= I->High.
984     Pred = CmpInst::ICMP_SLE;
985     LHS = I->Low;
986     MHS = Cond;
987     RHS = I->High;
988   }
989 
990   // If Fallthrough is unreachable, fold away the comparison.
991   // The false probability is the sum of all unhandled cases.
992   CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
993                CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);
994 
995   emitSwitchCase(CB, SwitchMBB, MIB);
996   return true;
997 }
998 
999 void IRTranslator::emitBitTestHeader(SwitchCG::BitTestBlock &B,
1000                                      MachineBasicBlock *SwitchBB) {
1001   MachineIRBuilder &MIB = *CurBuilder;
1002   MIB.setMBB(*SwitchBB);
1003 
1004   // Subtract the minimum value.
1005   Register SwitchOpReg = getOrCreateVReg(*B.SValue);
1006 
1007   LLT SwitchOpTy = MRI->getType(SwitchOpReg);
1008   Register MinValReg = MIB.buildConstant(SwitchOpTy, B.First).getReg(0);
1009   auto RangeSub = MIB.buildSub(SwitchOpTy, SwitchOpReg, MinValReg);
1010 
1011   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
1012   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1013 
1014   LLT MaskTy = SwitchOpTy;
1015   if (MaskTy.getSizeInBits() > PtrTy.getSizeInBits() ||
1016       !isPowerOf2_32(MaskTy.getSizeInBits()))
1017     MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1018   else {
1019     // Ensure that the type will fit the mask value.
1020     for (unsigned I = 0, E = B.Cases.size(); I != E; ++I) {
1021       if (!isUIntN(SwitchOpTy.getSizeInBits(), B.Cases[I].Mask)) {
1022         // Switch table case range are encoded into series of masks.
1023         // Just use pointer type, it's guaranteed to fit.
1024         MaskTy = LLT::scalar(PtrTy.getSizeInBits());
1025         break;
1026       }
1027     }
1028   }
1029   Register SubReg = RangeSub.getReg(0);
1030   if (SwitchOpTy != MaskTy)
1031     SubReg = MIB.buildZExtOrTrunc(MaskTy, SubReg).getReg(0);
1032 
1033   B.RegVT = getMVTForLLT(MaskTy);
1034   B.Reg = SubReg;
1035 
1036   MachineBasicBlock *MBB = B.Cases[0].ThisBB;
1037 
1038   if (!B.FallthroughUnreachable)
1039     addSuccessorWithProb(SwitchBB, B.Default, B.DefaultProb);
1040   addSuccessorWithProb(SwitchBB, MBB, B.Prob);
1041 
1042   SwitchBB->normalizeSuccProbs();
1043 
1044   if (!B.FallthroughUnreachable) {
1045     // Conditional branch to the default block.
1046     auto RangeCst = MIB.buildConstant(SwitchOpTy, B.Range);
1047     auto RangeCmp = MIB.buildICmp(CmpInst::Predicate::ICMP_UGT, LLT::scalar(1),
1048                                   RangeSub, RangeCst);
1049     MIB.buildBrCond(RangeCmp, *B.Default);
1050   }
1051 
1052   // Avoid emitting unnecessary branches to the next block.
1053   if (MBB != SwitchBB->getNextNode())
1054     MIB.buildBr(*MBB);
1055 }
1056 
1057 void IRTranslator::emitBitTestCase(SwitchCG::BitTestBlock &BB,
1058                                    MachineBasicBlock *NextMBB,
1059                                    BranchProbability BranchProbToNext,
1060                                    Register Reg, SwitchCG::BitTestCase &B,
1061                                    MachineBasicBlock *SwitchBB) {
1062   MachineIRBuilder &MIB = *CurBuilder;
1063   MIB.setMBB(*SwitchBB);
1064 
1065   LLT SwitchTy = getLLTForMVT(BB.RegVT);
1066   Register Cmp;
1067   unsigned PopCount = countPopulation(B.Mask);
1068   if (PopCount == 1) {
1069     // Testing for a single bit; just compare the shift count with what it
1070     // would need to be to shift a 1 bit in that position.
1071     auto MaskTrailingZeros =
1072         MIB.buildConstant(SwitchTy, countTrailingZeros(B.Mask));
1073     Cmp =
1074         MIB.buildICmp(ICmpInst::ICMP_EQ, LLT::scalar(1), Reg, MaskTrailingZeros)
1075             .getReg(0);
1076   } else if (PopCount == BB.Range) {
1077     // There is only one zero bit in the range, test for it directly.
1078     auto MaskTrailingOnes =
1079         MIB.buildConstant(SwitchTy, countTrailingOnes(B.Mask));
1080     Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Reg, MaskTrailingOnes)
1081               .getReg(0);
1082   } else {
1083     // Make desired shift.
1084     auto CstOne = MIB.buildConstant(SwitchTy, 1);
1085     auto SwitchVal = MIB.buildShl(SwitchTy, CstOne, Reg);
1086 
1087     // Emit bit tests and jumps.
1088     auto CstMask = MIB.buildConstant(SwitchTy, B.Mask);
1089     auto AndOp = MIB.buildAnd(SwitchTy, SwitchVal, CstMask);
1090     auto CstZero = MIB.buildConstant(SwitchTy, 0);
1091     Cmp = MIB.buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), AndOp, CstZero)
1092               .getReg(0);
1093   }
1094 
1095   // The branch probability from SwitchBB to B.TargetBB is B.ExtraProb.
1096   addSuccessorWithProb(SwitchBB, B.TargetBB, B.ExtraProb);
1097   // The branch probability from SwitchBB to NextMBB is BranchProbToNext.
1098   addSuccessorWithProb(SwitchBB, NextMBB, BranchProbToNext);
1099   // It is not guaranteed that the sum of B.ExtraProb and BranchProbToNext is
1100   // one as they are relative probabilities (and thus work more like weights),
1101   // and hence we need to normalize them to let the sum of them become one.
1102   SwitchBB->normalizeSuccProbs();
1103 
1104   // Record the fact that the IR edge from the header to the bit test target
1105   // will go through our new block. Neeeded for PHIs to have nodes added.
1106   addMachineCFGPred({BB.Parent->getBasicBlock(), B.TargetBB->getBasicBlock()},
1107                     SwitchBB);
1108 
1109   MIB.buildBrCond(Cmp, *B.TargetBB);
1110 
1111   // Avoid emitting unnecessary branches to the next block.
1112   if (NextMBB != SwitchBB->getNextNode())
1113     MIB.buildBr(*NextMBB);
1114 }
1115 
1116 bool IRTranslator::lowerBitTestWorkItem(
1117     SwitchCG::SwitchWorkListItem W, MachineBasicBlock *SwitchMBB,
1118     MachineBasicBlock *CurMBB, MachineBasicBlock *DefaultMBB,
1119     MachineIRBuilder &MIB, MachineFunction::iterator BBI,
1120     BranchProbability DefaultProb, BranchProbability UnhandledProbs,
1121     SwitchCG::CaseClusterIt I, MachineBasicBlock *Fallthrough,
1122     bool FallthroughUnreachable) {
1123   using namespace SwitchCG;
1124   MachineFunction *CurMF = SwitchMBB->getParent();
1125   // FIXME: Optimize away range check based on pivot comparisons.
1126   BitTestBlock *BTB = &SL->BitTestCases[I->BTCasesIndex];
1127   // The bit test blocks haven't been inserted yet; insert them here.
1128   for (BitTestCase &BTC : BTB->Cases)
1129     CurMF->insert(BBI, BTC.ThisBB);
1130 
1131   // Fill in fields of the BitTestBlock.
1132   BTB->Parent = CurMBB;
1133   BTB->Default = Fallthrough;
1134 
1135   BTB->DefaultProb = UnhandledProbs;
1136   // If the cases in bit test don't form a contiguous range, we evenly
1137   // distribute the probability on the edge to Fallthrough to two
1138   // successors of CurMBB.
1139   if (!BTB->ContiguousRange) {
1140     BTB->Prob += DefaultProb / 2;
1141     BTB->DefaultProb -= DefaultProb / 2;
1142   }
1143 
1144   if (FallthroughUnreachable)
1145     BTB->FallthroughUnreachable = true;
1146 
1147   // If we're in the right place, emit the bit test header right now.
1148   if (CurMBB == SwitchMBB) {
1149     emitBitTestHeader(*BTB, SwitchMBB);
1150     BTB->Emitted = true;
1151   }
1152   return true;
1153 }
1154 
1155 bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
1156                                        Value *Cond,
1157                                        MachineBasicBlock *SwitchMBB,
1158                                        MachineBasicBlock *DefaultMBB,
1159                                        MachineIRBuilder &MIB) {
1160   using namespace SwitchCG;
1161   MachineFunction *CurMF = FuncInfo.MF;
1162   MachineBasicBlock *NextMBB = nullptr;
1163   MachineFunction::iterator BBI(W.MBB);
1164   if (++BBI != FuncInfo.MF->end())
1165     NextMBB = &*BBI;
1166 
1167   if (EnableOpts) {
1168     // Here, we order cases by probability so the most likely case will be
1169     // checked first. However, two clusters can have the same probability in
1170     // which case their relative ordering is non-deterministic. So we use Low
1171     // as a tie-breaker as clusters are guaranteed to never overlap.
1172     llvm::sort(W.FirstCluster, W.LastCluster + 1,
1173                [](const CaseCluster &a, const CaseCluster &b) {
1174                  return a.Prob != b.Prob
1175                             ? a.Prob > b.Prob
1176                             : a.Low->getValue().slt(b.Low->getValue());
1177                });
1178 
1179     // Rearrange the case blocks so that the last one falls through if possible
1180     // without changing the order of probabilities.
1181     for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
1182       --I;
1183       if (I->Prob > W.LastCluster->Prob)
1184         break;
1185       if (I->Kind == CC_Range && I->MBB == NextMBB) {
1186         std::swap(*I, *W.LastCluster);
1187         break;
1188       }
1189     }
1190   }
1191 
1192   // Compute total probability.
1193   BranchProbability DefaultProb = W.DefaultProb;
1194   BranchProbability UnhandledProbs = DefaultProb;
1195   for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
1196     UnhandledProbs += I->Prob;
1197 
1198   MachineBasicBlock *CurMBB = W.MBB;
1199   for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
1200     bool FallthroughUnreachable = false;
1201     MachineBasicBlock *Fallthrough;
1202     if (I == W.LastCluster) {
1203       // For the last cluster, fall through to the default destination.
1204       Fallthrough = DefaultMBB;
1205       FallthroughUnreachable = isa<UnreachableInst>(
1206           DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
1207     } else {
1208       Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
1209       CurMF->insert(BBI, Fallthrough);
1210     }
1211     UnhandledProbs -= I->Prob;
1212 
1213     switch (I->Kind) {
1214     case CC_BitTests: {
1215       if (!lowerBitTestWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1216                                 DefaultProb, UnhandledProbs, I, Fallthrough,
1217                                 FallthroughUnreachable)) {
1218         LLVM_DEBUG(dbgs() << "Failed to lower bit test for switch");
1219         return false;
1220       }
1221       break;
1222     }
1223 
1224     case CC_JumpTable: {
1225       if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
1226                                   UnhandledProbs, I, Fallthrough,
1227                                   FallthroughUnreachable)) {
1228         LLVM_DEBUG(dbgs() << "Failed to lower jump table");
1229         return false;
1230       }
1231       break;
1232     }
1233     case CC_Range: {
1234       if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
1235                                     FallthroughUnreachable, UnhandledProbs,
1236                                     CurMBB, MIB, SwitchMBB)) {
1237         LLVM_DEBUG(dbgs() << "Failed to lower switch range");
1238         return false;
1239       }
1240       break;
1241     }
1242     }
1243     CurMBB = Fallthrough;
1244   }
1245 
1246   return true;
1247 }
1248 
1249 bool IRTranslator::translateIndirectBr(const User &U,
1250                                        MachineIRBuilder &MIRBuilder) {
1251   const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);
1252 
1253   const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
1254   MIRBuilder.buildBrIndirect(Tgt);
1255 
1256   // Link successors.
1257   SmallPtrSet<const BasicBlock *, 32> AddedSuccessors;
1258   MachineBasicBlock &CurBB = MIRBuilder.getMBB();
1259   for (const BasicBlock *Succ : successors(&BrInst)) {
1260     // It's legal for indirectbr instructions to have duplicate blocks in the
1261     // destination list. We don't allow this in MIR. Skip anything that's
1262     // already a successor.
1263     if (!AddedSuccessors.insert(Succ).second)
1264       continue;
1265     CurBB.addSuccessor(&getMBB(*Succ));
1266   }
1267 
1268   return true;
1269 }
1270 
1271 static bool isSwiftError(const Value *V) {
1272   if (auto Arg = dyn_cast<Argument>(V))
1273     return Arg->hasSwiftErrorAttr();
1274   if (auto AI = dyn_cast<AllocaInst>(V))
1275     return AI->isSwiftError();
1276   return false;
1277 }
1278 
1279 bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
1280   const LoadInst &LI = cast<LoadInst>(U);
1281 
1282   unsigned StoreSize = DL->getTypeStoreSize(LI.getType());
1283   if (StoreSize == 0)
1284     return true;
1285 
1286   ArrayRef<Register> Regs = getOrCreateVRegs(LI);
1287   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
1288   Register Base = getOrCreateVReg(*LI.getPointerOperand());
1289   AAMDNodes AAInfo = LI.getAAMetadata();
1290 
1291   const Value *Ptr = LI.getPointerOperand();
1292   Type *OffsetIRTy = DL->getIntPtrType(Ptr->getType());
1293   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1294 
1295   if (CLI->supportSwiftError() && isSwiftError(Ptr)) {
1296     assert(Regs.size() == 1 && "swifterror should be single pointer");
1297     Register VReg =
1298         SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(), Ptr);
1299     MIRBuilder.buildCopy(Regs[0], VReg);
1300     return true;
1301   }
1302 
1303   auto &TLI = *MF->getSubtarget().getTargetLowering();
1304   MachineMemOperand::Flags Flags = TLI.getLoadMemOperandFlags(LI, *DL);
1305   if (AA && !(Flags & MachineMemOperand::MOInvariant)) {
1306     if (AA->pointsToConstantMemory(
1307             MemoryLocation(Ptr, LocationSize::precise(StoreSize), AAInfo))) {
1308       Flags |= MachineMemOperand::MOInvariant;
1309 
1310       // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1311       // but the previous usage implied it did. Probably should check
1312       // isDereferenceableAndAlignedPointer.
1313       Flags |= MachineMemOperand::MODereferenceable;
1314     }
1315   }
1316 
1317   const MDNode *Ranges =
1318       Regs.size() == 1 ? LI.getMetadata(LLVMContext::MD_range) : nullptr;
1319   for (unsigned i = 0; i < Regs.size(); ++i) {
1320     Register Addr;
1321     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1322 
1323     MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
1324     Align BaseAlign = getMemOpAlign(LI);
1325     auto MMO = MF->getMachineMemOperand(
1326         Ptr, Flags, MRI->getType(Regs[i]),
1327         commonAlignment(BaseAlign, Offsets[i] / 8), AAInfo, Ranges,
1328         LI.getSyncScopeID(), LI.getOrdering());
1329     MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
1330   }
1331 
1332   return true;
1333 }
1334 
1335 bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
1336   const StoreInst &SI = cast<StoreInst>(U);
1337   if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
1338     return true;
1339 
1340   ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
1341   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
1342   Register Base = getOrCreateVReg(*SI.getPointerOperand());
1343 
1344   Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
1345   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1346 
1347   if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
1348     assert(Vals.size() == 1 && "swifterror should be single pointer");
1349 
1350     Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
1351                                                     SI.getPointerOperand());
1352     MIRBuilder.buildCopy(VReg, Vals[0]);
1353     return true;
1354   }
1355 
1356   auto &TLI = *MF->getSubtarget().getTargetLowering();
1357   MachineMemOperand::Flags Flags = TLI.getStoreMemOperandFlags(SI, *DL);
1358 
1359   for (unsigned i = 0; i < Vals.size(); ++i) {
1360     Register Addr;
1361     MIRBuilder.materializePtrAdd(Addr, Base, OffsetTy, Offsets[i] / 8);
1362 
1363     MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
1364     Align BaseAlign = getMemOpAlign(SI);
1365     auto MMO = MF->getMachineMemOperand(
1366         Ptr, Flags, MRI->getType(Vals[i]),
1367         commonAlignment(BaseAlign, Offsets[i] / 8), SI.getAAMetadata(), nullptr,
1368         SI.getSyncScopeID(), SI.getOrdering());
1369     MIRBuilder.buildStore(Vals[i], Addr, *MMO);
1370   }
1371   return true;
1372 }
1373 
1374 static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
1375   const Value *Src = U.getOperand(0);
1376   Type *Int32Ty = Type::getInt32Ty(U.getContext());
1377 
1378   // getIndexedOffsetInType is designed for GEPs, so the first index is the
1379   // usual array element rather than looking into the actual aggregate.
1380   SmallVector<Value *, 1> Indices;
1381   Indices.push_back(ConstantInt::get(Int32Ty, 0));
1382 
1383   if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
1384     for (auto Idx : EVI->indices())
1385       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1386   } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
1387     for (auto Idx : IVI->indices())
1388       Indices.push_back(ConstantInt::get(Int32Ty, Idx));
1389   } else {
1390     for (unsigned i = 1; i < U.getNumOperands(); ++i)
1391       Indices.push_back(U.getOperand(i));
1392   }
1393 
1394   return 8 * static_cast<uint64_t>(
1395                  DL.getIndexedOffsetInType(Src->getType(), Indices));
1396 }
1397 
1398 bool IRTranslator::translateExtractValue(const User &U,
1399                                          MachineIRBuilder &MIRBuilder) {
1400   const Value *Src = U.getOperand(0);
1401   uint64_t Offset = getOffsetFromIndices(U, *DL);
1402   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1403   ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
1404   unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
1405   auto &DstRegs = allocateVRegs(U);
1406 
1407   for (unsigned i = 0; i < DstRegs.size(); ++i)
1408     DstRegs[i] = SrcRegs[Idx++];
1409 
1410   return true;
1411 }
1412 
1413 bool IRTranslator::translateInsertValue(const User &U,
1414                                         MachineIRBuilder &MIRBuilder) {
1415   const Value *Src = U.getOperand(0);
1416   uint64_t Offset = getOffsetFromIndices(U, *DL);
1417   auto &DstRegs = allocateVRegs(U);
1418   ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
1419   ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
1420   ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
1421   auto *InsertedIt = InsertedRegs.begin();
1422 
1423   for (unsigned i = 0; i < DstRegs.size(); ++i) {
1424     if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
1425       DstRegs[i] = *InsertedIt++;
1426     else
1427       DstRegs[i] = SrcRegs[i];
1428   }
1429 
1430   return true;
1431 }
1432 
1433 bool IRTranslator::translateSelect(const User &U,
1434                                    MachineIRBuilder &MIRBuilder) {
1435   Register Tst = getOrCreateVReg(*U.getOperand(0));
1436   ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
1437   ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
1438   ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));
1439 
1440   uint16_t Flags = 0;
1441   if (const SelectInst *SI = dyn_cast<SelectInst>(&U))
1442     Flags = MachineInstr::copyFlagsFromInstruction(*SI);
1443 
1444   for (unsigned i = 0; i < ResRegs.size(); ++i) {
1445     MIRBuilder.buildSelect(ResRegs[i], Tst, Op0Regs[i], Op1Regs[i], Flags);
1446   }
1447 
1448   return true;
1449 }
1450 
1451 bool IRTranslator::translateCopy(const User &U, const Value &V,
1452                                  MachineIRBuilder &MIRBuilder) {
1453   Register Src = getOrCreateVReg(V);
1454   auto &Regs = *VMap.getVRegs(U);
1455   if (Regs.empty()) {
1456     Regs.push_back(Src);
1457     VMap.getOffsets(U)->push_back(0);
1458   } else {
1459     // If we already assigned a vreg for this instruction, we can't change that.
1460     // Emit a copy to satisfy the users we already emitted.
1461     MIRBuilder.buildCopy(Regs[0], Src);
1462   }
1463   return true;
1464 }
1465 
1466 bool IRTranslator::translateBitCast(const User &U,
1467                                     MachineIRBuilder &MIRBuilder) {
1468   // If we're bitcasting to the source type, we can reuse the source vreg.
1469   if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
1470       getLLTForType(*U.getType(), *DL))
1471     return translateCopy(U, *U.getOperand(0), MIRBuilder);
1472 
1473   return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
1474 }
1475 
1476 bool IRTranslator::translateCast(unsigned Opcode, const User &U,
1477                                  MachineIRBuilder &MIRBuilder) {
1478   Register Op = getOrCreateVReg(*U.getOperand(0));
1479   Register Res = getOrCreateVReg(U);
1480   MIRBuilder.buildInstr(Opcode, {Res}, {Op});
1481   return true;
1482 }
1483 
1484 bool IRTranslator::translateGetElementPtr(const User &U,
1485                                           MachineIRBuilder &MIRBuilder) {
1486   Value &Op0 = *U.getOperand(0);
1487   Register BaseReg = getOrCreateVReg(Op0);
1488   Type *PtrIRTy = Op0.getType();
1489   LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
1490   Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1491   LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1492 
1493   // Normalize Vector GEP - all scalar operands should be converted to the
1494   // splat vector.
1495   unsigned VectorWidth = 0;
1496 
1497   // True if we should use a splat vector; using VectorWidth alone is not
1498   // sufficient.
1499   bool WantSplatVector = false;
1500   if (auto *VT = dyn_cast<VectorType>(U.getType())) {
1501     VectorWidth = cast<FixedVectorType>(VT)->getNumElements();
1502     // We don't produce 1 x N vectors; those are treated as scalars.
1503     WantSplatVector = VectorWidth > 1;
1504   }
1505 
1506   // We might need to splat the base pointer into a vector if the offsets
1507   // are vectors.
1508   if (WantSplatVector && !PtrTy.isVector()) {
1509     BaseReg =
1510         MIRBuilder
1511             .buildSplatVector(LLT::fixed_vector(VectorWidth, PtrTy), BaseReg)
1512             .getReg(0);
1513     PtrIRTy = FixedVectorType::get(PtrIRTy, VectorWidth);
1514     PtrTy = getLLTForType(*PtrIRTy, *DL);
1515     OffsetIRTy = DL->getIntPtrType(PtrIRTy);
1516     OffsetTy = getLLTForType(*OffsetIRTy, *DL);
1517   }
1518 
1519   int64_t Offset = 0;
1520   for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
1521        GTI != E; ++GTI) {
1522     const Value *Idx = GTI.getOperand();
1523     if (StructType *StTy = GTI.getStructTypeOrNull()) {
1524       unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
1525       Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
1526       continue;
1527     } else {
1528       uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());
1529 
1530       // If this is a scalar constant or a splat vector of constants,
1531       // handle it quickly.
1532       if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
1533         Offset += ElementSize * CI->getSExtValue();
1534         continue;
1535       }
1536 
1537       if (Offset != 0) {
1538         auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
1539         BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, OffsetMIB.getReg(0))
1540                       .getReg(0);
1541         Offset = 0;
1542       }
1543 
1544       Register IdxReg = getOrCreateVReg(*Idx);
1545       LLT IdxTy = MRI->getType(IdxReg);
1546       if (IdxTy != OffsetTy) {
1547         if (!IdxTy.isVector() && WantSplatVector) {
1548           IdxReg = MIRBuilder.buildSplatVector(
1549             OffsetTy.changeElementType(IdxTy), IdxReg).getReg(0);
1550         }
1551 
1552         IdxReg = MIRBuilder.buildSExtOrTrunc(OffsetTy, IdxReg).getReg(0);
1553       }
1554 
1555       // N = N + Idx * ElementSize;
1556       // Avoid doing it for ElementSize of 1.
1557       Register GepOffsetReg;
1558       if (ElementSize != 1) {
1559         auto ElementSizeMIB = MIRBuilder.buildConstant(
1560             getLLTForType(*OffsetIRTy, *DL), ElementSize);
1561         GepOffsetReg =
1562             MIRBuilder.buildMul(OffsetTy, IdxReg, ElementSizeMIB).getReg(0);
1563       } else
1564         GepOffsetReg = IdxReg;
1565 
1566       BaseReg = MIRBuilder.buildPtrAdd(PtrTy, BaseReg, GepOffsetReg).getReg(0);
1567     }
1568   }
1569 
1570   if (Offset != 0) {
1571     auto OffsetMIB =
1572         MIRBuilder.buildConstant(OffsetTy, Offset);
1573     MIRBuilder.buildPtrAdd(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
1574     return true;
1575   }
1576 
1577   MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
1578   return true;
1579 }
1580 
1581 bool IRTranslator::translateMemFunc(const CallInst &CI,
1582                                     MachineIRBuilder &MIRBuilder,
1583                                     unsigned Opcode) {
1584   const Value *SrcPtr = CI.getArgOperand(1);
1585   // If the source is undef, then just emit a nop.
1586   if (isa<UndefValue>(SrcPtr))
1587     return true;
1588 
1589   SmallVector<Register, 3> SrcRegs;
1590 
1591   unsigned MinPtrSize = UINT_MAX;
1592   for (auto AI = CI.arg_begin(), AE = CI.arg_end(); std::next(AI) != AE; ++AI) {
1593     Register SrcReg = getOrCreateVReg(**AI);
1594     LLT SrcTy = MRI->getType(SrcReg);
1595     if (SrcTy.isPointer())
1596       MinPtrSize = std::min<unsigned>(SrcTy.getSizeInBits(), MinPtrSize);
1597     SrcRegs.push_back(SrcReg);
1598   }
1599 
1600   LLT SizeTy = LLT::scalar(MinPtrSize);
1601 
1602   // The size operand should be the minimum of the pointer sizes.
1603   Register &SizeOpReg = SrcRegs[SrcRegs.size() - 1];
1604   if (MRI->getType(SizeOpReg) != SizeTy)
1605     SizeOpReg = MIRBuilder.buildZExtOrTrunc(SizeTy, SizeOpReg).getReg(0);
1606 
1607   auto ICall = MIRBuilder.buildInstr(Opcode);
1608   for (Register SrcReg : SrcRegs)
1609     ICall.addUse(SrcReg);
1610 
1611   Align DstAlign;
1612   Align SrcAlign;
1613   unsigned IsVol =
1614       cast<ConstantInt>(CI.getArgOperand(CI.arg_size() - 1))->getZExtValue();
1615 
1616   ConstantInt *CopySize = nullptr;
1617 
1618   if (auto *MCI = dyn_cast<MemCpyInst>(&CI)) {
1619     DstAlign = MCI->getDestAlign().valueOrOne();
1620     SrcAlign = MCI->getSourceAlign().valueOrOne();
1621     CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1622   } else if (auto *MCI = dyn_cast<MemCpyInlineInst>(&CI)) {
1623     DstAlign = MCI->getDestAlign().valueOrOne();
1624     SrcAlign = MCI->getSourceAlign().valueOrOne();
1625     CopySize = dyn_cast<ConstantInt>(MCI->getArgOperand(2));
1626   } else if (auto *MMI = dyn_cast<MemMoveInst>(&CI)) {
1627     DstAlign = MMI->getDestAlign().valueOrOne();
1628     SrcAlign = MMI->getSourceAlign().valueOrOne();
1629     CopySize = dyn_cast<ConstantInt>(MMI->getArgOperand(2));
1630   } else {
1631     auto *MSI = cast<MemSetInst>(&CI);
1632     DstAlign = MSI->getDestAlign().valueOrOne();
1633   }
1634 
1635   if (Opcode != TargetOpcode::G_MEMCPY_INLINE) {
1636     // We need to propagate the tail call flag from the IR inst as an argument.
1637     // Otherwise, we have to pessimize and assume later that we cannot tail call
1638     // any memory intrinsics.
1639     ICall.addImm(CI.isTailCall() ? 1 : 0);
1640   }
1641 
1642   // Create mem operands to store the alignment and volatile info.
1643   MachineMemOperand::Flags LoadFlags = MachineMemOperand::MOLoad;
1644   MachineMemOperand::Flags StoreFlags = MachineMemOperand::MOStore;
1645   if (IsVol) {
1646     LoadFlags |= MachineMemOperand::MOVolatile;
1647     StoreFlags |= MachineMemOperand::MOVolatile;
1648   }
1649 
1650   AAMDNodes AAInfo = CI.getAAMetadata();
1651   if (AA && CopySize &&
1652       AA->pointsToConstantMemory(MemoryLocation(
1653           SrcPtr, LocationSize::precise(CopySize->getZExtValue()), AAInfo))) {
1654     LoadFlags |= MachineMemOperand::MOInvariant;
1655 
1656     // FIXME: pointsToConstantMemory probably does not imply dereferenceable,
1657     // but the previous usage implied it did. Probably should check
1658     // isDereferenceableAndAlignedPointer.
1659     LoadFlags |= MachineMemOperand::MODereferenceable;
1660   }
1661 
1662   ICall.addMemOperand(
1663       MF->getMachineMemOperand(MachinePointerInfo(CI.getArgOperand(0)),
1664                                StoreFlags, 1, DstAlign, AAInfo));
1665   if (Opcode != TargetOpcode::G_MEMSET)
1666     ICall.addMemOperand(MF->getMachineMemOperand(
1667         MachinePointerInfo(SrcPtr), LoadFlags, 1, SrcAlign, AAInfo));
1668 
1669   return true;
1670 }
1671 
1672 void IRTranslator::getStackGuard(Register DstReg,
1673                                  MachineIRBuilder &MIRBuilder) {
1674   const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
1675   MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
1676   auto MIB =
1677       MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD, {DstReg}, {});
1678 
1679   auto &TLI = *MF->getSubtarget().getTargetLowering();
1680   Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
1681   if (!Global)
1682     return;
1683 
1684   unsigned AddrSpace = Global->getType()->getPointerAddressSpace();
1685   LLT PtrTy = LLT::pointer(AddrSpace, DL->getPointerSizeInBits(AddrSpace));
1686 
1687   MachinePointerInfo MPInfo(Global);
1688   auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
1689                MachineMemOperand::MODereferenceable;
1690   MachineMemOperand *MemRef = MF->getMachineMemOperand(
1691       MPInfo, Flags, PtrTy, DL->getPointerABIAlignment(AddrSpace));
1692   MIB.setMemRefs({MemRef});
1693 }
1694 
1695 bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
1696                                               MachineIRBuilder &MIRBuilder) {
1697   ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
1698   MIRBuilder.buildInstr(
1699       Op, {ResRegs[0], ResRegs[1]},
1700       {getOrCreateVReg(*CI.getOperand(0)), getOrCreateVReg(*CI.getOperand(1))});
1701 
1702   return true;
1703 }
1704 
1705 bool IRTranslator::translateFixedPointIntrinsic(unsigned Op, const CallInst &CI,
1706                                                 MachineIRBuilder &MIRBuilder) {
1707   Register Dst = getOrCreateVReg(CI);
1708   Register Src0 = getOrCreateVReg(*CI.getOperand(0));
1709   Register Src1 = getOrCreateVReg(*CI.getOperand(1));
1710   uint64_t Scale = cast<ConstantInt>(CI.getOperand(2))->getZExtValue();
1711   MIRBuilder.buildInstr(Op, {Dst}, { Src0, Src1, Scale });
1712   return true;
1713 }
1714 
1715 unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
1716   switch (ID) {
1717     default:
1718       break;
1719     case Intrinsic::bswap:
1720       return TargetOpcode::G_BSWAP;
1721     case Intrinsic::bitreverse:
1722       return TargetOpcode::G_BITREVERSE;
1723     case Intrinsic::fshl:
1724       return TargetOpcode::G_FSHL;
1725     case Intrinsic::fshr:
1726       return TargetOpcode::G_FSHR;
1727     case Intrinsic::ceil:
1728       return TargetOpcode::G_FCEIL;
1729     case Intrinsic::cos:
1730       return TargetOpcode::G_FCOS;
1731     case Intrinsic::ctpop:
1732       return TargetOpcode::G_CTPOP;
1733     case Intrinsic::exp:
1734       return TargetOpcode::G_FEXP;
1735     case Intrinsic::exp2:
1736       return TargetOpcode::G_FEXP2;
1737     case Intrinsic::fabs:
1738       return TargetOpcode::G_FABS;
1739     case Intrinsic::copysign:
1740       return TargetOpcode::G_FCOPYSIGN;
1741     case Intrinsic::minnum:
1742       return TargetOpcode::G_FMINNUM;
1743     case Intrinsic::maxnum:
1744       return TargetOpcode::G_FMAXNUM;
1745     case Intrinsic::minimum:
1746       return TargetOpcode::G_FMINIMUM;
1747     case Intrinsic::maximum:
1748       return TargetOpcode::G_FMAXIMUM;
1749     case Intrinsic::canonicalize:
1750       return TargetOpcode::G_FCANONICALIZE;
1751     case Intrinsic::floor:
1752       return TargetOpcode::G_FFLOOR;
1753     case Intrinsic::fma:
1754       return TargetOpcode::G_FMA;
1755     case Intrinsic::log:
1756       return TargetOpcode::G_FLOG;
1757     case Intrinsic::log2:
1758       return TargetOpcode::G_FLOG2;
1759     case Intrinsic::log10:
1760       return TargetOpcode::G_FLOG10;
1761     case Intrinsic::nearbyint:
1762       return TargetOpcode::G_FNEARBYINT;
1763     case Intrinsic::pow:
1764       return TargetOpcode::G_FPOW;
1765     case Intrinsic::powi:
1766       return TargetOpcode::G_FPOWI;
1767     case Intrinsic::rint:
1768       return TargetOpcode::G_FRINT;
1769     case Intrinsic::round:
1770       return TargetOpcode::G_INTRINSIC_ROUND;
1771     case Intrinsic::roundeven:
1772       return TargetOpcode::G_INTRINSIC_ROUNDEVEN;
1773     case Intrinsic::sin:
1774       return TargetOpcode::G_FSIN;
1775     case Intrinsic::sqrt:
1776       return TargetOpcode::G_FSQRT;
1777     case Intrinsic::trunc:
1778       return TargetOpcode::G_INTRINSIC_TRUNC;
1779     case Intrinsic::readcyclecounter:
1780       return TargetOpcode::G_READCYCLECOUNTER;
1781     case Intrinsic::ptrmask:
1782       return TargetOpcode::G_PTRMASK;
1783     case Intrinsic::lrint:
1784       return TargetOpcode::G_INTRINSIC_LRINT;
1785     // FADD/FMUL require checking the FMF, so are handled elsewhere.
1786     case Intrinsic::vector_reduce_fmin:
1787       return TargetOpcode::G_VECREDUCE_FMIN;
1788     case Intrinsic::vector_reduce_fmax:
1789       return TargetOpcode::G_VECREDUCE_FMAX;
1790     case Intrinsic::vector_reduce_add:
1791       return TargetOpcode::G_VECREDUCE_ADD;
1792     case Intrinsic::vector_reduce_mul:
1793       return TargetOpcode::G_VECREDUCE_MUL;
1794     case Intrinsic::vector_reduce_and:
1795       return TargetOpcode::G_VECREDUCE_AND;
1796     case Intrinsic::vector_reduce_or:
1797       return TargetOpcode::G_VECREDUCE_OR;
1798     case Intrinsic::vector_reduce_xor:
1799       return TargetOpcode::G_VECREDUCE_XOR;
1800     case Intrinsic::vector_reduce_smax:
1801       return TargetOpcode::G_VECREDUCE_SMAX;
1802     case Intrinsic::vector_reduce_smin:
1803       return TargetOpcode::G_VECREDUCE_SMIN;
1804     case Intrinsic::vector_reduce_umax:
1805       return TargetOpcode::G_VECREDUCE_UMAX;
1806     case Intrinsic::vector_reduce_umin:
1807       return TargetOpcode::G_VECREDUCE_UMIN;
1808     case Intrinsic::lround:
1809       return TargetOpcode::G_LROUND;
1810     case Intrinsic::llround:
1811       return TargetOpcode::G_LLROUND;
1812   }
1813   return Intrinsic::not_intrinsic;
1814 }
1815 
1816 bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
1817                                             Intrinsic::ID ID,
1818                                             MachineIRBuilder &MIRBuilder) {
1819 
1820   unsigned Op = getSimpleIntrinsicOpcode(ID);
1821 
1822   // Is this a simple intrinsic?
1823   if (Op == Intrinsic::not_intrinsic)
1824     return false;
1825 
1826   // Yes. Let's translate it.
1827   SmallVector<llvm::SrcOp, 4> VRegs;
1828   for (const auto &Arg : CI.args())
1829     VRegs.push_back(getOrCreateVReg(*Arg));
1830 
1831   MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
1832                         MachineInstr::copyFlagsFromInstruction(CI));
1833   return true;
1834 }
1835 
1836 // TODO: Include ConstainedOps.def when all strict instructions are defined.
1837 static unsigned getConstrainedOpcode(Intrinsic::ID ID) {
1838   switch (ID) {
1839   case Intrinsic::experimental_constrained_fadd:
1840     return TargetOpcode::G_STRICT_FADD;
1841   case Intrinsic::experimental_constrained_fsub:
1842     return TargetOpcode::G_STRICT_FSUB;
1843   case Intrinsic::experimental_constrained_fmul:
1844     return TargetOpcode::G_STRICT_FMUL;
1845   case Intrinsic::experimental_constrained_fdiv:
1846     return TargetOpcode::G_STRICT_FDIV;
1847   case Intrinsic::experimental_constrained_frem:
1848     return TargetOpcode::G_STRICT_FREM;
1849   case Intrinsic::experimental_constrained_fma:
1850     return TargetOpcode::G_STRICT_FMA;
1851   case Intrinsic::experimental_constrained_sqrt:
1852     return TargetOpcode::G_STRICT_FSQRT;
1853   default:
1854     return 0;
1855   }
1856 }
1857 
1858 bool IRTranslator::translateConstrainedFPIntrinsic(
1859   const ConstrainedFPIntrinsic &FPI, MachineIRBuilder &MIRBuilder) {
1860   fp::ExceptionBehavior EB = *FPI.getExceptionBehavior();
1861 
1862   unsigned Opcode = getConstrainedOpcode(FPI.getIntrinsicID());
1863   if (!Opcode)
1864     return false;
1865 
1866   unsigned Flags = MachineInstr::copyFlagsFromInstruction(FPI);
1867   if (EB == fp::ExceptionBehavior::ebIgnore)
1868     Flags |= MachineInstr::NoFPExcept;
1869 
1870   SmallVector<llvm::SrcOp, 4> VRegs;
1871   VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(0)));
1872   if (!FPI.isUnaryOp())
1873     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(1)));
1874   if (FPI.isTernaryOp())
1875     VRegs.push_back(getOrCreateVReg(*FPI.getArgOperand(2)));
1876 
1877   MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(FPI)}, VRegs, Flags);
1878   return true;
1879 }
1880 
1881 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1882                                            MachineIRBuilder &MIRBuilder) {
1883   if (auto *MI = dyn_cast<AnyMemIntrinsic>(&CI)) {
1884     if (ORE->enabled()) {
1885       const Function &F = *MI->getParent()->getParent();
1886       auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
1887       if (MemoryOpRemark::canHandle(MI, TLI)) {
1888         MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
1889         R.visit(MI);
1890       }
1891     }
1892   }
1893 
1894   // If this is a simple intrinsic (that is, we just need to add a def of
1895   // a vreg, and uses for each arg operand, then translate it.
1896   if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1897     return true;
1898 
1899   switch (ID) {
1900   default:
1901     break;
1902   case Intrinsic::lifetime_start:
1903   case Intrinsic::lifetime_end: {
1904     // No stack colouring in O0, discard region information.
1905     if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1906       return true;
1907 
1908     unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1909                                                   : TargetOpcode::LIFETIME_END;
1910 
1911     // Get the underlying objects for the location passed on the lifetime
1912     // marker.
1913     SmallVector<const Value *, 4> Allocas;
1914     getUnderlyingObjects(CI.getArgOperand(1), Allocas);
1915 
1916     // Iterate over each underlying object, creating lifetime markers for each
1917     // static alloca. Quit if we find a non-static alloca.
1918     for (const Value *V : Allocas) {
1919       const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1920       if (!AI)
1921         continue;
1922 
1923       if (!AI->isStaticAlloca())
1924         return true;
1925 
1926       MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1927     }
1928     return true;
1929   }
1930   case Intrinsic::dbg_declare: {
1931     const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1932     assert(DI.getVariable() && "Missing variable");
1933 
1934     const Value *Address = DI.getAddress();
1935     if (!Address || isa<UndefValue>(Address)) {
1936       LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1937       return true;
1938     }
1939 
1940     assert(DI.getVariable()->isValidLocationForIntrinsic(
1941                MIRBuilder.getDebugLoc()) &&
1942            "Expected inlined-at fields to agree");
1943     auto AI = dyn_cast<AllocaInst>(Address);
1944     if (AI && AI->isStaticAlloca()) {
1945       // Static allocas are tracked at the MF level, no need for DBG_VALUE
1946       // instructions (in fact, they get ignored if they *do* exist).
1947       MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1948                              getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1949     } else {
1950       // A dbg.declare describes the address of a source variable, so lower it
1951       // into an indirect DBG_VALUE.
1952       MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1953                                        DI.getVariable(), DI.getExpression());
1954     }
1955     return true;
1956   }
1957   case Intrinsic::dbg_label: {
1958     const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1959     assert(DI.getLabel() && "Missing label");
1960 
1961     assert(DI.getLabel()->isValidLocationForIntrinsic(
1962                MIRBuilder.getDebugLoc()) &&
1963            "Expected inlined-at fields to agree");
1964 
1965     MIRBuilder.buildDbgLabel(DI.getLabel());
1966     return true;
1967   }
1968   case Intrinsic::vaend:
1969     // No target I know of cares about va_end. Certainly no in-tree target
1970     // does. Simplest intrinsic ever!
1971     return true;
1972   case Intrinsic::vastart: {
1973     auto &TLI = *MF->getSubtarget().getTargetLowering();
1974     Value *Ptr = CI.getArgOperand(0);
1975     unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1976 
1977     // FIXME: Get alignment
1978     MIRBuilder.buildInstr(TargetOpcode::G_VASTART, {}, {getOrCreateVReg(*Ptr)})
1979         .addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Ptr),
1980                                                 MachineMemOperand::MOStore,
1981                                                 ListSize, Align(1)));
1982     return true;
1983   }
1984   case Intrinsic::dbg_value: {
1985     // This form of DBG_VALUE is target-independent.
1986     const DbgValueInst &DI = cast<DbgValueInst>(CI);
1987     const Value *V = DI.getValue();
1988     assert(DI.getVariable()->isValidLocationForIntrinsic(
1989                MIRBuilder.getDebugLoc()) &&
1990            "Expected inlined-at fields to agree");
1991     if (!V || DI.hasArgList()) {
1992       // DI cannot produce a valid DBG_VALUE, so produce an undef DBG_VALUE to
1993       // terminate any prior location.
1994       MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1995     } else if (const auto *CI = dyn_cast<Constant>(V)) {
1996       MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1997     } else {
1998       for (Register Reg : getOrCreateVRegs(*V)) {
1999         // FIXME: This does not handle register-indirect values at offset 0. The
2000         // direct/indirect thing shouldn't really be handled by something as
2001         // implicit as reg+noreg vs reg+imm in the first place, but it seems
2002         // pretty baked in right now.
2003         MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
2004       }
2005     }
2006     return true;
2007   }
2008   case Intrinsic::uadd_with_overflow:
2009     return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
2010   case Intrinsic::sadd_with_overflow:
2011     return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
2012   case Intrinsic::usub_with_overflow:
2013     return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
2014   case Intrinsic::ssub_with_overflow:
2015     return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
2016   case Intrinsic::umul_with_overflow:
2017     return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
2018   case Intrinsic::smul_with_overflow:
2019     return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
2020   case Intrinsic::uadd_sat:
2021     return translateBinaryOp(TargetOpcode::G_UADDSAT, CI, MIRBuilder);
2022   case Intrinsic::sadd_sat:
2023     return translateBinaryOp(TargetOpcode::G_SADDSAT, CI, MIRBuilder);
2024   case Intrinsic::usub_sat:
2025     return translateBinaryOp(TargetOpcode::G_USUBSAT, CI, MIRBuilder);
2026   case Intrinsic::ssub_sat:
2027     return translateBinaryOp(TargetOpcode::G_SSUBSAT, CI, MIRBuilder);
2028   case Intrinsic::ushl_sat:
2029     return translateBinaryOp(TargetOpcode::G_USHLSAT, CI, MIRBuilder);
2030   case Intrinsic::sshl_sat:
2031     return translateBinaryOp(TargetOpcode::G_SSHLSAT, CI, MIRBuilder);
2032   case Intrinsic::umin:
2033     return translateBinaryOp(TargetOpcode::G_UMIN, CI, MIRBuilder);
2034   case Intrinsic::umax:
2035     return translateBinaryOp(TargetOpcode::G_UMAX, CI, MIRBuilder);
2036   case Intrinsic::smin:
2037     return translateBinaryOp(TargetOpcode::G_SMIN, CI, MIRBuilder);
2038   case Intrinsic::smax:
2039     return translateBinaryOp(TargetOpcode::G_SMAX, CI, MIRBuilder);
2040   case Intrinsic::abs:
2041     // TODO: Preserve "int min is poison" arg in GMIR?
2042     return translateUnaryOp(TargetOpcode::G_ABS, CI, MIRBuilder);
2043   case Intrinsic::smul_fix:
2044     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIX, CI, MIRBuilder);
2045   case Intrinsic::umul_fix:
2046     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIX, CI, MIRBuilder);
2047   case Intrinsic::smul_fix_sat:
2048     return translateFixedPointIntrinsic(TargetOpcode::G_SMULFIXSAT, CI, MIRBuilder);
2049   case Intrinsic::umul_fix_sat:
2050     return translateFixedPointIntrinsic(TargetOpcode::G_UMULFIXSAT, CI, MIRBuilder);
2051   case Intrinsic::sdiv_fix:
2052     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIX, CI, MIRBuilder);
2053   case Intrinsic::udiv_fix:
2054     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIX, CI, MIRBuilder);
2055   case Intrinsic::sdiv_fix_sat:
2056     return translateFixedPointIntrinsic(TargetOpcode::G_SDIVFIXSAT, CI, MIRBuilder);
2057   case Intrinsic::udiv_fix_sat:
2058     return translateFixedPointIntrinsic(TargetOpcode::G_UDIVFIXSAT, CI, MIRBuilder);
2059   case Intrinsic::fmuladd: {
2060     const TargetMachine &TM = MF->getTarget();
2061     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2062     Register Dst = getOrCreateVReg(CI);
2063     Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
2064     Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
2065     Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
2066     if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
2067         TLI.isFMAFasterThanFMulAndFAdd(*MF,
2068                                        TLI.getValueType(*DL, CI.getType()))) {
2069       // TODO: Revisit this to see if we should move this part of the
2070       // lowering to the combiner.
2071       MIRBuilder.buildFMA(Dst, Op0, Op1, Op2,
2072                           MachineInstr::copyFlagsFromInstruction(CI));
2073     } else {
2074       LLT Ty = getLLTForType(*CI.getType(), *DL);
2075       auto FMul = MIRBuilder.buildFMul(
2076           Ty, Op0, Op1, MachineInstr::copyFlagsFromInstruction(CI));
2077       MIRBuilder.buildFAdd(Dst, FMul, Op2,
2078                            MachineInstr::copyFlagsFromInstruction(CI));
2079     }
2080     return true;
2081   }
2082   case Intrinsic::convert_from_fp16:
2083     // FIXME: This intrinsic should probably be removed from the IR.
2084     MIRBuilder.buildFPExt(getOrCreateVReg(CI),
2085                           getOrCreateVReg(*CI.getArgOperand(0)),
2086                           MachineInstr::copyFlagsFromInstruction(CI));
2087     return true;
2088   case Intrinsic::convert_to_fp16:
2089     // FIXME: This intrinsic should probably be removed from the IR.
2090     MIRBuilder.buildFPTrunc(getOrCreateVReg(CI),
2091                             getOrCreateVReg(*CI.getArgOperand(0)),
2092                             MachineInstr::copyFlagsFromInstruction(CI));
2093     return true;
2094   case Intrinsic::memcpy_inline:
2095     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY_INLINE);
2096   case Intrinsic::memcpy:
2097     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMCPY);
2098   case Intrinsic::memmove:
2099     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMMOVE);
2100   case Intrinsic::memset:
2101     return translateMemFunc(CI, MIRBuilder, TargetOpcode::G_MEMSET);
2102   case Intrinsic::eh_typeid_for: {
2103     GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
2104     Register Reg = getOrCreateVReg(CI);
2105     unsigned TypeID = MF->getTypeIDFor(GV);
2106     MIRBuilder.buildConstant(Reg, TypeID);
2107     return true;
2108   }
2109   case Intrinsic::objectsize:
2110     llvm_unreachable("llvm.objectsize.* should have been lowered already");
2111 
2112   case Intrinsic::is_constant:
2113     llvm_unreachable("llvm.is.constant.* should have been lowered already");
2114 
2115   case Intrinsic::stackguard:
2116     getStackGuard(getOrCreateVReg(CI), MIRBuilder);
2117     return true;
2118   case Intrinsic::stackprotector: {
2119     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2120     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2121     Register GuardVal;
2122     if (TLI.useLoadStackGuardNode()) {
2123       GuardVal = MRI->createGenericVirtualRegister(PtrTy);
2124       getStackGuard(GuardVal, MIRBuilder);
2125     } else
2126       GuardVal = getOrCreateVReg(*CI.getArgOperand(0)); // The guard's value.
2127 
2128     AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
2129     int FI = getOrCreateFrameIndex(*Slot);
2130     MF->getFrameInfo().setStackProtectorIndex(FI);
2131 
2132     MIRBuilder.buildStore(
2133         GuardVal, getOrCreateVReg(*Slot),
2134         *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
2135                                   MachineMemOperand::MOStore |
2136                                       MachineMemOperand::MOVolatile,
2137                                   PtrTy, Align(8)));
2138     return true;
2139   }
2140   case Intrinsic::stacksave: {
2141     // Save the stack pointer to the location provided by the intrinsic.
2142     Register Reg = getOrCreateVReg(CI);
2143     Register StackPtr = MF->getSubtarget()
2144                             .getTargetLowering()
2145                             ->getStackPointerRegisterToSaveRestore();
2146 
2147     // If the target doesn't specify a stack pointer, then fall back.
2148     if (!StackPtr)
2149       return false;
2150 
2151     MIRBuilder.buildCopy(Reg, StackPtr);
2152     return true;
2153   }
2154   case Intrinsic::stackrestore: {
2155     // Restore the stack pointer from the location provided by the intrinsic.
2156     Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
2157     Register StackPtr = MF->getSubtarget()
2158                             .getTargetLowering()
2159                             ->getStackPointerRegisterToSaveRestore();
2160 
2161     // If the target doesn't specify a stack pointer, then fall back.
2162     if (!StackPtr)
2163       return false;
2164 
2165     MIRBuilder.buildCopy(StackPtr, Reg);
2166     return true;
2167   }
2168   case Intrinsic::cttz:
2169   case Intrinsic::ctlz: {
2170     ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
2171     bool isTrailing = ID == Intrinsic::cttz;
2172     unsigned Opcode = isTrailing
2173                           ? Cst->isZero() ? TargetOpcode::G_CTTZ
2174                                           : TargetOpcode::G_CTTZ_ZERO_UNDEF
2175                           : Cst->isZero() ? TargetOpcode::G_CTLZ
2176                                           : TargetOpcode::G_CTLZ_ZERO_UNDEF;
2177     MIRBuilder.buildInstr(Opcode, {getOrCreateVReg(CI)},
2178                           {getOrCreateVReg(*CI.getArgOperand(0))});
2179     return true;
2180   }
2181   case Intrinsic::invariant_start: {
2182     LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
2183     Register Undef = MRI->createGenericVirtualRegister(PtrTy);
2184     MIRBuilder.buildUndef(Undef);
2185     return true;
2186   }
2187   case Intrinsic::invariant_end:
2188     return true;
2189   case Intrinsic::expect:
2190   case Intrinsic::annotation:
2191   case Intrinsic::ptr_annotation:
2192   case Intrinsic::launder_invariant_group:
2193   case Intrinsic::strip_invariant_group: {
2194     // Drop the intrinsic, but forward the value.
2195     MIRBuilder.buildCopy(getOrCreateVReg(CI),
2196                          getOrCreateVReg(*CI.getArgOperand(0)));
2197     return true;
2198   }
2199   case Intrinsic::assume:
2200   case Intrinsic::experimental_noalias_scope_decl:
2201   case Intrinsic::var_annotation:
2202   case Intrinsic::sideeffect:
2203     // Discard annotate attributes, assumptions, and artificial side-effects.
2204     return true;
2205   case Intrinsic::read_volatile_register:
2206   case Intrinsic::read_register: {
2207     Value *Arg = CI.getArgOperand(0);
2208     MIRBuilder
2209         .buildInstr(TargetOpcode::G_READ_REGISTER, {getOrCreateVReg(CI)}, {})
2210         .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()));
2211     return true;
2212   }
2213   case Intrinsic::write_register: {
2214     Value *Arg = CI.getArgOperand(0);
2215     MIRBuilder.buildInstr(TargetOpcode::G_WRITE_REGISTER)
2216       .addMetadata(cast<MDNode>(cast<MetadataAsValue>(Arg)->getMetadata()))
2217       .addUse(getOrCreateVReg(*CI.getArgOperand(1)));
2218     return true;
2219   }
2220   case Intrinsic::localescape: {
2221     MachineBasicBlock &EntryMBB = MF->front();
2222     StringRef EscapedName = GlobalValue::dropLLVMManglingEscape(MF->getName());
2223 
2224     // Directly emit some LOCAL_ESCAPE machine instrs. Label assignment emission
2225     // is the same on all targets.
2226     for (unsigned Idx = 0, E = CI.arg_size(); Idx < E; ++Idx) {
2227       Value *Arg = CI.getArgOperand(Idx)->stripPointerCasts();
2228       if (isa<ConstantPointerNull>(Arg))
2229         continue; // Skip null pointers. They represent a hole in index space.
2230 
2231       int FI = getOrCreateFrameIndex(*cast<AllocaInst>(Arg));
2232       MCSymbol *FrameAllocSym =
2233           MF->getMMI().getContext().getOrCreateFrameAllocSymbol(EscapedName,
2234                                                                 Idx);
2235 
2236       // This should be inserted at the start of the entry block.
2237       auto LocalEscape =
2238           MIRBuilder.buildInstrNoInsert(TargetOpcode::LOCAL_ESCAPE)
2239               .addSym(FrameAllocSym)
2240               .addFrameIndex(FI);
2241 
2242       EntryMBB.insert(EntryMBB.begin(), LocalEscape);
2243     }
2244 
2245     return true;
2246   }
2247   case Intrinsic::vector_reduce_fadd:
2248   case Intrinsic::vector_reduce_fmul: {
2249     // Need to check for the reassoc flag to decide whether we want a
2250     // sequential reduction opcode or not.
2251     Register Dst = getOrCreateVReg(CI);
2252     Register ScalarSrc = getOrCreateVReg(*CI.getArgOperand(0));
2253     Register VecSrc = getOrCreateVReg(*CI.getArgOperand(1));
2254     unsigned Opc = 0;
2255     if (!CI.hasAllowReassoc()) {
2256       // The sequential ordering case.
2257       Opc = ID == Intrinsic::vector_reduce_fadd
2258                 ? TargetOpcode::G_VECREDUCE_SEQ_FADD
2259                 : TargetOpcode::G_VECREDUCE_SEQ_FMUL;
2260       MIRBuilder.buildInstr(Opc, {Dst}, {ScalarSrc, VecSrc},
2261                             MachineInstr::copyFlagsFromInstruction(CI));
2262       return true;
2263     }
2264     // We split the operation into a separate G_FADD/G_FMUL + the reduce,
2265     // since the associativity doesn't matter.
2266     unsigned ScalarOpc;
2267     if (ID == Intrinsic::vector_reduce_fadd) {
2268       Opc = TargetOpcode::G_VECREDUCE_FADD;
2269       ScalarOpc = TargetOpcode::G_FADD;
2270     } else {
2271       Opc = TargetOpcode::G_VECREDUCE_FMUL;
2272       ScalarOpc = TargetOpcode::G_FMUL;
2273     }
2274     LLT DstTy = MRI->getType(Dst);
2275     auto Rdx = MIRBuilder.buildInstr(
2276         Opc, {DstTy}, {VecSrc}, MachineInstr::copyFlagsFromInstruction(CI));
2277     MIRBuilder.buildInstr(ScalarOpc, {Dst}, {ScalarSrc, Rdx},
2278                           MachineInstr::copyFlagsFromInstruction(CI));
2279 
2280     return true;
2281   }
2282   case Intrinsic::trap:
2283   case Intrinsic::debugtrap:
2284   case Intrinsic::ubsantrap: {
2285     StringRef TrapFuncName =
2286         CI.getAttributes().getFnAttr("trap-func-name").getValueAsString();
2287     if (TrapFuncName.empty())
2288       break; // Use the default handling.
2289     CallLowering::CallLoweringInfo Info;
2290     if (ID == Intrinsic::ubsantrap) {
2291       Info.OrigArgs.push_back({getOrCreateVRegs(*CI.getArgOperand(0)),
2292                                CI.getArgOperand(0)->getType(), 0});
2293     }
2294     Info.Callee = MachineOperand::CreateES(TrapFuncName.data());
2295     Info.CB = &CI;
2296     Info.OrigRet = {Register(), Type::getVoidTy(CI.getContext()), 0};
2297     return CLI->lowerCall(MIRBuilder, Info);
2298   }
2299   case Intrinsic::fptrunc_round: {
2300     unsigned Flags = MachineInstr::copyFlagsFromInstruction(CI);
2301 
2302     // Convert the metadata argument to a constant integer
2303     Metadata *MD = cast<MetadataAsValue>(CI.getArgOperand(1))->getMetadata();
2304     Optional<RoundingMode> RoundMode =
2305         convertStrToRoundingMode(cast<MDString>(MD)->getString());
2306 
2307     // Add the Rounding mode as an integer
2308     MIRBuilder
2309         .buildInstr(TargetOpcode::G_INTRINSIC_FPTRUNC_ROUND,
2310                     {getOrCreateVReg(CI)},
2311                     {getOrCreateVReg(*CI.getArgOperand(0))}, Flags)
2312         .addImm((int)*RoundMode);
2313 
2314     return true;
2315   }
2316 #define INSTRUCTION(NAME, NARG, ROUND_MODE, INTRINSIC)  \
2317   case Intrinsic::INTRINSIC:
2318 #include "llvm/IR/ConstrainedOps.def"
2319     return translateConstrainedFPIntrinsic(cast<ConstrainedFPIntrinsic>(CI),
2320                                            MIRBuilder);
2321 
2322   }
2323   return false;
2324 }
2325 
2326 bool IRTranslator::translateInlineAsm(const CallBase &CB,
2327                                       MachineIRBuilder &MIRBuilder) {
2328 
2329   const InlineAsmLowering *ALI = MF->getSubtarget().getInlineAsmLowering();
2330 
2331   if (!ALI) {
2332     LLVM_DEBUG(
2333         dbgs() << "Inline asm lowering is not supported for this target yet\n");
2334     return false;
2335   }
2336 
2337   return ALI->lowerInlineAsm(
2338       MIRBuilder, CB, [&](const Value &Val) { return getOrCreateVRegs(Val); });
2339 }
2340 
2341 bool IRTranslator::translateCallBase(const CallBase &CB,
2342                                      MachineIRBuilder &MIRBuilder) {
2343   ArrayRef<Register> Res = getOrCreateVRegs(CB);
2344 
2345   SmallVector<ArrayRef<Register>, 8> Args;
2346   Register SwiftInVReg = 0;
2347   Register SwiftErrorVReg = 0;
2348   for (const auto &Arg : CB.args()) {
2349     if (CLI->supportSwiftError() && isSwiftError(Arg)) {
2350       assert(SwiftInVReg == 0 && "Expected only one swift error argument");
2351       LLT Ty = getLLTForType(*Arg->getType(), *DL);
2352       SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
2353       MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
2354                                             &CB, &MIRBuilder.getMBB(), Arg));
2355       Args.emplace_back(makeArrayRef(SwiftInVReg));
2356       SwiftErrorVReg =
2357           SwiftError.getOrCreateVRegDefAt(&CB, &MIRBuilder.getMBB(), Arg);
2358       continue;
2359     }
2360     Args.push_back(getOrCreateVRegs(*Arg));
2361   }
2362 
2363   if (auto *CI = dyn_cast<CallInst>(&CB)) {
2364     if (ORE->enabled()) {
2365       const Function &F = *CI->getParent()->getParent();
2366       auto &TLI = getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F);
2367       if (MemoryOpRemark::canHandle(CI, TLI)) {
2368         MemoryOpRemark R(*ORE, "gisel-irtranslator-memsize", *DL, TLI);
2369         R.visit(CI);
2370       }
2371     }
2372   }
2373 
2374   // We don't set HasCalls on MFI here yet because call lowering may decide to
2375   // optimize into tail calls. Instead, we defer that to selection where a final
2376   // scan is done to check if any instructions are calls.
2377   bool Success =
2378       CLI->lowerCall(MIRBuilder, CB, Res, Args, SwiftErrorVReg,
2379                      [&]() { return getOrCreateVReg(*CB.getCalledOperand()); });
2380 
2381   // Check if we just inserted a tail call.
2382   if (Success) {
2383     assert(!HasTailCall && "Can't tail call return twice from block?");
2384     const TargetInstrInfo *TII = MF->getSubtarget().getInstrInfo();
2385     HasTailCall = TII->isTailCall(*std::prev(MIRBuilder.getInsertPt()));
2386   }
2387 
2388   return Success;
2389 }
2390 
2391 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
2392   const CallInst &CI = cast<CallInst>(U);
2393   auto TII = MF->getTarget().getIntrinsicInfo();
2394   const Function *F = CI.getCalledFunction();
2395 
2396   // FIXME: support Windows dllimport function calls.
2397   if (F && (F->hasDLLImportStorageClass() ||
2398             (MF->getTarget().getTargetTriple().isOSWindows() &&
2399              F->hasExternalWeakLinkage())))
2400     return false;
2401 
2402   // FIXME: support control flow guard targets.
2403   if (CI.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2404     return false;
2405 
2406   if (CI.isInlineAsm())
2407     return translateInlineAsm(CI, MIRBuilder);
2408 
2409   diagnoseDontCall(CI);
2410 
2411   Intrinsic::ID ID = Intrinsic::not_intrinsic;
2412   if (F && F->isIntrinsic()) {
2413     ID = F->getIntrinsicID();
2414     if (TII && ID == Intrinsic::not_intrinsic)
2415       ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
2416   }
2417 
2418   if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic)
2419     return translateCallBase(CI, MIRBuilder);
2420 
2421   assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
2422 
2423   if (translateKnownIntrinsic(CI, ID, MIRBuilder))
2424     return true;
2425 
2426   ArrayRef<Register> ResultRegs;
2427   if (!CI.getType()->isVoidTy())
2428     ResultRegs = getOrCreateVRegs(CI);
2429 
2430   // Ignore the callsite attributes. Backend code is most likely not expecting
2431   // an intrinsic to sometimes have side effects and sometimes not.
2432   MachineInstrBuilder MIB =
2433       MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
2434   if (isa<FPMathOperator>(CI))
2435     MIB->copyIRFlags(CI);
2436 
2437   for (const auto &Arg : enumerate(CI.args())) {
2438     // If this is required to be an immediate, don't materialize it in a
2439     // register.
2440     if (CI.paramHasAttr(Arg.index(), Attribute::ImmArg)) {
2441       if (ConstantInt *CI = dyn_cast<ConstantInt>(Arg.value())) {
2442         // imm arguments are more convenient than cimm (and realistically
2443         // probably sufficient), so use them.
2444         assert(CI->getBitWidth() <= 64 &&
2445                "large intrinsic immediates not handled");
2446         MIB.addImm(CI->getSExtValue());
2447       } else {
2448         MIB.addFPImm(cast<ConstantFP>(Arg.value()));
2449       }
2450     } else if (auto *MDVal = dyn_cast<MetadataAsValue>(Arg.value())) {
2451       auto *MD = MDVal->getMetadata();
2452       auto *MDN = dyn_cast<MDNode>(MD);
2453       if (!MDN) {
2454         if (auto *ConstMD = dyn_cast<ConstantAsMetadata>(MD))
2455           MDN = MDNode::get(MF->getFunction().getContext(), ConstMD);
2456         else // This was probably an MDString.
2457           return false;
2458       }
2459       MIB.addMetadata(MDN);
2460     } else {
2461       ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg.value());
2462       if (VRegs.size() > 1)
2463         return false;
2464       MIB.addUse(VRegs[0]);
2465     }
2466   }
2467 
2468   // Add a MachineMemOperand if it is a target mem intrinsic.
2469   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
2470   TargetLowering::IntrinsicInfo Info;
2471   // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
2472   if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
2473     Align Alignment = Info.align.value_or(
2474         DL->getABITypeAlign(Info.memVT.getTypeForEVT(F->getContext())));
2475     LLT MemTy = Info.memVT.isSimple()
2476                     ? getLLTForMVT(Info.memVT.getSimpleVT())
2477                     : LLT::scalar(Info.memVT.getStoreSizeInBits());
2478     MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
2479                                                Info.flags, MemTy, Alignment));
2480   }
2481 
2482   return true;
2483 }
2484 
2485 bool IRTranslator::findUnwindDestinations(
2486     const BasicBlock *EHPadBB,
2487     BranchProbability Prob,
2488     SmallVectorImpl<std::pair<MachineBasicBlock *, BranchProbability>>
2489         &UnwindDests) {
2490   EHPersonality Personality = classifyEHPersonality(
2491       EHPadBB->getParent()->getFunction().getPersonalityFn());
2492   bool IsMSVCCXX = Personality == EHPersonality::MSVC_CXX;
2493   bool IsCoreCLR = Personality == EHPersonality::CoreCLR;
2494   bool IsWasmCXX = Personality == EHPersonality::Wasm_CXX;
2495   bool IsSEH = isAsynchronousEHPersonality(Personality);
2496 
2497   if (IsWasmCXX) {
2498     // Ignore this for now.
2499     return false;
2500   }
2501 
2502   while (EHPadBB) {
2503     const Instruction *Pad = EHPadBB->getFirstNonPHI();
2504     BasicBlock *NewEHPadBB = nullptr;
2505     if (isa<LandingPadInst>(Pad)) {
2506       // Stop on landingpads. They are not funclets.
2507       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2508       break;
2509     }
2510     if (isa<CleanupPadInst>(Pad)) {
2511       // Stop on cleanup pads. Cleanups are always funclet entries for all known
2512       // personalities.
2513       UnwindDests.emplace_back(&getMBB(*EHPadBB), Prob);
2514       UnwindDests.back().first->setIsEHScopeEntry();
2515       UnwindDests.back().first->setIsEHFuncletEntry();
2516       break;
2517     }
2518     if (auto *CatchSwitch = dyn_cast<CatchSwitchInst>(Pad)) {
2519       // Add the catchpad handlers to the possible destinations.
2520       for (const BasicBlock *CatchPadBB : CatchSwitch->handlers()) {
2521         UnwindDests.emplace_back(&getMBB(*CatchPadBB), Prob);
2522         // For MSVC++ and the CLR, catchblocks are funclets and need prologues.
2523         if (IsMSVCCXX || IsCoreCLR)
2524           UnwindDests.back().first->setIsEHFuncletEntry();
2525         if (!IsSEH)
2526           UnwindDests.back().first->setIsEHScopeEntry();
2527       }
2528       NewEHPadBB = CatchSwitch->getUnwindDest();
2529     } else {
2530       continue;
2531     }
2532 
2533     BranchProbabilityInfo *BPI = FuncInfo.BPI;
2534     if (BPI && NewEHPadBB)
2535       Prob *= BPI->getEdgeProbability(EHPadBB, NewEHPadBB);
2536     EHPadBB = NewEHPadBB;
2537   }
2538   return true;
2539 }
2540 
2541 bool IRTranslator::translateInvoke(const User &U,
2542                                    MachineIRBuilder &MIRBuilder) {
2543   const InvokeInst &I = cast<InvokeInst>(U);
2544   MCContext &Context = MF->getContext();
2545 
2546   const BasicBlock *ReturnBB = I.getSuccessor(0);
2547   const BasicBlock *EHPadBB = I.getSuccessor(1);
2548 
2549   const Function *Fn = I.getCalledFunction();
2550 
2551   // FIXME: support invoking patchpoint and statepoint intrinsics.
2552   if (Fn && Fn->isIntrinsic())
2553     return false;
2554 
2555   // FIXME: support whatever these are.
2556   if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
2557     return false;
2558 
2559   // FIXME: support control flow guard targets.
2560   if (I.countOperandBundlesOfType(LLVMContext::OB_cfguardtarget))
2561     return false;
2562 
2563   // FIXME: support Windows exception handling.
2564   if (!isa<LandingPadInst>(EHPadBB->getFirstNonPHI()))
2565     return false;
2566 
2567   bool LowerInlineAsm = I.isInlineAsm();
2568   bool NeedEHLabel = true;
2569   // If it can't throw then use a fast-path without emitting EH labels.
2570   if (LowerInlineAsm)
2571     NeedEHLabel = (cast<InlineAsm>(I.getCalledOperand()))->canThrow();
2572 
2573   // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
2574   // the region covered by the try.
2575   MCSymbol *BeginSymbol = nullptr;
2576   if (NeedEHLabel) {
2577     BeginSymbol = Context.createTempSymbol();
2578     MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
2579   }
2580 
2581   if (LowerInlineAsm) {
2582     if (!translateInlineAsm(I, MIRBuilder))
2583       return false;
2584   } else if (!translateCallBase(I, MIRBuilder))
2585     return false;
2586 
2587   MCSymbol *EndSymbol = nullptr;
2588   if (NeedEHLabel) {
2589     EndSymbol = Context.createTempSymbol();
2590     MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
2591   }
2592 
2593   SmallVector<std::pair<MachineBasicBlock *, BranchProbability>, 1> UnwindDests;
2594   BranchProbabilityInfo *BPI = FuncInfo.BPI;
2595   MachineBasicBlock *InvokeMBB = &MIRBuilder.getMBB();
2596   BranchProbability EHPadBBProb =
2597       BPI ? BPI->getEdgeProbability(InvokeMBB->getBasicBlock(), EHPadBB)
2598           : BranchProbability::getZero();
2599 
2600   if (!findUnwindDestinations(EHPadBB, EHPadBBProb, UnwindDests))
2601     return false;
2602 
2603   MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
2604                     &ReturnMBB = getMBB(*ReturnBB);
2605   // Update successor info.
2606   addSuccessorWithProb(InvokeMBB, &ReturnMBB);
2607   for (auto &UnwindDest : UnwindDests) {
2608     UnwindDest.first->setIsEHPad();
2609     addSuccessorWithProb(InvokeMBB, UnwindDest.first, UnwindDest.second);
2610   }
2611   InvokeMBB->normalizeSuccProbs();
2612 
2613   if (NeedEHLabel) {
2614     assert(BeginSymbol && "Expected a begin symbol!");
2615     assert(EndSymbol && "Expected an end symbol!");
2616     MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
2617   }
2618 
2619   MIRBuilder.buildBr(ReturnMBB);
2620   return true;
2621 }
2622 
2623 bool IRTranslator::translateCallBr(const User &U,
2624                                    MachineIRBuilder &MIRBuilder) {
2625   // FIXME: Implement this.
2626   return false;
2627 }
2628 
2629 bool IRTranslator::translateLandingPad(const User &U,
2630                                        MachineIRBuilder &MIRBuilder) {
2631   const LandingPadInst &LP = cast<LandingPadInst>(U);
2632 
2633   MachineBasicBlock &MBB = MIRBuilder.getMBB();
2634 
2635   MBB.setIsEHPad();
2636 
2637   // If there aren't registers to copy the values into (e.g., during SjLj
2638   // exceptions), then don't bother.
2639   auto &TLI = *MF->getSubtarget().getTargetLowering();
2640   const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
2641   if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
2642       TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
2643     return true;
2644 
2645   // If landingpad's return type is token type, we don't create DAG nodes
2646   // for its exception pointer and selector value. The extraction of exception
2647   // pointer or selector value from token type landingpads is not currently
2648   // supported.
2649   if (LP.getType()->isTokenTy())
2650     return true;
2651 
2652   // Add a label to mark the beginning of the landing pad.  Deletion of the
2653   // landing pad can thus be detected via the MachineModuleInfo.
2654   MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
2655     .addSym(MF->addLandingPad(&MBB));
2656 
2657   // If the unwinder does not preserve all registers, ensure that the
2658   // function marks the clobbered registers as used.
2659   const TargetRegisterInfo &TRI = *MF->getSubtarget().getRegisterInfo();
2660   if (auto *RegMask = TRI.getCustomEHPadPreservedMask(*MF))
2661     MF->getRegInfo().addPhysRegsUsedFromRegMask(RegMask);
2662 
2663   LLT Ty = getLLTForType(*LP.getType(), *DL);
2664   Register Undef = MRI->createGenericVirtualRegister(Ty);
2665   MIRBuilder.buildUndef(Undef);
2666 
2667   SmallVector<LLT, 2> Tys;
2668   for (Type *Ty : cast<StructType>(LP.getType())->elements())
2669     Tys.push_back(getLLTForType(*Ty, *DL));
2670   assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
2671 
2672   // Mark exception register as live in.
2673   Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
2674   if (!ExceptionReg)
2675     return false;
2676 
2677   MBB.addLiveIn(ExceptionReg);
2678   ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
2679   MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
2680 
2681   Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
2682   if (!SelectorReg)
2683     return false;
2684 
2685   MBB.addLiveIn(SelectorReg);
2686   Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
2687   MIRBuilder.buildCopy(PtrVReg, SelectorReg);
2688   MIRBuilder.buildCast(ResRegs[1], PtrVReg);
2689 
2690   return true;
2691 }
2692 
2693 bool IRTranslator::translateAlloca(const User &U,
2694                                    MachineIRBuilder &MIRBuilder) {
2695   auto &AI = cast<AllocaInst>(U);
2696 
2697   if (AI.isSwiftError())
2698     return true;
2699 
2700   if (AI.isStaticAlloca()) {
2701     Register Res = getOrCreateVReg(AI);
2702     int FI = getOrCreateFrameIndex(AI);
2703     MIRBuilder.buildFrameIndex(Res, FI);
2704     return true;
2705   }
2706 
2707   // FIXME: support stack probing for Windows.
2708   if (MF->getTarget().getTargetTriple().isOSWindows())
2709     return false;
2710 
2711   // Now we're in the harder dynamic case.
2712   Register NumElts = getOrCreateVReg(*AI.getArraySize());
2713   Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
2714   LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
2715   if (MRI->getType(NumElts) != IntPtrTy) {
2716     Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
2717     MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
2718     NumElts = ExtElts;
2719   }
2720 
2721   Type *Ty = AI.getAllocatedType();
2722 
2723   Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
2724   Register TySize =
2725       getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, DL->getTypeAllocSize(Ty)));
2726   MIRBuilder.buildMul(AllocSize, NumElts, TySize);
2727 
2728   // Round the size of the allocation up to the stack alignment size
2729   // by add SA-1 to the size. This doesn't overflow because we're computing
2730   // an address inside an alloca.
2731   Align StackAlign = MF->getSubtarget().getFrameLowering()->getStackAlign();
2732   auto SAMinusOne = MIRBuilder.buildConstant(IntPtrTy, StackAlign.value() - 1);
2733   auto AllocAdd = MIRBuilder.buildAdd(IntPtrTy, AllocSize, SAMinusOne,
2734                                       MachineInstr::NoUWrap);
2735   auto AlignCst =
2736       MIRBuilder.buildConstant(IntPtrTy, ~(uint64_t)(StackAlign.value() - 1));
2737   auto AlignedAlloc = MIRBuilder.buildAnd(IntPtrTy, AllocAdd, AlignCst);
2738 
2739   Align Alignment = std::max(AI.getAlign(), DL->getPrefTypeAlign(Ty));
2740   if (Alignment <= StackAlign)
2741     Alignment = Align(1);
2742   MIRBuilder.buildDynStackAlloc(getOrCreateVReg(AI), AlignedAlloc, Alignment);
2743 
2744   MF->getFrameInfo().CreateVariableSizedObject(Alignment, &AI);
2745   assert(MF->getFrameInfo().hasVarSizedObjects());
2746   return true;
2747 }
2748 
2749 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
2750   // FIXME: We may need more info about the type. Because of how LLT works,
2751   // we're completely discarding the i64/double distinction here (amongst
2752   // others). Fortunately the ABIs I know of where that matters don't use va_arg
2753   // anyway but that's not guaranteed.
2754   MIRBuilder.buildInstr(TargetOpcode::G_VAARG, {getOrCreateVReg(U)},
2755                         {getOrCreateVReg(*U.getOperand(0)),
2756                          DL->getABITypeAlign(U.getType()).value()});
2757   return true;
2758 }
2759 
2760 bool IRTranslator::translateUnreachable(const User &U, MachineIRBuilder &MIRBuilder) {
2761     if (!MF->getTarget().Options.TrapUnreachable)
2762     return true;
2763 
2764   auto &UI = cast<UnreachableInst>(U);
2765   // We may be able to ignore unreachable behind a noreturn call.
2766   if (MF->getTarget().Options.NoTrapAfterNoreturn) {
2767     const BasicBlock &BB = *UI.getParent();
2768     if (&UI != &BB.front()) {
2769       BasicBlock::const_iterator PredI =
2770         std::prev(BasicBlock::const_iterator(UI));
2771       if (const CallInst *Call = dyn_cast<CallInst>(&*PredI)) {
2772         if (Call->doesNotReturn())
2773           return true;
2774       }
2775     }
2776   }
2777 
2778   MIRBuilder.buildIntrinsic(Intrinsic::trap, ArrayRef<Register>(), true);
2779   return true;
2780 }
2781 
2782 bool IRTranslator::translateInsertElement(const User &U,
2783                                           MachineIRBuilder &MIRBuilder) {
2784   // If it is a <1 x Ty> vector, use the scalar as it is
2785   // not a legal vector type in LLT.
2786   if (cast<FixedVectorType>(U.getType())->getNumElements() == 1)
2787     return translateCopy(U, *U.getOperand(1), MIRBuilder);
2788 
2789   Register Res = getOrCreateVReg(U);
2790   Register Val = getOrCreateVReg(*U.getOperand(0));
2791   Register Elt = getOrCreateVReg(*U.getOperand(1));
2792   Register Idx = getOrCreateVReg(*U.getOperand(2));
2793   MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
2794   return true;
2795 }
2796 
2797 bool IRTranslator::translateExtractElement(const User &U,
2798                                            MachineIRBuilder &MIRBuilder) {
2799   // If it is a <1 x Ty> vector, use the scalar as it is
2800   // not a legal vector type in LLT.
2801   if (cast<FixedVectorType>(U.getOperand(0)->getType())->getNumElements() == 1)
2802     return translateCopy(U, *U.getOperand(0), MIRBuilder);
2803 
2804   Register Res = getOrCreateVReg(U);
2805   Register Val = getOrCreateVReg(*U.getOperand(0));
2806   const auto &TLI = *MF->getSubtarget().getTargetLowering();
2807   unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
2808   Register Idx;
2809   if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
2810     if (CI->getBitWidth() != PreferredVecIdxWidth) {
2811       APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
2812       auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
2813       Idx = getOrCreateVReg(*NewIdxCI);
2814     }
2815   }
2816   if (!Idx)
2817     Idx = getOrCreateVReg(*U.getOperand(1));
2818   if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
2819     const LLT VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
2820     Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx).getReg(0);
2821   }
2822   MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
2823   return true;
2824 }
2825 
2826 bool IRTranslator::translateShuffleVector(const User &U,
2827                                           MachineIRBuilder &MIRBuilder) {
2828   ArrayRef<int> Mask;
2829   if (auto *SVI = dyn_cast<ShuffleVectorInst>(&U))
2830     Mask = SVI->getShuffleMask();
2831   else
2832     Mask = cast<ConstantExpr>(U).getShuffleMask();
2833   ArrayRef<int> MaskAlloc = MF->allocateShuffleMask(Mask);
2834   MIRBuilder
2835       .buildInstr(TargetOpcode::G_SHUFFLE_VECTOR, {getOrCreateVReg(U)},
2836                   {getOrCreateVReg(*U.getOperand(0)),
2837                    getOrCreateVReg(*U.getOperand(1))})
2838       .addShuffleMask(MaskAlloc);
2839   return true;
2840 }
2841 
2842 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
2843   const PHINode &PI = cast<PHINode>(U);
2844 
2845   SmallVector<MachineInstr *, 4> Insts;
2846   for (auto Reg : getOrCreateVRegs(PI)) {
2847     auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
2848     Insts.push_back(MIB.getInstr());
2849   }
2850 
2851   PendingPHIs.emplace_back(&PI, std::move(Insts));
2852   return true;
2853 }
2854 
2855 bool IRTranslator::translateAtomicCmpXchg(const User &U,
2856                                           MachineIRBuilder &MIRBuilder) {
2857   const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
2858 
2859   auto &TLI = *MF->getSubtarget().getTargetLowering();
2860   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2861 
2862   auto Res = getOrCreateVRegs(I);
2863   Register OldValRes = Res[0];
2864   Register SuccessRes = Res[1];
2865   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2866   Register Cmp = getOrCreateVReg(*I.getCompareOperand());
2867   Register NewVal = getOrCreateVReg(*I.getNewValOperand());
2868 
2869   MIRBuilder.buildAtomicCmpXchgWithSuccess(
2870       OldValRes, SuccessRes, Addr, Cmp, NewVal,
2871       *MF->getMachineMemOperand(
2872           MachinePointerInfo(I.getPointerOperand()), Flags, MRI->getType(Cmp),
2873           getMemOpAlign(I), I.getAAMetadata(), nullptr, I.getSyncScopeID(),
2874           I.getSuccessOrdering(), I.getFailureOrdering()));
2875   return true;
2876 }
2877 
2878 bool IRTranslator::translateAtomicRMW(const User &U,
2879                                       MachineIRBuilder &MIRBuilder) {
2880   const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
2881   auto &TLI = *MF->getSubtarget().getTargetLowering();
2882   auto Flags = TLI.getAtomicMemOperandFlags(I, *DL);
2883 
2884   Register Res = getOrCreateVReg(I);
2885   Register Addr = getOrCreateVReg(*I.getPointerOperand());
2886   Register Val = getOrCreateVReg(*I.getValOperand());
2887 
2888   unsigned Opcode = 0;
2889   switch (I.getOperation()) {
2890   default:
2891     return false;
2892   case AtomicRMWInst::Xchg:
2893     Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
2894     break;
2895   case AtomicRMWInst::Add:
2896     Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2897     break;
2898   case AtomicRMWInst::Sub:
2899     Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2900     break;
2901   case AtomicRMWInst::And:
2902     Opcode = TargetOpcode::G_ATOMICRMW_AND;
2903     break;
2904   case AtomicRMWInst::Nand:
2905     Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2906     break;
2907   case AtomicRMWInst::Or:
2908     Opcode = TargetOpcode::G_ATOMICRMW_OR;
2909     break;
2910   case AtomicRMWInst::Xor:
2911     Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2912     break;
2913   case AtomicRMWInst::Max:
2914     Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2915     break;
2916   case AtomicRMWInst::Min:
2917     Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2918     break;
2919   case AtomicRMWInst::UMax:
2920     Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2921     break;
2922   case AtomicRMWInst::UMin:
2923     Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2924     break;
2925   case AtomicRMWInst::FAdd:
2926     Opcode = TargetOpcode::G_ATOMICRMW_FADD;
2927     break;
2928   case AtomicRMWInst::FSub:
2929     Opcode = TargetOpcode::G_ATOMICRMW_FSUB;
2930     break;
2931   case AtomicRMWInst::FMax:
2932     Opcode = TargetOpcode::G_ATOMICRMW_FMAX;
2933     break;
2934   case AtomicRMWInst::FMin:
2935     Opcode = TargetOpcode::G_ATOMICRMW_FMIN;
2936     break;
2937   }
2938 
2939   MIRBuilder.buildAtomicRMW(
2940       Opcode, Res, Addr, Val,
2941       *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2942                                 Flags, MRI->getType(Val), getMemOpAlign(I),
2943                                 I.getAAMetadata(), nullptr, I.getSyncScopeID(),
2944                                 I.getOrdering()));
2945   return true;
2946 }
2947 
2948 bool IRTranslator::translateFence(const User &U,
2949                                   MachineIRBuilder &MIRBuilder) {
2950   const FenceInst &Fence = cast<FenceInst>(U);
2951   MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2952                         Fence.getSyncScopeID());
2953   return true;
2954 }
2955 
2956 bool IRTranslator::translateFreeze(const User &U,
2957                                    MachineIRBuilder &MIRBuilder) {
2958   const ArrayRef<Register> DstRegs = getOrCreateVRegs(U);
2959   const ArrayRef<Register> SrcRegs = getOrCreateVRegs(*U.getOperand(0));
2960 
2961   assert(DstRegs.size() == SrcRegs.size() &&
2962          "Freeze with different source and destination type?");
2963 
2964   for (unsigned I = 0; I < DstRegs.size(); ++I) {
2965     MIRBuilder.buildFreeze(DstRegs[I], SrcRegs[I]);
2966   }
2967 
2968   return true;
2969 }
2970 
2971 void IRTranslator::finishPendingPhis() {
2972 #ifndef NDEBUG
2973   DILocationVerifier Verifier;
2974   GISelObserverWrapper WrapperObserver(&Verifier);
2975   RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2976 #endif // ifndef NDEBUG
2977   for (auto &Phi : PendingPHIs) {
2978     const PHINode *PI = Phi.first;
2979     ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2980     MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2981     EntryBuilder->setDebugLoc(PI->getDebugLoc());
2982 #ifndef NDEBUG
2983     Verifier.setCurrentInst(PI);
2984 #endif // ifndef NDEBUG
2985 
2986     SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2987     for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2988       auto IRPred = PI->getIncomingBlock(i);
2989       ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2990       for (auto *Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2991         if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2992           continue;
2993         SeenPreds.insert(Pred);
2994         for (unsigned j = 0; j < ValRegs.size(); ++j) {
2995           MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2996           MIB.addUse(ValRegs[j]);
2997           MIB.addMBB(Pred);
2998         }
2999       }
3000     }
3001   }
3002 }
3003 
3004 bool IRTranslator::translate(const Instruction &Inst) {
3005   CurBuilder->setDebugLoc(Inst.getDebugLoc());
3006 
3007   auto &TLI = *MF->getSubtarget().getTargetLowering();
3008   if (TLI.fallBackToDAGISel(Inst))
3009     return false;
3010 
3011   switch (Inst.getOpcode()) {
3012 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
3013   case Instruction::OPCODE:                                                    \
3014     return translate##OPCODE(Inst, *CurBuilder.get());
3015 #include "llvm/IR/Instruction.def"
3016   default:
3017     return false;
3018   }
3019 }
3020 
3021 bool IRTranslator::translate(const Constant &C, Register Reg) {
3022   // We only emit constants into the entry block from here. To prevent jumpy
3023   // debug behaviour set the line to 0.
3024   if (auto CurrInstDL = CurBuilder->getDL())
3025     EntryBuilder->setDebugLoc(DILocation::get(C.getContext(), 0, 0,
3026                                               CurrInstDL.getScope(),
3027                                               CurrInstDL.getInlinedAt()));
3028 
3029   if (auto CI = dyn_cast<ConstantInt>(&C))
3030     EntryBuilder->buildConstant(Reg, *CI);
3031   else if (auto CF = dyn_cast<ConstantFP>(&C))
3032     EntryBuilder->buildFConstant(Reg, *CF);
3033   else if (isa<UndefValue>(C))
3034     EntryBuilder->buildUndef(Reg);
3035   else if (isa<ConstantPointerNull>(C))
3036     EntryBuilder->buildConstant(Reg, 0);
3037   else if (auto GV = dyn_cast<GlobalValue>(&C))
3038     EntryBuilder->buildGlobalValue(Reg, GV);
3039   else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
3040     if (!isa<FixedVectorType>(CAZ->getType()))
3041       return false;
3042     // Return the scalar if it is a <1 x Ty> vector.
3043     unsigned NumElts = CAZ->getElementCount().getFixedValue();
3044     if (NumElts == 1)
3045       return translateCopy(C, *CAZ->getElementValue(0u), *EntryBuilder);
3046     SmallVector<Register, 4> Ops;
3047     for (unsigned I = 0; I < NumElts; ++I) {
3048       Constant &Elt = *CAZ->getElementValue(I);
3049       Ops.push_back(getOrCreateVReg(Elt));
3050     }
3051     EntryBuilder->buildBuildVector(Reg, Ops);
3052   } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
3053     // Return the scalar if it is a <1 x Ty> vector.
3054     if (CV->getNumElements() == 1)
3055       return translateCopy(C, *CV->getElementAsConstant(0), *EntryBuilder);
3056     SmallVector<Register, 4> Ops;
3057     for (unsigned i = 0; i < CV->getNumElements(); ++i) {
3058       Constant &Elt = *CV->getElementAsConstant(i);
3059       Ops.push_back(getOrCreateVReg(Elt));
3060     }
3061     EntryBuilder->buildBuildVector(Reg, Ops);
3062   } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
3063     switch(CE->getOpcode()) {
3064 #define HANDLE_INST(NUM, OPCODE, CLASS)                                        \
3065   case Instruction::OPCODE:                                                    \
3066     return translate##OPCODE(*CE, *EntryBuilder.get());
3067 #include "llvm/IR/Instruction.def"
3068     default:
3069       return false;
3070     }
3071   } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
3072     if (CV->getNumOperands() == 1)
3073       return translateCopy(C, *CV->getOperand(0), *EntryBuilder);
3074     SmallVector<Register, 4> Ops;
3075     for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
3076       Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
3077     }
3078     EntryBuilder->buildBuildVector(Reg, Ops);
3079   } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
3080     EntryBuilder->buildBlockAddress(Reg, BA);
3081   } else
3082     return false;
3083 
3084   return true;
3085 }
3086 
3087 bool IRTranslator::finalizeBasicBlock(const BasicBlock &BB,
3088                                       MachineBasicBlock &MBB) {
3089   for (auto &BTB : SL->BitTestCases) {
3090     // Emit header first, if it wasn't already emitted.
3091     if (!BTB.Emitted)
3092       emitBitTestHeader(BTB, BTB.Parent);
3093 
3094     BranchProbability UnhandledProb = BTB.Prob;
3095     for (unsigned j = 0, ej = BTB.Cases.size(); j != ej; ++j) {
3096       UnhandledProb -= BTB.Cases[j].ExtraProb;
3097       // Set the current basic block to the mbb we wish to insert the code into
3098       MachineBasicBlock *MBB = BTB.Cases[j].ThisBB;
3099       // If all cases cover a contiguous range, it is not necessary to jump to
3100       // the default block after the last bit test fails. This is because the
3101       // range check during bit test header creation has guaranteed that every
3102       // case here doesn't go outside the range. In this case, there is no need
3103       // to perform the last bit test, as it will always be true. Instead, make
3104       // the second-to-last bit-test fall through to the target of the last bit
3105       // test, and delete the last bit test.
3106 
3107       MachineBasicBlock *NextMBB;
3108       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3109         // Second-to-last bit-test with contiguous range: fall through to the
3110         // target of the final bit test.
3111         NextMBB = BTB.Cases[j + 1].TargetBB;
3112       } else if (j + 1 == ej) {
3113         // For the last bit test, fall through to Default.
3114         NextMBB = BTB.Default;
3115       } else {
3116         // Otherwise, fall through to the next bit test.
3117         NextMBB = BTB.Cases[j + 1].ThisBB;
3118       }
3119 
3120       emitBitTestCase(BTB, NextMBB, UnhandledProb, BTB.Reg, BTB.Cases[j], MBB);
3121 
3122       if ((BTB.ContiguousRange || BTB.FallthroughUnreachable) && j + 2 == ej) {
3123         // We need to record the replacement phi edge here that normally
3124         // happens in emitBitTestCase before we delete the case, otherwise the
3125         // phi edge will be lost.
3126         addMachineCFGPred({BTB.Parent->getBasicBlock(),
3127                            BTB.Cases[ej - 1].TargetBB->getBasicBlock()},
3128                           MBB);
3129         // Since we're not going to use the final bit test, remove it.
3130         BTB.Cases.pop_back();
3131         break;
3132       }
3133     }
3134     // This is "default" BB. We have two jumps to it. From "header" BB and from
3135     // last "case" BB, unless the latter was skipped.
3136     CFGEdge HeaderToDefaultEdge = {BTB.Parent->getBasicBlock(),
3137                                    BTB.Default->getBasicBlock()};
3138     addMachineCFGPred(HeaderToDefaultEdge, BTB.Parent);
3139     if (!BTB.ContiguousRange) {
3140       addMachineCFGPred(HeaderToDefaultEdge, BTB.Cases.back().ThisBB);
3141     }
3142   }
3143   SL->BitTestCases.clear();
3144 
3145   for (auto &JTCase : SL->JTCases) {
3146     // Emit header first, if it wasn't already emitted.
3147     if (!JTCase.first.Emitted)
3148       emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
3149 
3150     emitJumpTable(JTCase.second, JTCase.second.MBB);
3151   }
3152   SL->JTCases.clear();
3153 
3154   for (auto &SwCase : SL->SwitchCases)
3155     emitSwitchCase(SwCase, &CurBuilder->getMBB(), *CurBuilder);
3156   SL->SwitchCases.clear();
3157 
3158   // Check if we need to generate stack-protector guard checks.
3159   StackProtector &SP = getAnalysis<StackProtector>();
3160   if (SP.shouldEmitSDCheck(BB)) {
3161     const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3162     bool FunctionBasedInstrumentation =
3163         TLI.getSSPStackGuardCheck(*MF->getFunction().getParent());
3164     SPDescriptor.initialize(&BB, &MBB, FunctionBasedInstrumentation);
3165   }
3166   // Handle stack protector.
3167   if (SPDescriptor.shouldEmitFunctionBasedCheckStackProtector()) {
3168     LLVM_DEBUG(dbgs() << "Unimplemented stack protector case\n");
3169     return false;
3170   } else if (SPDescriptor.shouldEmitStackProtector()) {
3171     MachineBasicBlock *ParentMBB = SPDescriptor.getParentMBB();
3172     MachineBasicBlock *SuccessMBB = SPDescriptor.getSuccessMBB();
3173 
3174     // Find the split point to split the parent mbb. At the same time copy all
3175     // physical registers used in the tail of parent mbb into virtual registers
3176     // before the split point and back into physical registers after the split
3177     // point. This prevents us needing to deal with Live-ins and many other
3178     // register allocation issues caused by us splitting the parent mbb. The
3179     // register allocator will clean up said virtual copies later on.
3180     MachineBasicBlock::iterator SplitPoint = findSplitPointForStackProtector(
3181         ParentMBB, *MF->getSubtarget().getInstrInfo());
3182 
3183     // Splice the terminator of ParentMBB into SuccessMBB.
3184     SuccessMBB->splice(SuccessMBB->end(), ParentMBB, SplitPoint,
3185                        ParentMBB->end());
3186 
3187     // Add compare/jump on neq/jump to the parent BB.
3188     if (!emitSPDescriptorParent(SPDescriptor, ParentMBB))
3189       return false;
3190 
3191     // CodeGen Failure MBB if we have not codegened it yet.
3192     MachineBasicBlock *FailureMBB = SPDescriptor.getFailureMBB();
3193     if (FailureMBB->empty()) {
3194       if (!emitSPDescriptorFailure(SPDescriptor, FailureMBB))
3195         return false;
3196     }
3197 
3198     // Clear the Per-BB State.
3199     SPDescriptor.resetPerBBState();
3200   }
3201   return true;
3202 }
3203 
3204 bool IRTranslator::emitSPDescriptorParent(StackProtectorDescriptor &SPD,
3205                                           MachineBasicBlock *ParentBB) {
3206   CurBuilder->setInsertPt(*ParentBB, ParentBB->end());
3207   // First create the loads to the guard/stack slot for the comparison.
3208   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3209   Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
3210   const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
3211   LLT PtrMemTy = getLLTForMVT(TLI.getPointerMemTy(*DL));
3212 
3213   MachineFrameInfo &MFI = ParentBB->getParent()->getFrameInfo();
3214   int FI = MFI.getStackProtectorIndex();
3215 
3216   Register Guard;
3217   Register StackSlotPtr = CurBuilder->buildFrameIndex(PtrTy, FI).getReg(0);
3218   const Module &M = *ParentBB->getParent()->getFunction().getParent();
3219   Align Align = DL->getPrefTypeAlign(Type::getInt8PtrTy(M.getContext()));
3220 
3221   // Generate code to load the content of the guard slot.
3222   Register GuardVal =
3223       CurBuilder
3224           ->buildLoad(PtrMemTy, StackSlotPtr,
3225                       MachinePointerInfo::getFixedStack(*MF, FI), Align,
3226                       MachineMemOperand::MOLoad | MachineMemOperand::MOVolatile)
3227           .getReg(0);
3228 
3229   if (TLI.useStackGuardXorFP()) {
3230     LLVM_DEBUG(dbgs() << "Stack protector xor'ing with FP not yet implemented");
3231     return false;
3232   }
3233 
3234   // Retrieve guard check function, nullptr if instrumentation is inlined.
3235   if (const Function *GuardCheckFn = TLI.getSSPStackGuardCheck(M)) {
3236     // This path is currently untestable on GlobalISel, since the only platform
3237     // that needs this seems to be Windows, and we fall back on that currently.
3238     // The code still lives here in case that changes.
3239     // Silence warning about unused variable until the code below that uses
3240     // 'GuardCheckFn' is enabled.
3241     (void)GuardCheckFn;
3242     return false;
3243 #if 0
3244     // The target provides a guard check function to validate the guard value.
3245     // Generate a call to that function with the content of the guard slot as
3246     // argument.
3247     FunctionType *FnTy = GuardCheckFn->getFunctionType();
3248     assert(FnTy->getNumParams() == 1 && "Invalid function signature");
3249     ISD::ArgFlagsTy Flags;
3250     if (GuardCheckFn->hasAttribute(1, Attribute::AttrKind::InReg))
3251       Flags.setInReg();
3252     CallLowering::ArgInfo GuardArgInfo(
3253         {GuardVal, FnTy->getParamType(0), {Flags}});
3254 
3255     CallLowering::CallLoweringInfo Info;
3256     Info.OrigArgs.push_back(GuardArgInfo);
3257     Info.CallConv = GuardCheckFn->getCallingConv();
3258     Info.Callee = MachineOperand::CreateGA(GuardCheckFn, 0);
3259     Info.OrigRet = {Register(), FnTy->getReturnType()};
3260     if (!CLI->lowerCall(MIRBuilder, Info)) {
3261       LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector check\n");
3262       return false;
3263     }
3264     return true;
3265 #endif
3266   }
3267 
3268   // If useLoadStackGuardNode returns true, generate LOAD_STACK_GUARD.
3269   // Otherwise, emit a volatile load to retrieve the stack guard value.
3270   if (TLI.useLoadStackGuardNode()) {
3271     Guard =
3272         MRI->createGenericVirtualRegister(LLT::scalar(PtrTy.getSizeInBits()));
3273     getStackGuard(Guard, *CurBuilder);
3274   } else {
3275     // TODO: test using android subtarget when we support @llvm.thread.pointer.
3276     const Value *IRGuard = TLI.getSDagStackGuard(M);
3277     Register GuardPtr = getOrCreateVReg(*IRGuard);
3278 
3279     Guard = CurBuilder
3280                 ->buildLoad(PtrMemTy, GuardPtr,
3281                             MachinePointerInfo::getFixedStack(*MF, FI), Align,
3282                             MachineMemOperand::MOLoad |
3283                                 MachineMemOperand::MOVolatile)
3284                 .getReg(0);
3285   }
3286 
3287   // Perform the comparison.
3288   auto Cmp =
3289       CurBuilder->buildICmp(CmpInst::ICMP_NE, LLT::scalar(1), Guard, GuardVal);
3290   // If the guard/stackslot do not equal, branch to failure MBB.
3291   CurBuilder->buildBrCond(Cmp, *SPD.getFailureMBB());
3292   // Otherwise branch to success MBB.
3293   CurBuilder->buildBr(*SPD.getSuccessMBB());
3294   return true;
3295 }
3296 
3297 bool IRTranslator::emitSPDescriptorFailure(StackProtectorDescriptor &SPD,
3298                                            MachineBasicBlock *FailureBB) {
3299   CurBuilder->setInsertPt(*FailureBB, FailureBB->end());
3300   const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
3301 
3302   const RTLIB::Libcall Libcall = RTLIB::STACKPROTECTOR_CHECK_FAIL;
3303   const char *Name = TLI.getLibcallName(Libcall);
3304 
3305   CallLowering::CallLoweringInfo Info;
3306   Info.CallConv = TLI.getLibcallCallingConv(Libcall);
3307   Info.Callee = MachineOperand::CreateES(Name);
3308   Info.OrigRet = {Register(), Type::getVoidTy(MF->getFunction().getContext()),
3309                   0};
3310   if (!CLI->lowerCall(*CurBuilder, Info)) {
3311     LLVM_DEBUG(dbgs() << "Failed to lower call to stack protector fail\n");
3312     return false;
3313   }
3314 
3315   // On PS4/PS5, the "return address" must still be within the calling
3316   // function, even if it's at the very end, so emit an explicit TRAP here.
3317   // WebAssembly needs an unreachable instruction after a non-returning call,
3318   // because the function return type can be different from __stack_chk_fail's
3319   // return type (void).
3320   const TargetMachine &TM = MF->getTarget();
3321   if (TM.getTargetTriple().isPS() || TM.getTargetTriple().isWasm()) {
3322     LLVM_DEBUG(dbgs() << "Unhandled trap emission for stack protector fail\n");
3323     return false;
3324   }
3325   return true;
3326 }
3327 
3328 void IRTranslator::finalizeFunction() {
3329   // Release the memory used by the different maps we
3330   // needed during the translation.
3331   PendingPHIs.clear();
3332   VMap.reset();
3333   FrameIndices.clear();
3334   MachinePreds.clear();
3335   // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
3336   // to avoid accessing free’d memory (in runOnMachineFunction) and to avoid
3337   // destroying it twice (in ~IRTranslator() and ~LLVMContext())
3338   EntryBuilder.reset();
3339   CurBuilder.reset();
3340   FuncInfo.clear();
3341   SPDescriptor.resetPerFunctionState();
3342 }
3343 
3344 /// Returns true if a BasicBlock \p BB within a variadic function contains a
3345 /// variadic musttail call.
3346 static bool checkForMustTailInVarArgFn(bool IsVarArg, const BasicBlock &BB) {
3347   if (!IsVarArg)
3348     return false;
3349 
3350   // Walk the block backwards, because tail calls usually only appear at the end
3351   // of a block.
3352   return llvm::any_of(llvm::reverse(BB), [](const Instruction &I) {
3353     const auto *CI = dyn_cast<CallInst>(&I);
3354     return CI && CI->isMustTailCall();
3355   });
3356 }
3357 
3358 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
3359   MF = &CurMF;
3360   const Function &F = MF->getFunction();
3361   GISelCSEAnalysisWrapper &Wrapper =
3362       getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
3363   // Set the CSEConfig and run the analysis.
3364   GISelCSEInfo *CSEInfo = nullptr;
3365   TPC = &getAnalysis<TargetPassConfig>();
3366   bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
3367                        ? EnableCSEInIRTranslator
3368                        : TPC->isGISelCSEEnabled();
3369 
3370   if (EnableCSE) {
3371     EntryBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3372     CSEInfo = &Wrapper.get(TPC->getCSEConfig());
3373     EntryBuilder->setCSEInfo(CSEInfo);
3374     CurBuilder = std::make_unique<CSEMIRBuilder>(CurMF);
3375     CurBuilder->setCSEInfo(CSEInfo);
3376   } else {
3377     EntryBuilder = std::make_unique<MachineIRBuilder>();
3378     CurBuilder = std::make_unique<MachineIRBuilder>();
3379   }
3380   CLI = MF->getSubtarget().getCallLowering();
3381   CurBuilder->setMF(*MF);
3382   EntryBuilder->setMF(*MF);
3383   MRI = &MF->getRegInfo();
3384   DL = &F.getParent()->getDataLayout();
3385   ORE = std::make_unique<OptimizationRemarkEmitter>(&F);
3386   const TargetMachine &TM = MF->getTarget();
3387   TM.resetTargetOptions(F);
3388   EnableOpts = OptLevel != CodeGenOpt::None && !skipFunction(F);
3389   FuncInfo.MF = MF;
3390   if (EnableOpts) {
3391     AA = &getAnalysis<AAResultsWrapperPass>().getAAResults();
3392     FuncInfo.BPI = &getAnalysis<BranchProbabilityInfoWrapperPass>().getBPI();
3393   } else {
3394     AA = nullptr;
3395     FuncInfo.BPI = nullptr;
3396   }
3397 
3398   FuncInfo.CanLowerReturn = CLI->checkReturnTypeForCallConv(*MF);
3399 
3400   const auto &TLI = *MF->getSubtarget().getTargetLowering();
3401 
3402   SL = std::make_unique<GISelSwitchLowering>(this, FuncInfo);
3403   SL->init(TLI, TM, *DL);
3404 
3405 
3406 
3407   assert(PendingPHIs.empty() && "stale PHIs");
3408 
3409   // Targets which want to use big endian can enable it using
3410   // enableBigEndian()
3411   if (!DL->isLittleEndian() && !CLI->enableBigEndian()) {
3412     // Currently we don't properly handle big endian code.
3413     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3414                                F.getSubprogram(), &F.getEntryBlock());
3415     R << "unable to translate in big endian mode";
3416     reportTranslationError(*MF, *TPC, *ORE, R);
3417   }
3418 
3419   // Release the per-function state when we return, whether we succeeded or not.
3420   auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
3421 
3422   // Setup a separate basic-block for the arguments and constants
3423   MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
3424   MF->push_back(EntryBB);
3425   EntryBuilder->setMBB(*EntryBB);
3426 
3427   DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
3428   SwiftError.setFunction(CurMF);
3429   SwiftError.createEntriesInEntryBlock(DbgLoc);
3430 
3431   bool IsVarArg = F.isVarArg();
3432   bool HasMustTailInVarArgFn = false;
3433 
3434   // Create all blocks, in IR order, to preserve the layout.
3435   for (const BasicBlock &BB: F) {
3436     auto *&MBB = BBToMBB[&BB];
3437 
3438     MBB = MF->CreateMachineBasicBlock(&BB);
3439     MF->push_back(MBB);
3440 
3441     if (BB.hasAddressTaken())
3442       MBB->setHasAddressTaken();
3443 
3444     if (!HasMustTailInVarArgFn)
3445       HasMustTailInVarArgFn = checkForMustTailInVarArgFn(IsVarArg, BB);
3446   }
3447 
3448   MF->getFrameInfo().setHasMustTailInVarArgFunc(HasMustTailInVarArgFn);
3449 
3450   // Make our arguments/constants entry block fallthrough to the IR entry block.
3451   EntryBB->addSuccessor(&getMBB(F.front()));
3452 
3453   if (CLI->fallBackToDAGISel(*MF)) {
3454     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3455                                F.getSubprogram(), &F.getEntryBlock());
3456     R << "unable to lower function: " << ore::NV("Prototype", F.getType());
3457     reportTranslationError(*MF, *TPC, *ORE, R);
3458     return false;
3459   }
3460 
3461   // Lower the actual args into this basic block.
3462   SmallVector<ArrayRef<Register>, 8> VRegArgs;
3463   for (const Argument &Arg: F.args()) {
3464     if (DL->getTypeStoreSize(Arg.getType()).isZero())
3465       continue; // Don't handle zero sized types.
3466     ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
3467     VRegArgs.push_back(VRegs);
3468 
3469     if (Arg.hasSwiftErrorAttr()) {
3470       assert(VRegs.size() == 1 && "Too many vregs for Swift error");
3471       SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
3472     }
3473   }
3474 
3475   if (!CLI->lowerFormalArguments(*EntryBuilder, F, VRegArgs, FuncInfo)) {
3476     OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3477                                F.getSubprogram(), &F.getEntryBlock());
3478     R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
3479     reportTranslationError(*MF, *TPC, *ORE, R);
3480     return false;
3481   }
3482 
3483   // Need to visit defs before uses when translating instructions.
3484   GISelObserverWrapper WrapperObserver;
3485   if (EnableCSE && CSEInfo)
3486     WrapperObserver.addObserver(CSEInfo);
3487   {
3488     ReversePostOrderTraversal<const Function *> RPOT(&F);
3489 #ifndef NDEBUG
3490     DILocationVerifier Verifier;
3491     WrapperObserver.addObserver(&Verifier);
3492 #endif // ifndef NDEBUG
3493     RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
3494     RAIIMFObserverInstaller ObsInstall(*MF, WrapperObserver);
3495     for (const BasicBlock *BB : RPOT) {
3496       MachineBasicBlock &MBB = getMBB(*BB);
3497       // Set the insertion point of all the following translations to
3498       // the end of this basic block.
3499       CurBuilder->setMBB(MBB);
3500       HasTailCall = false;
3501       for (const Instruction &Inst : *BB) {
3502         // If we translated a tail call in the last step, then we know
3503         // everything after the call is either a return, or something that is
3504         // handled by the call itself. (E.g. a lifetime marker or assume
3505         // intrinsic.) In this case, we should stop translating the block and
3506         // move on.
3507         if (HasTailCall)
3508           break;
3509 #ifndef NDEBUG
3510         Verifier.setCurrentInst(&Inst);
3511 #endif // ifndef NDEBUG
3512         if (translate(Inst))
3513           continue;
3514 
3515         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3516                                    Inst.getDebugLoc(), BB);
3517         R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
3518 
3519         if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
3520           std::string InstStrStorage;
3521           raw_string_ostream InstStr(InstStrStorage);
3522           InstStr << Inst;
3523 
3524           R << ": '" << InstStr.str() << "'";
3525         }
3526 
3527         reportTranslationError(*MF, *TPC, *ORE, R);
3528         return false;
3529       }
3530 
3531       if (!finalizeBasicBlock(*BB, MBB)) {
3532         OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
3533                                    BB->getTerminator()->getDebugLoc(), BB);
3534         R << "unable to translate basic block";
3535         reportTranslationError(*MF, *TPC, *ORE, R);
3536         return false;
3537       }
3538     }
3539 #ifndef NDEBUG
3540     WrapperObserver.removeObserver(&Verifier);
3541 #endif
3542   }
3543 
3544   finishPendingPhis();
3545 
3546   SwiftError.propagateVRegs();
3547 
3548   // Merge the argument lowering and constants block with its single
3549   // successor, the LLVM-IR entry block.  We want the basic block to
3550   // be maximal.
3551   assert(EntryBB->succ_size() == 1 &&
3552          "Custom BB used for lowering should have only one successor");
3553   // Get the successor of the current entry block.
3554   MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
3555   assert(NewEntryBB.pred_size() == 1 &&
3556          "LLVM-IR entry block has a predecessor!?");
3557   // Move all the instruction from the current entry block to the
3558   // new entry block.
3559   NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
3560                     EntryBB->end());
3561 
3562   // Update the live-in information for the new entry block.
3563   for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
3564     NewEntryBB.addLiveIn(LiveIn);
3565   NewEntryBB.sortUniqueLiveIns();
3566 
3567   // Get rid of the now empty basic block.
3568   EntryBB->removeSuccessor(&NewEntryBB);
3569   MF->remove(EntryBB);
3570   MF->deleteMachineBasicBlock(EntryBB);
3571 
3572   assert(&MF->front() == &NewEntryBB &&
3573          "New entry wasn't next in the list of basic block!");
3574 
3575   // Initialize stack protector information.
3576   StackProtector &SP = getAnalysis<StackProtector>();
3577   SP.copyToMachineFrameInfo(MF->getFrameInfo());
3578 
3579   return false;
3580 }
3581