//===- llvm/CodeGen/GlobalISel/IRTranslator.cpp - IRTranslator ---*- C++ -*-==//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the IRTranslator class.
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/GlobalISel/IRTranslator.h"
#include "llvm/ADT/PostOrderIterator.h"
#include "llvm/ADT/STLExtras.h"
#include "llvm/ADT/ScopeExit.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/OptimizationRemarkEmitter.h"
#include "llvm/Analysis/ValueTracking.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/GlobalISel/CallLowering.h"
#include "llvm/CodeGen/GlobalISel/GISelChangeObserver.h"
#include "llvm/CodeGen/LowLevelType.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackProtector.h"
#include "llvm/CodeGen/TargetFrameLowering.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/CodeGen/TargetRegisterInfo.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CFG.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CodeGen.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LowLevelTypeImpl.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetIntrinsicInfo.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <string>
#include <utility>
#include <vector>

#define DEBUG_TYPE "irtranslator"

using namespace llvm;

static cl::opt<bool>
    EnableCSEInIRTranslator("enable-cse-in-irtranslator",
                            cl::desc("Should enable CSE in irtranslator"),
                            cl::Optional, cl::init(false));

char IRTranslator::ID = 0;

INITIALIZE_PASS_BEGIN(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                      false, false)
INITIALIZE_PASS_DEPENDENCY(TargetPassConfig)
INITIALIZE_PASS_DEPENDENCY(GISelCSEAnalysisWrapperPass)
INITIALIZE_PASS_END(IRTranslator, DEBUG_TYPE, "IRTranslator LLVM IR -> MI",
                    false, false)

static void reportTranslationError(MachineFunction &MF,
                                   const TargetPassConfig &TPC,
                                   OptimizationRemarkEmitter &ORE,
                                   OptimizationRemarkMissed &R) {
  MF.getProperties().set(MachineFunctionProperties::Property::FailedISel);

  // Print the function name explicitly if we don't have a debug location
  // (which makes the diagnostic less useful) or if we're going to emit a raw
  // error.
  if (!R.getLocation().isValid() || TPC.isGlobalISelAbortEnabled())
    R << (" (in function: " + MF.getName() + ")").str();

  if (TPC.isGlobalISelAbortEnabled())
    report_fatal_error(R.getMsg());
  else
    ORE.emit(R);
}

IRTranslator::IRTranslator() : MachineFunctionPass(ID) { }

#ifndef NDEBUG
namespace {
/// Verify that every instruction created has the same DILocation as the
/// instruction being translated.
class DILocationVerifier : public GISelChangeObserver {
  const Instruction *CurrInst = nullptr;

public:
  DILocationVerifier() = default;
  ~DILocationVerifier() = default;

  const Instruction *getCurrentInst() const { return CurrInst; }
  void setCurrentInst(const Instruction *Inst) { CurrInst = Inst; }

  void erasingInstr(MachineInstr &MI) override {}
  void changingInstr(MachineInstr &MI) override {}
  void changedInstr(MachineInstr &MI) override {}

  void createdInstr(MachineInstr &MI) override {
    assert(getCurrentInst() && "Inserted instruction without a current MI");

    // Only print the check message if we're actually checking it.
#ifndef NDEBUG
    LLVM_DEBUG(dbgs() << "Checking DILocation from " << *CurrInst
                      << " was copied to " << MI);
#endif
    // We allow insts in the entry block to have a debug loc line of 0 because
    // they could have originated from constants, and we don't want a jumpy
    // debug experience.
    assert((CurrInst->getDebugLoc() == MI.getDebugLoc() ||
            MI.getDebugLoc().getLine() == 0) &&
           "Line info was not transferred to all instructions");
  }
};
} // namespace
#endif // ifndef NDEBUG


void IRTranslator::getAnalysisUsage(AnalysisUsage &AU) const {
  AU.addRequired<StackProtector>();
  AU.addRequired<TargetPassConfig>();
  AU.addRequired<GISelCSEAnalysisWrapperPass>();
  getSelectionDAGFallbackAnalysisUsage(AU);
  MachineFunctionPass::getAnalysisUsage(AU);
}

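// Reserve value-map entries for all parts of Val: compute how its IR type
// splits into LLTs, record the part offsets, and push a placeholder (invalid)
// register per part for the caller to fill in.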
IRTranslator::ValueToVRegInfo::VRegListT &
IRTranslator::allocateVRegs(const Value &Val) {
  assert(!VMap.contains(Val) && "Value already allocated in VMap");
  auto *Regs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);
  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);
  for (unsigned i = 0; i < SplitTys.size(); ++i)
    Regs->push_back(0);
  return *Regs;
}

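// Return the virtual registers holding Val, creating them (and, for
// constants, translating the constant into them) on first use.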
ArrayRef<Register> IRTranslator::getOrCreateVRegs(const Value &Val) {
  auto VRegsIt = VMap.findVRegs(Val);
  if (VRegsIt != VMap.vregs_end())
    return *VRegsIt->second;

  if (Val.getType()->isVoidTy())
    return *VMap.getVRegs(Val);

  // Create entry for this type.
  auto *VRegs = VMap.getVRegs(Val);
  auto *Offsets = VMap.getOffsets(Val);

  assert(Val.getType()->isSized() &&
         "Don't know how to create an empty vreg");

  SmallVector<LLT, 4> SplitTys;
  computeValueLLTs(*DL, *Val.getType(), SplitTys,
                   Offsets->empty() ? Offsets : nullptr);

  if (!isa<Constant>(Val)) {
    for (auto Ty : SplitTys)
      VRegs->push_back(MRI->createGenericVirtualRegister(Ty));
    return *VRegs;
  }

  if (Val.getType()->isAggregateType()) {
    // UndefValue, ConstantAggregateZero
    auto &C = cast<Constant>(Val);
    unsigned Idx = 0;
    while (auto Elt = C.getAggregateElement(Idx++)) {
      auto EltRegs = getOrCreateVRegs(*Elt);
      llvm::copy(EltRegs, std::back_inserter(*VRegs));
    }
  } else {
    assert(SplitTys.size() == 1 && "unexpectedly split LLT");
    VRegs->push_back(MRI->createGenericVirtualRegister(SplitTys[0]));
    bool Success = translate(cast<Constant>(Val), VRegs->front());
    if (!Success) {
      OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
                                 MF->getFunction().getSubprogram(),
                                 &MF->getFunction().getEntryBlock());
      R << "unable to translate constant: " << ore::NV("Type", Val.getType());
      reportTranslationError(*MF, *TPC, *ORE, R);
      return *VRegs;
    }
  }

  return *VRegs;
}

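// Return the frame index backing the alloca AI, creating a stack object of
// the appropriate size and alignment on first use.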
int IRTranslator::getOrCreateFrameIndex(const AllocaInst &AI) {
  if (FrameIndices.find(&AI) != FrameIndices.end())
    return FrameIndices[&AI];

  unsigned ElementSize = DL->getTypeAllocSize(AI.getAllocatedType());
  unsigned Size =
      ElementSize * cast<ConstantInt>(AI.getArraySize())->getZExtValue();

  // Always allocate at least one byte.
  Size = std::max(Size, 1u);

  unsigned Alignment = AI.getAlignment();
  if (!Alignment)
    Alignment = DL->getABITypeAlignment(AI.getAllocatedType());

  int &FI = FrameIndices[&AI];
  FI = MF->getFrameInfo().CreateStackObject(Size, Alignment, false, &AI);
  return FI;
}

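// Compute the alignment of the memory access performed by I: the
// instruction's own alignment if present, natural (store-size) alignment for
// the atomic instructions, and the ABI type alignment otherwise.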
unsigned IRTranslator::getMemOpAlignment(const Instruction &I) {
  unsigned Alignment = 0;
  Type *ValTy = nullptr;
  if (const StoreInst *SI = dyn_cast<StoreInst>(&I)) {
    Alignment = SI->getAlignment();
    ValTy = SI->getValueOperand()->getType();
  } else if (const LoadInst *LI = dyn_cast<LoadInst>(&I)) {
    Alignment = LI->getAlignment();
    ValTy = LI->getType();
  } else if (const AtomicCmpXchgInst *AI = dyn_cast<AtomicCmpXchgInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getCompareOperand()->getType());
    ValTy = AI->getCompareOperand()->getType();
  } else if (const AtomicRMWInst *AI = dyn_cast<AtomicRMWInst>(&I)) {
    // TODO(PR27168): This instruction has no alignment attribute, but unlike
    // the default alignment for load/store, the default here is to assume
    // it has NATURAL alignment, not DataLayout-specified alignment.
    const DataLayout &DL = AI->getModule()->getDataLayout();
    Alignment = DL.getTypeStoreSize(AI->getValOperand()->getType());
    ValTy = AI->getType();
  } else {
    OptimizationRemarkMissed R("gisel-irtranslator", "", &I);
    R << "unable to translate memop: " << ore::NV("Opcode", &I);
    reportTranslationError(*MF, *TPC, *ORE, R);
    return 1;
  }

  return Alignment ? Alignment : DL->getABITypeAlignment(ValTy);
}

MachineBasicBlock &IRTranslator::getMBB(const BasicBlock &BB) {
  MachineBasicBlock *&MBB = BBToMBB[&BB];
  assert(MBB && "BasicBlock was not encountered before");
  return *MBB;
}

void IRTranslator::addMachineCFGPred(CFGEdge Edge, MachineBasicBlock *NewPred) {
  assert(NewPred && "new predecessor must be a real MachineBasicBlock");
  MachinePreds[Edge].push_back(NewPred);
}

bool IRTranslator::translateBinaryOp(unsigned Opcode, const User &U,
                                     MachineIRBuilder &MIRBuilder) {
  // Get or create a virtual register for each value.
  // Unless the value is a Constant => loadimm cst?
  // or inline constant each time?
  // Creation of a virtual register needs to have a size.
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }

  MIRBuilder.buildInstr(Opcode, {Res}, {Op0, Op1}, Flags);
  return true;
}

bool IRTranslator::translateFSub(const User &U, MachineIRBuilder &MIRBuilder) {
  // -0.0 - X --> G_FNEG
  if (isa<Constant>(U.getOperand(0)) &&
      U.getOperand(0) == ConstantFP::getZeroValueForNegation(U.getType())) {
    Register Op1 = getOrCreateVReg(*U.getOperand(1));
    Register Res = getOrCreateVReg(U);
    uint16_t Flags = 0;
    if (isa<Instruction>(U)) {
      const Instruction &I = cast<Instruction>(U);
      Flags = MachineInstr::copyFlagsFromInstruction(I);
    }
    // Negate the last operand of the FSUB
    MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op1}, Flags);
    return true;
  }
  return translateBinaryOp(TargetOpcode::G_FSUB, U, MIRBuilder);
}

bool IRTranslator::translateFNeg(const User &U, MachineIRBuilder &MIRBuilder) {
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  uint16_t Flags = 0;
  if (isa<Instruction>(U)) {
    const Instruction &I = cast<Instruction>(U);
    Flags = MachineInstr::copyFlagsFromInstruction(I);
  }
  MIRBuilder.buildInstr(TargetOpcode::G_FNEG, {Res}, {Op0}, Flags);
  return true;
}

bool IRTranslator::translateCompare(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  const CmpInst *CI = dyn_cast<CmpInst>(&U);
  Register Op0 = getOrCreateVReg(*U.getOperand(0));
  Register Op1 = getOrCreateVReg(*U.getOperand(1));
  Register Res = getOrCreateVReg(U);
  CmpInst::Predicate Pred =
      CI ? CI->getPredicate() : static_cast<CmpInst::Predicate>(
                                    cast<ConstantExpr>(U).getPredicate());
  if (CmpInst::isIntPredicate(Pred))
    MIRBuilder.buildICmp(Pred, Res, Op0, Op1);
  else if (Pred == CmpInst::FCMP_FALSE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getNullValue(CI->getType())));
  else if (Pred == CmpInst::FCMP_TRUE)
    MIRBuilder.buildCopy(
        Res, getOrCreateVReg(*Constant::getAllOnesValue(CI->getType())));
  else {
    MIRBuilder.buildInstr(TargetOpcode::G_FCMP, {Res}, {Pred, Op0, Op1},
                          MachineInstr::copyFlagsFromInstruction(*CI));
  }

  return true;
}

bool IRTranslator::translateRet(const User &U, MachineIRBuilder &MIRBuilder) {
  const ReturnInst &RI = cast<ReturnInst>(U);
  const Value *Ret = RI.getReturnValue();
  if (Ret && DL->getTypeStoreSize(Ret->getType()) == 0)
    Ret = nullptr;

  ArrayRef<Register> VRegs;
  if (Ret)
    VRegs = getOrCreateVRegs(*Ret);

  Register SwiftErrorVReg = 0;
  if (CLI->supportSwiftError() && SwiftError.getFunctionArg()) {
    SwiftErrorVReg = SwiftError.getOrCreateVRegUseAt(
        &RI, &MIRBuilder.getMBB(), SwiftError.getFunctionArg());
  }

  // The target may change the insertion point, but that does not matter: a
  // return is always the last instruction of the block.
  return CLI->lowerReturn(MIRBuilder, Ret, VRegs, SwiftErrorVReg);
}

bool IRTranslator::translateBr(const User &U, MachineIRBuilder &MIRBuilder) {
  const BranchInst &BrInst = cast<BranchInst>(U);
  unsigned Succ = 0;
  if (!BrInst.isUnconditional()) {
    // We want a G_BRCOND to the true BB followed by an unconditional branch.
    Register Tst = getOrCreateVReg(*BrInst.getCondition());
    const BasicBlock &TrueTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ++));
    MachineBasicBlock &TrueBB = getMBB(TrueTgt);
    MIRBuilder.buildBrCond(Tst, TrueBB);
  }

  const BasicBlock &BrTgt = *cast<BasicBlock>(BrInst.getSuccessor(Succ));
  MachineBasicBlock &TgtBB = getMBB(BrTgt);
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();

  // If the unconditional target is the layout successor, fallthrough.
  if (!CurBB.isLayoutSuccessor(&TgtBB))
    MIRBuilder.buildBr(TgtBB);

  // Link successors.
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));
  return true;
}

void IRTranslator::addSuccessorWithProb(MachineBasicBlock *Src,
                                        MachineBasicBlock *Dst,
                                        BranchProbability Prob) {
  if (!FuncInfo.BPI) {
    Src->addSuccessorWithoutProb(Dst);
    return;
  }
  if (Prob.isUnknown())
    Prob = getEdgeProbability(Src, Dst);
  Src->addSuccessor(Dst, Prob);
}

BranchProbability
IRTranslator::getEdgeProbability(const MachineBasicBlock *Src,
                                 const MachineBasicBlock *Dst) const {
  const BasicBlock *SrcBB = Src->getBasicBlock();
  const BasicBlock *DstBB = Dst->getBasicBlock();
  if (!FuncInfo.BPI) {
    // If BPI is not available, set the default probability as 1 / N, where N
    // is the number of successors.
    auto SuccSize = std::max<uint32_t>(succ_size(SrcBB), 1);
    return BranchProbability(1, SuccSize);
  }
  return FuncInfo.BPI->getEdgeProbability(SrcBB, DstBB);
}

bool IRTranslator::translateSwitch(const User &U, MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  // Extract cases from the switch.
  const SwitchInst &SI = cast<SwitchInst>(U);
  BranchProbabilityInfo *BPI = FuncInfo.BPI;
  CaseClusterVector Clusters;
  Clusters.reserve(SI.getNumCases());
  for (auto &I : SI.cases()) {
    MachineBasicBlock *Succ = &getMBB(*I.getCaseSuccessor());
    assert(Succ && "Could not find successor mbb in mapping");
    const ConstantInt *CaseVal = I.getCaseValue();
    BranchProbability Prob =
        BPI ? BPI->getEdgeProbability(SI.getParent(), I.getSuccessorIndex())
            : BranchProbability(1, SI.getNumCases() + 1);
    Clusters.push_back(CaseCluster::range(CaseVal, CaseVal, Succ, Prob));
  }

  MachineBasicBlock *DefaultMBB = &getMBB(*SI.getDefaultDest());

  // Cluster adjacent cases with the same destination. We do this at all
  // optimization levels because it's cheap to do and will make codegen faster
  // if there are many clusters.
  sortAndRangeify(Clusters);

  MachineBasicBlock *SwitchMBB = &getMBB(*SI.getParent());

  // If there is only the default destination, jump there directly.
  if (Clusters.empty()) {
    SwitchMBB->addSuccessor(DefaultMBB);
    if (DefaultMBB != SwitchMBB->getNextNode())
      MIB.buildBr(*DefaultMBB);
    return true;
  }

  SL->findJumpTables(Clusters, &SI, DefaultMBB);

  LLVM_DEBUG({
    dbgs() << "Case clusters: ";
    for (const CaseCluster &C : Clusters) {
      if (C.Kind == CC_JumpTable)
        dbgs() << "JT:";
      if (C.Kind == CC_BitTests)
        dbgs() << "BT:";

      C.Low->getValue().print(dbgs(), true);
      if (C.Low != C.High) {
        dbgs() << '-';
        C.High->getValue().print(dbgs(), true);
      }
      dbgs() << ' ';
    }
    dbgs() << '\n';
  });

  assert(!Clusters.empty());
  SwitchWorkList WorkList;
  CaseClusterIt First = Clusters.begin();
  CaseClusterIt Last = Clusters.end() - 1;
  auto DefaultProb = getEdgeProbability(SwitchMBB, DefaultMBB);
  WorkList.push_back({SwitchMBB, First, Last, nullptr, nullptr, DefaultProb});

  // FIXME: At the moment we don't do any splitting optimizations here like
  // SelectionDAG does, so this worklist only has one entry.
  while (!WorkList.empty()) {
    SwitchWorkListItem W = WorkList.back();
    WorkList.pop_back();
    if (!lowerSwitchWorkItem(W, SI.getCondition(), SwitchMBB, DefaultMBB, MIB))
      return false;
  }
  return true;
}

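// Emit the indirect branch through an already-lowered jump table. The index
// computation and range check are emitted separately by emitJumpTableHeader.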
void IRTranslator::emitJumpTable(SwitchCG::JumpTable &JT,
                                 MachineBasicBlock *MBB) {
  // Emit the code for the jump table
  assert(JT.Reg != -1U && "Should lower JT Header first!");
  MachineIRBuilder MIB(*MBB->getParent());
  MIB.setMBB(*MBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  Type *PtrIRTy = Type::getInt8PtrTy(MF->getFunction().getContext());
  const LLT PtrTy = getLLTForType(*PtrIRTy, *DL);

  auto Table = MIB.buildJumpTable(PtrTy, JT.JTI);
  MIB.buildBrJT(Table.getReg(0), JT.JTI, JT.Reg);
}

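// Emit the jump table header: rebase the switch value, extend or truncate it
// to pointer width, and (unless the range check is omitted) branch to the
// default block when the value is out of range.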
bool IRTranslator::emitJumpTableHeader(SwitchCG::JumpTable &JT,
                                       SwitchCG::JumpTableHeader &JTH,
                                       MachineBasicBlock *HeaderBB) {
  MachineIRBuilder MIB(*HeaderBB->getParent());
  MIB.setMBB(*HeaderBB);
  MIB.setDebugLoc(CurBuilder->getDebugLoc());

  const Value &SValue = *JTH.SValue;
  // Subtract the lowest switch case value from the value being switched on.
  const LLT SwitchTy = getLLTForType(*SValue.getType(), *DL);
  Register SwitchOpReg = getOrCreateVReg(SValue);
  auto FirstCst = MIB.buildConstant(SwitchTy, JTH.First);
  auto Sub = MIB.buildSub({SwitchTy}, SwitchOpReg, FirstCst);

  // This value may be smaller or larger than the target's pointer type, and
  // therefore require extension or truncation.
  Type *PtrIRTy = SValue.getType()->getPointerTo();
  const LLT PtrScalarTy = LLT::scalar(DL->getTypeSizeInBits(PtrIRTy));
  Sub = MIB.buildZExtOrTrunc(PtrScalarTy, Sub);

  JT.Reg = Sub.getReg(0);

  if (JTH.OmitRangeCheck) {
    if (JT.MBB != HeaderBB->getNextNode())
      MIB.buildBr(*JT.MBB);
    return true;
  }

  // Emit the range check for the jump table, and branch to the default block
  // for the switch statement if the value being switched on exceeds the
  // largest case in the switch.
  auto Cst = getOrCreateVReg(
      *ConstantInt::get(SValue.getType(), JTH.Last - JTH.First));
  Cst = MIB.buildZExtOrTrunc(PtrScalarTy, Cst).getReg(0);
  auto Cmp = MIB.buildICmp(CmpInst::ICMP_UGT, LLT::scalar(1), Sub, Cst);

  auto BrCond = MIB.buildBrCond(Cmp.getReg(0), *JT.Default);

  // Avoid emitting unnecessary branches to the next block.
  if (JT.MBB != HeaderBB->getNextNode())
    BrCond = MIB.buildBr(*JT.MBB);
  return true;
}

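// Emit the comparison and conditional branch for a single switch case block,
// updating successor probabilities and machine CFG predecessors as it goes.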
void IRTranslator::emitSwitchCase(SwitchCG::CaseBlock &CB,
                                  MachineBasicBlock *SwitchBB,
                                  MachineIRBuilder &MIB) {
  Register CondLHS = getOrCreateVReg(*CB.CmpLHS);
  Register Cond;
  DebugLoc OldDbgLoc = MIB.getDebugLoc();
  MIB.setDebugLoc(CB.DbgLoc);
  MIB.setMBB(*CB.ThisBB);

  if (CB.PredInfo.NoCmp) {
    // Branch or fall through to TrueBB.
    addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);
    addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                      CB.ThisBB);
    CB.ThisBB->normalizeSuccProbs();
    if (CB.TrueBB != CB.ThisBB->getNextNode())
      MIB.buildBr(*CB.TrueBB);
    MIB.setDebugLoc(OldDbgLoc);
    return;
  }

  const LLT i1Ty = LLT::scalar(1);
  // Build the compare.
  if (!CB.CmpMHS) {
    Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
    Cond = MIB.buildICmp(CB.PredInfo.Pred, i1Ty, CondLHS, CondRHS).getReg(0);
  } else {
    assert(CB.PredInfo.Pred == CmpInst::ICMP_SLE &&
           "Can only handle SLE ranges");

    const APInt &Low = cast<ConstantInt>(CB.CmpLHS)->getValue();
    const APInt &High = cast<ConstantInt>(CB.CmpRHS)->getValue();

    Register CmpOpReg = getOrCreateVReg(*CB.CmpMHS);
    if (cast<ConstantInt>(CB.CmpLHS)->isMinValue(true)) {
      Register CondRHS = getOrCreateVReg(*CB.CmpRHS);
      Cond =
          MIB.buildICmp(CmpInst::ICMP_SLE, i1Ty, CmpOpReg, CondRHS).getReg(0);
    } else {
      const LLT &CmpTy = MRI->getType(CmpOpReg);
      auto Sub = MIB.buildSub({CmpTy}, CmpOpReg, CondLHS);
      auto Diff = MIB.buildConstant(CmpTy, High - Low);
      Cond = MIB.buildICmp(CmpInst::ICMP_ULE, i1Ty, Sub, Diff).getReg(0);
    }
  }

  // Update successor info
  addSuccessorWithProb(CB.ThisBB, CB.TrueBB, CB.TrueProb);

  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.TrueBB->getBasicBlock()},
                    CB.ThisBB);

  // TrueBB and FalseBB are always different unless the incoming IR is
  // degenerate. This only happens when running llc on weird IR.
  if (CB.TrueBB != CB.FalseBB)
    addSuccessorWithProb(CB.ThisBB, CB.FalseBB, CB.FalseProb);
  CB.ThisBB->normalizeSuccProbs();

  // if (SwitchBB->getBasicBlock() != CB.FalseBB->getBasicBlock())
  addMachineCFGPred({SwitchBB->getBasicBlock(), CB.FalseBB->getBasicBlock()},
                    CB.ThisBB);

  // If the lhs block is the next block, invert the condition so that we can
  // fall through to the lhs instead of the rhs block.
  if (CB.TrueBB == CB.ThisBB->getNextNode()) {
    std::swap(CB.TrueBB, CB.FalseBB);
    auto True = MIB.buildConstant(i1Ty, 1);
    Cond = MIB.buildInstr(TargetOpcode::G_XOR, {i1Ty}, {Cond, True}, None)
               .getReg(0);
  }

  MIB.buildBrCond(Cond, *CB.TrueBB);
  MIB.buildBr(*CB.FalseBB);
  MIB.setDebugLoc(OldDbgLoc);
}

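// Lower a jump-table cluster from the switch work list: insert the jump
// block, redistribute edge probabilities, and emit the jump table header if
// we are already positioned in the switch block.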
bool IRTranslator::lowerJumpTableWorkItem(SwitchCG::SwitchWorkListItem W,
                                          MachineBasicBlock *SwitchMBB,
                                          MachineBasicBlock *CurMBB,
                                          MachineBasicBlock *DefaultMBB,
                                          MachineIRBuilder &MIB,
                                          MachineFunction::iterator BBI,
                                          BranchProbability UnhandledProbs,
                                          SwitchCG::CaseClusterIt I,
                                          MachineBasicBlock *Fallthrough,
                                          bool FallthroughUnreachable) {
  using namespace SwitchCG;
  MachineFunction *CurMF = SwitchMBB->getParent();
  // FIXME: Optimize away range check based on pivot comparisons.
  JumpTableHeader *JTH = &SL->JTCases[I->JTCasesIndex].first;
  SwitchCG::JumpTable *JT = &SL->JTCases[I->JTCasesIndex].second;
  BranchProbability DefaultProb = W.DefaultProb;

  // The jump block hasn't been inserted yet; insert it here.
  MachineBasicBlock *JumpMBB = JT->MBB;
  CurMF->insert(BBI, JumpMBB);

  // Since the jump table block is separate from the switch block, we need
  // to keep track of it as a machine predecessor to the default block,
  // otherwise we lose the phi edges.
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    CurMBB);
  addMachineCFGPred({SwitchMBB->getBasicBlock(), DefaultMBB->getBasicBlock()},
                    JumpMBB);

  auto JumpProb = I->Prob;
  auto FallthroughProb = UnhandledProbs;

  // If the default statement is a target of the jump table, we evenly
  // distribute the default probability to successors of CurMBB. Also
  // update the probability on the edge from JumpMBB to Fallthrough.
  for (MachineBasicBlock::succ_iterator SI = JumpMBB->succ_begin(),
                                        SE = JumpMBB->succ_end();
       SI != SE; ++SI) {
    if (*SI == DefaultMBB) {
      JumpProb += DefaultProb / 2;
      FallthroughProb -= DefaultProb / 2;
      JumpMBB->setSuccProbability(SI, DefaultProb / 2);
      JumpMBB->normalizeSuccProbs();
    } else {
      // Also record edges from the jump table block to its successors.
      addMachineCFGPred({SwitchMBB->getBasicBlock(), (*SI)->getBasicBlock()},
                        JumpMBB);
    }
  }

  // Skip the range check if the fallthrough block is unreachable.
  if (FallthroughUnreachable)
    JTH->OmitRangeCheck = true;

  if (!JTH->OmitRangeCheck)
    addSuccessorWithProb(CurMBB, Fallthrough, FallthroughProb);
  addSuccessorWithProb(CurMBB, JumpMBB, JumpProb);
  CurMBB->normalizeSuccProbs();

  // The jump table header will be inserted in our current block, do the
  // range check, and fall through to our fallthrough block.
  JTH->HeaderBB = CurMBB;
  JT->Default = Fallthrough; // FIXME: Move Default to JumpTableHeader.

  // If we're in the right place, emit the jump table header right now.
  if (CurMBB == SwitchMBB) {
    if (!emitJumpTableHeader(*JT, *JTH, CurMBB))
      return false;
    JTH->Emitted = true;
  }
  return true;
}
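
// Lower a range cluster from the switch work list by emitting either an
// equality check (single case value) or an SLE range check against the
// cluster bounds.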
bool IRTranslator::lowerSwitchRangeWorkItem(SwitchCG::CaseClusterIt I,
                                            Value *Cond,
                                            MachineBasicBlock *Fallthrough,
                                            bool FallthroughUnreachable,
                                            BranchProbability UnhandledProbs,
                                            MachineBasicBlock *CurMBB,
                                            MachineIRBuilder &MIB,
                                            MachineBasicBlock *SwitchMBB) {
  using namespace SwitchCG;
  const Value *RHS, *LHS, *MHS;
  CmpInst::Predicate Pred;
  if (I->Low == I->High) {
    // Check Cond == I->Low.
    Pred = CmpInst::ICMP_EQ;
    LHS = Cond;
    RHS = I->Low;
    MHS = nullptr;
  } else {
    // Check I->Low <= Cond <= I->High.
    Pred = CmpInst::ICMP_SLE;
    LHS = I->Low;
    MHS = Cond;
    RHS = I->High;
  }

  // If Fallthrough is unreachable, fold away the comparison.
  // The false probability is the sum of all unhandled cases.
  CaseBlock CB(Pred, FallthroughUnreachable, LHS, RHS, MHS, I->MBB, Fallthrough,
               CurMBB, MIB.getDebugLoc(), I->Prob, UnhandledProbs);

  emitSwitchCase(CB, SwitchMBB, MIB);
  return true;
}

bool IRTranslator::lowerSwitchWorkItem(SwitchCG::SwitchWorkListItem W,
                                       Value *Cond,
                                       MachineBasicBlock *SwitchMBB,
                                       MachineBasicBlock *DefaultMBB,
                                       MachineIRBuilder &MIB) {
  using namespace SwitchCG;
  MachineFunction *CurMF = FuncInfo.MF;
  MachineBasicBlock *NextMBB = nullptr;
  MachineFunction::iterator BBI(W.MBB);
  if (++BBI != FuncInfo.MF->end())
    NextMBB = &*BBI;

  if (EnableOpts) {
    // Here, we order cases by probability so the most likely case will be
    // checked first. However, two clusters can have the same probability in
    // which case their relative ordering is non-deterministic. So we use Low
    // as a tie-breaker as clusters are guaranteed to never overlap.
    llvm::sort(W.FirstCluster, W.LastCluster + 1,
               [](const CaseCluster &a, const CaseCluster &b) {
                 return a.Prob != b.Prob
                            ? a.Prob > b.Prob
                            : a.Low->getValue().slt(b.Low->getValue());
               });

    // Rearrange the case blocks so that the last one falls through if possible
    // without changing the order of probabilities.
    for (CaseClusterIt I = W.LastCluster; I > W.FirstCluster;) {
      --I;
      if (I->Prob > W.LastCluster->Prob)
        break;
      if (I->Kind == CC_Range && I->MBB == NextMBB) {
        std::swap(*I, *W.LastCluster);
        break;
      }
    }
  }

  // Compute total probability.
  BranchProbability DefaultProb = W.DefaultProb;
  BranchProbability UnhandledProbs = DefaultProb;
  for (CaseClusterIt I = W.FirstCluster; I <= W.LastCluster; ++I)
    UnhandledProbs += I->Prob;

  MachineBasicBlock *CurMBB = W.MBB;
  for (CaseClusterIt I = W.FirstCluster, E = W.LastCluster; I <= E; ++I) {
    bool FallthroughUnreachable = false;
    MachineBasicBlock *Fallthrough;
    if (I == W.LastCluster) {
      // For the last cluster, fall through to the default destination.
      Fallthrough = DefaultMBB;
      FallthroughUnreachable = isa<UnreachableInst>(
          DefaultMBB->getBasicBlock()->getFirstNonPHIOrDbg());
    } else {
      Fallthrough = CurMF->CreateMachineBasicBlock(CurMBB->getBasicBlock());
      CurMF->insert(BBI, Fallthrough);
    }
    UnhandledProbs -= I->Prob;

    switch (I->Kind) {
    case CC_BitTests: {
      LLVM_DEBUG(dbgs() << "Switch to bit test optimization unimplemented");
      return false; // Bit tests currently unimplemented.
    }
    case CC_JumpTable: {
      if (!lowerJumpTableWorkItem(W, SwitchMBB, CurMBB, DefaultMBB, MIB, BBI,
                                  UnhandledProbs, I, Fallthrough,
                                  FallthroughUnreachable)) {
        LLVM_DEBUG(dbgs() << "Failed to lower jump table");
        return false;
      }
      break;
    }
    case CC_Range: {
      if (!lowerSwitchRangeWorkItem(I, Cond, Fallthrough,
                                    FallthroughUnreachable, UnhandledProbs,
                                    CurMBB, MIB, SwitchMBB)) {
        LLVM_DEBUG(dbgs() << "Failed to lower switch range");
        return false;
      }
      break;
    }
    }
    CurMBB = Fallthrough;
  }

  return true;
}

bool IRTranslator::translateIndirectBr(const User &U,
                                       MachineIRBuilder &MIRBuilder) {
  const IndirectBrInst &BrInst = cast<IndirectBrInst>(U);

  const Register Tgt = getOrCreateVReg(*BrInst.getAddress());
  MIRBuilder.buildBrIndirect(Tgt);

  // Link successors.
  MachineBasicBlock &CurBB = MIRBuilder.getMBB();
  for (const BasicBlock *Succ : successors(&BrInst))
    CurBB.addSuccessor(&getMBB(*Succ));

  return true;
}

static bool isSwiftError(const Value *V) {
  if (auto Arg = dyn_cast<Argument>(V))
    return Arg->hasSwiftErrorAttr();
  if (auto AI = dyn_cast<AllocaInst>(V))
    return AI->isSwiftError();
  return false;
}

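// Translate an IR load into one G_LOAD per split value type, materializing a
// GEP for each part's offset. swifterror loads become copies from the
// swifterror virtual register instead of memory accesses.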
bool IRTranslator::translateLoad(const User &U, MachineIRBuilder &MIRBuilder) {
  const LoadInst &LI = cast<LoadInst>(U);

  auto Flags = LI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOLoad;

  if (DL->getTypeStoreSize(LI.getType()) == 0)
    return true;

  ArrayRef<Register> Regs = getOrCreateVRegs(LI);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(LI);
  Register Base = getOrCreateVReg(*LI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(LI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(LI.getPointerOperand())) {
    assert(Regs.size() == 1 && "swifterror should be single pointer");
    Register VReg = SwiftError.getOrCreateVRegUseAt(&LI, &MIRBuilder.getMBB(),
                                                    LI.getPointerOperand());
    MIRBuilder.buildCopy(Regs[0], VReg);
    return true;
  }

  for (unsigned i = 0; i < Regs.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(LI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(LI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Regs[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        LI.getSyncScopeID(), LI.getOrdering());
    MIRBuilder.buildLoad(Regs[i], Addr, *MMO);
  }

  return true;
}

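// Translate an IR store into one G_STORE per split value type, mirroring the
// structure of translateLoad; swifterror stores become copies into the
// swifterror virtual register.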
bool IRTranslator::translateStore(const User &U, MachineIRBuilder &MIRBuilder) {
  const StoreInst &SI = cast<StoreInst>(U);
  auto Flags = SI.isVolatile() ? MachineMemOperand::MOVolatile
                               : MachineMemOperand::MONone;
  Flags |= MachineMemOperand::MOStore;

  if (DL->getTypeStoreSize(SI.getValueOperand()->getType()) == 0)
    return true;

  ArrayRef<Register> Vals = getOrCreateVRegs(*SI.getValueOperand());
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*SI.getValueOperand());
  Register Base = getOrCreateVReg(*SI.getPointerOperand());

  Type *OffsetIRTy = DL->getIntPtrType(SI.getPointerOperandType());
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  if (CLI->supportSwiftError() && isSwiftError(SI.getPointerOperand())) {
    assert(Vals.size() == 1 && "swifterror should be single pointer");

    Register VReg = SwiftError.getOrCreateVRegDefAt(&SI, &MIRBuilder.getMBB(),
                                                    SI.getPointerOperand());
    MIRBuilder.buildCopy(VReg, Vals[0]);
    return true;
  }

  for (unsigned i = 0; i < Vals.size(); ++i) {
    Register Addr;
    MIRBuilder.materializeGEP(Addr, Base, OffsetTy, Offsets[i] / 8);

    MachinePointerInfo Ptr(SI.getPointerOperand(), Offsets[i] / 8);
    unsigned BaseAlign = getMemOpAlignment(SI);
    auto MMO = MF->getMachineMemOperand(
        Ptr, Flags, (MRI->getType(Vals[i]).getSizeInBits() + 7) / 8,
        MinAlign(BaseAlign, Offsets[i] / 8), AAMDNodes(), nullptr,
        SI.getSyncScopeID(), SI.getOrdering());
    MIRBuilder.buildStore(Vals[i], Addr, *MMO);
  }
  return true;
}

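// Compute the bit offset into the source aggregate selected by the indices of
// an extractvalue or insertvalue (or the equivalent constant expression).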
static uint64_t getOffsetFromIndices(const User &U, const DataLayout &DL) {
  const Value *Src = U.getOperand(0);
  Type *Int32Ty = Type::getInt32Ty(U.getContext());

  // getIndexedOffsetInType is designed for GEPs, so the first index is the
  // usual array element rather than looking into the actual aggregate.
  SmallVector<Value *, 1> Indices;
  Indices.push_back(ConstantInt::get(Int32Ty, 0));

  if (const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(&U)) {
    for (auto Idx : EVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else if (const InsertValueInst *IVI = dyn_cast<InsertValueInst>(&U)) {
    for (auto Idx : IVI->indices())
      Indices.push_back(ConstantInt::get(Int32Ty, Idx));
  } else {
    for (unsigned i = 1; i < U.getNumOperands(); ++i)
      Indices.push_back(U.getOperand(i));
  }

  return 8 * static_cast<uint64_t>(
                 DL.getIndexedOffsetInType(Src->getType(), Indices));
}

bool IRTranslator::translateExtractValue(const User &U,
                                         MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<uint64_t> Offsets = *VMap.getOffsets(*Src);
  unsigned Idx = llvm::lower_bound(Offsets, Offset) - Offsets.begin();
  auto &DstRegs = allocateVRegs(U);

  for (unsigned i = 0; i < DstRegs.size(); ++i)
    DstRegs[i] = SrcRegs[Idx++];

  return true;
}

bool IRTranslator::translateInsertValue(const User &U,
                                        MachineIRBuilder &MIRBuilder) {
  const Value *Src = U.getOperand(0);
  uint64_t Offset = getOffsetFromIndices(U, *DL);
  auto &DstRegs = allocateVRegs(U);
  ArrayRef<uint64_t> DstOffsets = *VMap.getOffsets(U);
  ArrayRef<Register> SrcRegs = getOrCreateVRegs(*Src);
  ArrayRef<Register> InsertedRegs = getOrCreateVRegs(*U.getOperand(1));
  auto InsertedIt = InsertedRegs.begin();

  for (unsigned i = 0; i < DstRegs.size(); ++i) {
    if (DstOffsets[i] >= Offset && InsertedIt != InsertedRegs.end())
      DstRegs[i] = *InsertedIt++;
    else
      DstRegs[i] = SrcRegs[i];
  }

  return true;
}

bool IRTranslator::translateSelect(const User &U,
                                   MachineIRBuilder &MIRBuilder) {
  Register Tst = getOrCreateVReg(*U.getOperand(0));
  ArrayRef<Register> ResRegs = getOrCreateVRegs(U);
  ArrayRef<Register> Op0Regs = getOrCreateVRegs(*U.getOperand(1));
  ArrayRef<Register> Op1Regs = getOrCreateVRegs(*U.getOperand(2));

  const SelectInst &SI = cast<SelectInst>(U);
  uint16_t Flags = 0;
  if (const CmpInst *Cmp = dyn_cast<CmpInst>(SI.getCondition()))
    Flags = MachineInstr::copyFlagsFromInstruction(*Cmp);

  for (unsigned i = 0; i < ResRegs.size(); ++i) {
    MIRBuilder.buildInstr(TargetOpcode::G_SELECT, {ResRegs[i]},
                          {Tst, Op0Regs[i], Op1Regs[i]}, Flags);
  }

  return true;
}

bool IRTranslator::translateBitCast(const User &U,
                                    MachineIRBuilder &MIRBuilder) {
  // If we're bitcasting to the source type, we can reuse the source vreg.
  if (getLLTForType(*U.getOperand(0)->getType(), *DL) ==
      getLLTForType(*U.getType(), *DL)) {
    Register SrcReg = getOrCreateVReg(*U.getOperand(0));
    auto &Regs = *VMap.getVRegs(U);
    // If we already assigned a vreg for this bitcast, we can't change that.
    // Emit a copy to satisfy the users we already emitted.
    if (!Regs.empty())
      MIRBuilder.buildCopy(Regs[0], SrcReg);
    else {
      Regs.push_back(SrcReg);
      VMap.getOffsets(U)->push_back(0);
    }
    return true;
  }
  return translateCast(TargetOpcode::G_BITCAST, U, MIRBuilder);
}

bool IRTranslator::translateCast(unsigned Opcode, const User &U,
                                 MachineIRBuilder &MIRBuilder) {
  Register Op = getOrCreateVReg(*U.getOperand(0));
  Register Res = getOrCreateVReg(U);
  MIRBuilder.buildInstr(Opcode, {Res}, {Op});
  return true;
}

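// Translate a (scalar) getelementptr by folding constant indices into a
// running byte offset and emitting G_GEP (and G_MUL where needed) for the
// dynamic indices.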
bool IRTranslator::translateGetElementPtr(const User &U,
                                          MachineIRBuilder &MIRBuilder) {
  // FIXME: support vector GEPs.
  if (U.getType()->isVectorTy())
    return false;

  Value &Op0 = *U.getOperand(0);
  Register BaseReg = getOrCreateVReg(Op0);
  Type *PtrIRTy = Op0.getType();
  LLT PtrTy = getLLTForType(*PtrIRTy, *DL);
  Type *OffsetIRTy = DL->getIntPtrType(PtrIRTy);
  LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);

  int64_t Offset = 0;
  for (gep_type_iterator GTI = gep_type_begin(&U), E = gep_type_end(&U);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      unsigned Field = cast<Constant>(Idx)->getUniqueInteger().getZExtValue();
      Offset += DL->getStructLayout(StTy)->getElementOffset(Field);
      continue;
    } else {
      uint64_t ElementSize = DL->getTypeAllocSize(GTI.getIndexedType());

      // If this is a scalar constant or a splat vector of constants,
      // handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        Offset += ElementSize * CI->getSExtValue();
        continue;
      }

      if (Offset != 0) {
        Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
        LLT OffsetTy = getLLTForType(*OffsetIRTy, *DL);
        auto OffsetMIB = MIRBuilder.buildConstant({OffsetTy}, Offset);
        MIRBuilder.buildGEP(NewBaseReg, BaseReg, OffsetMIB.getReg(0));

        BaseReg = NewBaseReg;
        Offset = 0;
      }

      Register IdxReg = getOrCreateVReg(*Idx);
      if (MRI->getType(IdxReg) != OffsetTy) {
        Register NewIdxReg = MRI->createGenericVirtualRegister(OffsetTy);
        MIRBuilder.buildSExtOrTrunc(NewIdxReg, IdxReg);
        IdxReg = NewIdxReg;
      }

      // N = N + Idx * ElementSize;
      // Avoid doing it for ElementSize of 1.
      Register GepOffsetReg;
      if (ElementSize != 1) {
        GepOffsetReg = MRI->createGenericVirtualRegister(OffsetTy);
        auto ElementSizeMIB = MIRBuilder.buildConstant(
            getLLTForType(*OffsetIRTy, *DL), ElementSize);
        MIRBuilder.buildMul(GepOffsetReg, ElementSizeMIB.getReg(0), IdxReg);
      } else
        GepOffsetReg = IdxReg;

      Register NewBaseReg = MRI->createGenericVirtualRegister(PtrTy);
      MIRBuilder.buildGEP(NewBaseReg, BaseReg, GepOffsetReg);
      BaseReg = NewBaseReg;
    }
  }

  if (Offset != 0) {
    auto OffsetMIB =
        MIRBuilder.buildConstant(getLLTForType(*OffsetIRTy, *DL), Offset);
    MIRBuilder.buildGEP(getOrCreateVReg(U), BaseReg, OffsetMIB.getReg(0));
    return true;
  }

  MIRBuilder.buildCopy(getOrCreateVReg(U), BaseReg);
  return true;
}

bool IRTranslator::translateMemfunc(const CallInst &CI,
                                    MachineIRBuilder &MIRBuilder,
                                    unsigned ID) {

  // If the source is undef, then just emit a nop.
  if (isa<UndefValue>(CI.getArgOperand(1))) {
    switch (ID) {
    case Intrinsic::memmove:
    case Intrinsic::memcpy:
    case Intrinsic::memset:
      return true;
    default:
      break;
    }
  }

  LLT SizeTy = getLLTForType(*CI.getArgOperand(2)->getType(), *DL);
  Type *DstTy = CI.getArgOperand(0)->getType();
  if (cast<PointerType>(DstTy)->getAddressSpace() != 0 ||
      SizeTy.getSizeInBits() != DL->getPointerSizeInBits(0))
    return false;

  SmallVector<CallLowering::ArgInfo, 8> Args;
  for (int i = 0; i < 3; ++i) {
    const auto &Arg = CI.getArgOperand(i);
    Args.emplace_back(getOrCreateVReg(*Arg), Arg->getType());
  }

  const char *Callee;
  switch (ID) {
  case Intrinsic::memmove:
  case Intrinsic::memcpy: {
    Type *SrcTy = CI.getArgOperand(1)->getType();
    if (cast<PointerType>(SrcTy)->getAddressSpace() != 0)
      return false;
    Callee = ID == Intrinsic::memcpy ? "memcpy" : "memmove";
    break;
  }
  case Intrinsic::memset:
    Callee = "memset";
    break;
  default:
    return false;
  }

  return CLI->lowerCall(MIRBuilder, CI.getCallingConv(),
                        MachineOperand::CreateES(Callee),
                        CallLowering::ArgInfo({0}, CI.getType()), Args);
}

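// Load the stack guard value into DstReg via the target's LOAD_STACK_GUARD
// pseudo, attaching an invariant, dereferenceable memory operand when the
// guard variable is known.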
void IRTranslator::getStackGuard(Register DstReg,
                                 MachineIRBuilder &MIRBuilder) {
  const TargetRegisterInfo *TRI = MF->getSubtarget().getRegisterInfo();
  MRI->setRegClass(DstReg, TRI->getPointerRegClass(*MF));
  auto MIB = MIRBuilder.buildInstr(TargetOpcode::LOAD_STACK_GUARD);
  MIB.addDef(DstReg);

  auto &TLI = *MF->getSubtarget().getTargetLowering();
  Value *Global = TLI.getSDagStackGuard(*MF->getFunction().getParent());
  if (!Global)
    return;

  MachinePointerInfo MPInfo(Global);
  auto Flags = MachineMemOperand::MOLoad | MachineMemOperand::MOInvariant |
               MachineMemOperand::MODereferenceable;
  MachineMemOperand *MemRef =
      MF->getMachineMemOperand(MPInfo, Flags, DL->getPointerSizeInBits() / 8,
                               DL->getPointerABIAlignment(0));
  MIB.setMemRefs({MemRef});
}

bool IRTranslator::translateOverflowIntrinsic(const CallInst &CI, unsigned Op,
                                              MachineIRBuilder &MIRBuilder) {
  ArrayRef<Register> ResRegs = getOrCreateVRegs(CI);
  MIRBuilder.buildInstr(Op)
      .addDef(ResRegs[0])
      .addDef(ResRegs[1])
      .addUse(getOrCreateVReg(*CI.getOperand(0)))
      .addUse(getOrCreateVReg(*CI.getOperand(1)));

  return true;
}

unsigned IRTranslator::getSimpleIntrinsicOpcode(Intrinsic::ID ID) {
  switch (ID) {
  default:
    break;
  case Intrinsic::bswap:
    return TargetOpcode::G_BSWAP;
  case Intrinsic::ceil:
    return TargetOpcode::G_FCEIL;
  case Intrinsic::cos:
    return TargetOpcode::G_FCOS;
  case Intrinsic::ctpop:
    return TargetOpcode::G_CTPOP;
  case Intrinsic::exp:
    return TargetOpcode::G_FEXP;
  case Intrinsic::exp2:
    return TargetOpcode::G_FEXP2;
  case Intrinsic::fabs:
    return TargetOpcode::G_FABS;
  case Intrinsic::copysign:
    return TargetOpcode::G_FCOPYSIGN;
  case Intrinsic::minnum:
    return TargetOpcode::G_FMINNUM;
  case Intrinsic::maxnum:
    return TargetOpcode::G_FMAXNUM;
  case Intrinsic::minimum:
    return TargetOpcode::G_FMINIMUM;
  case Intrinsic::maximum:
    return TargetOpcode::G_FMAXIMUM;
  case Intrinsic::canonicalize:
    return TargetOpcode::G_FCANONICALIZE;
  case Intrinsic::floor:
    return TargetOpcode::G_FFLOOR;
  case Intrinsic::fma:
    return TargetOpcode::G_FMA;
  case Intrinsic::log:
    return TargetOpcode::G_FLOG;
  case Intrinsic::log2:
    return TargetOpcode::G_FLOG2;
  case Intrinsic::log10:
    return TargetOpcode::G_FLOG10;
  case Intrinsic::nearbyint:
    return TargetOpcode::G_FNEARBYINT;
  case Intrinsic::pow:
    return TargetOpcode::G_FPOW;
  case Intrinsic::rint:
    return TargetOpcode::G_FRINT;
  case Intrinsic::round:
    return TargetOpcode::G_INTRINSIC_ROUND;
  case Intrinsic::sin:
    return TargetOpcode::G_FSIN;
  case Intrinsic::sqrt:
    return TargetOpcode::G_FSQRT;
  case Intrinsic::trunc:
    return TargetOpcode::G_INTRINSIC_TRUNC;
  }
  return Intrinsic::not_intrinsic;
}

bool IRTranslator::translateSimpleIntrinsic(const CallInst &CI,
                                            Intrinsic::ID ID,
                                            MachineIRBuilder &MIRBuilder) {

  unsigned Op = getSimpleIntrinsicOpcode(ID);

  // Is this a simple intrinsic?
  if (Op == Intrinsic::not_intrinsic)
    return false;

  // Yes. Let's translate it.
  SmallVector<llvm::SrcOp, 4> VRegs;
  for (auto &Arg : CI.arg_operands())
    VRegs.push_back(getOrCreateVReg(*Arg));

  MIRBuilder.buildInstr(Op, {getOrCreateVReg(CI)}, VRegs,
                        MachineInstr::copyFlagsFromInstruction(CI));
  return true;
}

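// Translate the intrinsics the IRTranslator knows how to handle directly;
// return false for anything that must go through the generic intrinsic or
// call lowering path instead.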
translateKnownIntrinsic(const CallInst & CI,Intrinsic::ID ID,MachineIRBuilder & MIRBuilder)1283 bool IRTranslator::translateKnownIntrinsic(const CallInst &CI, Intrinsic::ID ID,
1284 MachineIRBuilder &MIRBuilder) {
1285
1286 // If this is a simple intrinsic (that is, we just need to add a def of
1287 // a vreg, and uses for each arg operand, then translate it.
1288 if (translateSimpleIntrinsic(CI, ID, MIRBuilder))
1289 return true;
1290
1291 switch (ID) {
1292 default:
1293 break;
1294 case Intrinsic::lifetime_start:
1295 case Intrinsic::lifetime_end: {
1296 // No stack colouring in O0, discard region information.
1297 if (MF->getTarget().getOptLevel() == CodeGenOpt::None)
1298 return true;
1299
1300 unsigned Op = ID == Intrinsic::lifetime_start ? TargetOpcode::LIFETIME_START
1301 : TargetOpcode::LIFETIME_END;
1302
1303 // Get the underlying objects for the location passed on the lifetime
1304 // marker.
1305 SmallVector<const Value *, 4> Allocas;
1306 GetUnderlyingObjects(CI.getArgOperand(1), Allocas, *DL);
1307
1308 // Iterate over each underlying object, creating lifetime markers for each
1309 // static alloca. Quit if we find a non-static alloca.
1310 for (const Value *V : Allocas) {
1311 const AllocaInst *AI = dyn_cast<AllocaInst>(V);
1312 if (!AI)
1313 continue;
1314
1315 if (!AI->isStaticAlloca())
1316 return true;
1317
1318 MIRBuilder.buildInstr(Op).addFrameIndex(getOrCreateFrameIndex(*AI));
1319 }
1320 return true;
1321 }
1322 case Intrinsic::dbg_declare: {
1323 const DbgDeclareInst &DI = cast<DbgDeclareInst>(CI);
1324 assert(DI.getVariable() && "Missing variable");
1325
1326 const Value *Address = DI.getAddress();
1327 if (!Address || isa<UndefValue>(Address)) {
1328 LLVM_DEBUG(dbgs() << "Dropping debug info for " << DI << "\n");
1329 return true;
1330 }
1331
1332 assert(DI.getVariable()->isValidLocationForIntrinsic(
1333 MIRBuilder.getDebugLoc()) &&
1334 "Expected inlined-at fields to agree");
1335 auto AI = dyn_cast<AllocaInst>(Address);
1336 if (AI && AI->isStaticAlloca()) {
1337 // Static allocas are tracked at the MF level, no need for DBG_VALUE
1338 // instructions (in fact, they get ignored if they *do* exist).
1339 MF->setVariableDbgInfo(DI.getVariable(), DI.getExpression(),
1340 getOrCreateFrameIndex(*AI), DI.getDebugLoc());
1341 } else {
1342 // A dbg.declare describes the address of a source variable, so lower it
1343 // into an indirect DBG_VALUE.
1344 MIRBuilder.buildIndirectDbgValue(getOrCreateVReg(*Address),
1345 DI.getVariable(), DI.getExpression());
1346 }
1347 return true;
1348 }
1349 case Intrinsic::dbg_label: {
1350 const DbgLabelInst &DI = cast<DbgLabelInst>(CI);
1351 assert(DI.getLabel() && "Missing label");
1352
1353 assert(DI.getLabel()->isValidLocationForIntrinsic(
1354 MIRBuilder.getDebugLoc()) &&
1355 "Expected inlined-at fields to agree");
1356
1357 MIRBuilder.buildDbgLabel(DI.getLabel());
1358 return true;
1359 }
1360 case Intrinsic::vaend:
1361 // No target I know of cares about va_end. Certainly no in-tree target
1362 // does. Simplest intrinsic ever!
1363 return true;
1364 case Intrinsic::vastart: {
1365 auto &TLI = *MF->getSubtarget().getTargetLowering();
1366 Value *Ptr = CI.getArgOperand(0);
1367 unsigned ListSize = TLI.getVaListSizeInBits(*DL) / 8;
1368
1369 // FIXME: Get alignment
1370 MIRBuilder.buildInstr(TargetOpcode::G_VASTART)
1371 .addUse(getOrCreateVReg(*Ptr))
1372 .addMemOperand(MF->getMachineMemOperand(
1373 MachinePointerInfo(Ptr), MachineMemOperand::MOStore, ListSize, 1));
1374 return true;
1375 }
1376 case Intrinsic::dbg_value: {
1377 // This form of DBG_VALUE is target-independent.
1378 const DbgValueInst &DI = cast<DbgValueInst>(CI);
1379 const Value *V = DI.getValue();
1380 assert(DI.getVariable()->isValidLocationForIntrinsic(
1381 MIRBuilder.getDebugLoc()) &&
1382 "Expected inlined-at fields to agree");
1383 if (!V) {
1384 // Currently the optimizer can produce this; insert an undef to
1385 // help debugging. Probably the optimizer should not do this.
1386 MIRBuilder.buildIndirectDbgValue(0, DI.getVariable(), DI.getExpression());
1387 } else if (const auto *CI = dyn_cast<Constant>(V)) {
1388 MIRBuilder.buildConstDbgValue(*CI, DI.getVariable(), DI.getExpression());
1389 } else {
1390 Register Reg = getOrCreateVReg(*V);
1391 // FIXME: This does not handle register-indirect values at offset 0. The
1392 // direct/indirect thing shouldn't really be handled by something as
1393 // implicit as reg+noreg vs reg+imm in the first palce, but it seems
1394 // pretty baked in right now.
1395 MIRBuilder.buildDirectDbgValue(Reg, DI.getVariable(), DI.getExpression());
1396 }
1397 return true;
1398 }
1399 case Intrinsic::uadd_with_overflow:
1400 return translateOverflowIntrinsic(CI, TargetOpcode::G_UADDO, MIRBuilder);
1401 case Intrinsic::sadd_with_overflow:
1402 return translateOverflowIntrinsic(CI, TargetOpcode::G_SADDO, MIRBuilder);
1403 case Intrinsic::usub_with_overflow:
1404 return translateOverflowIntrinsic(CI, TargetOpcode::G_USUBO, MIRBuilder);
1405 case Intrinsic::ssub_with_overflow:
1406 return translateOverflowIntrinsic(CI, TargetOpcode::G_SSUBO, MIRBuilder);
1407 case Intrinsic::umul_with_overflow:
1408 return translateOverflowIntrinsic(CI, TargetOpcode::G_UMULO, MIRBuilder);
1409 case Intrinsic::smul_with_overflow:
1410 return translateOverflowIntrinsic(CI, TargetOpcode::G_SMULO, MIRBuilder);
1411 case Intrinsic::fmuladd: {
1412 const TargetMachine &TM = MF->getTarget();
1413 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1414 Register Dst = getOrCreateVReg(CI);
1415 Register Op0 = getOrCreateVReg(*CI.getArgOperand(0));
1416 Register Op1 = getOrCreateVReg(*CI.getArgOperand(1));
1417 Register Op2 = getOrCreateVReg(*CI.getArgOperand(2));
1418 if (TM.Options.AllowFPOpFusion != FPOpFusion::Strict &&
1419 TLI.isFMAFasterThanFMulAndFAdd(TLI.getValueType(*DL, CI.getType()))) {
1420 // TODO: Revisit this to see if we should move this part of the
1421 // lowering to the combiner.
1422 MIRBuilder.buildInstr(TargetOpcode::G_FMA, {Dst}, {Op0, Op1, Op2},
1423 MachineInstr::copyFlagsFromInstruction(CI));
1424 } else {
1425 LLT Ty = getLLTForType(*CI.getType(), *DL);
1426 auto FMul = MIRBuilder.buildInstr(TargetOpcode::G_FMUL, {Ty}, {Op0, Op1},
1427 MachineInstr::copyFlagsFromInstruction(CI));
1428 MIRBuilder.buildInstr(TargetOpcode::G_FADD, {Dst}, {FMul, Op2},
1429 MachineInstr::copyFlagsFromInstruction(CI));
1430 }
1431 return true;
1432 }
1433 case Intrinsic::memcpy:
1434 case Intrinsic::memmove:
1435 case Intrinsic::memset:
1436 return translateMemfunc(CI, MIRBuilder, ID);
1437 case Intrinsic::eh_typeid_for: {
1438 GlobalValue *GV = ExtractTypeInfo(CI.getArgOperand(0));
1439 Register Reg = getOrCreateVReg(CI);
1440 unsigned TypeID = MF->getTypeIDFor(GV);
1441 MIRBuilder.buildConstant(Reg, TypeID);
1442 return true;
1443 }
1444 case Intrinsic::objectsize: {
1445 // If we don't know by now, we're never going to know.
1446 const ConstantInt *Min = cast<ConstantInt>(CI.getArgOperand(1));
1447
1448 MIRBuilder.buildConstant(getOrCreateVReg(CI), Min->isZero() ? -1ULL : 0);
1449 return true;
1450 }
1451 case Intrinsic::is_constant:
1452 // If this wasn't constant-folded away by now, then it's not a
1453 // constant.
1454 MIRBuilder.buildConstant(getOrCreateVReg(CI), 0);
1455 return true;
1456 case Intrinsic::stackguard:
1457 getStackGuard(getOrCreateVReg(CI), MIRBuilder);
1458 return true;
1459 case Intrinsic::stackprotector: {
1460 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1461 Register GuardVal = MRI->createGenericVirtualRegister(PtrTy);
1462 getStackGuard(GuardVal, MIRBuilder);
1463
1464 AllocaInst *Slot = cast<AllocaInst>(CI.getArgOperand(1));
1465 int FI = getOrCreateFrameIndex(*Slot);
1466 MF->getFrameInfo().setStackProtectorIndex(FI);
1467
1468 MIRBuilder.buildStore(
1469 GuardVal, getOrCreateVReg(*Slot),
1470 *MF->getMachineMemOperand(MachinePointerInfo::getFixedStack(*MF, FI),
1471 MachineMemOperand::MOStore |
1472 MachineMemOperand::MOVolatile,
1473 PtrTy.getSizeInBits() / 8, 8));
1474 return true;
1475 }
1476 case Intrinsic::stacksave: {
1477 // Save the stack pointer to the location provided by the intrinsic.
1478 Register Reg = getOrCreateVReg(CI);
1479 Register StackPtr = MF->getSubtarget()
1480 .getTargetLowering()
1481 ->getStackPointerRegisterToSaveRestore();
1482
1483 // If the target doesn't specify a stack pointer, then fall back.
1484 if (!StackPtr)
1485 return false;
1486
1487 MIRBuilder.buildCopy(Reg, StackPtr);
1488 return true;
1489 }
1490 case Intrinsic::stackrestore: {
1491 // Restore the stack pointer from the location provided by the intrinsic.
1492 Register Reg = getOrCreateVReg(*CI.getArgOperand(0));
1493 Register StackPtr = MF->getSubtarget()
1494 .getTargetLowering()
1495 ->getStackPointerRegisterToSaveRestore();
1496
1497 // If the target doesn't specify a stack pointer, then fall back.
1498 if (!StackPtr)
1499 return false;
1500
1501 MIRBuilder.buildCopy(StackPtr, Reg);
1502 return true;
1503 }
1504 case Intrinsic::cttz:
1505 case Intrinsic::ctlz: {
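// The second operand is an i1 flag: when it is non-zero, a zero input gives
// an undefined result, which allows the *_ZERO_UNDEF opcodes.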
1506 ConstantInt *Cst = cast<ConstantInt>(CI.getArgOperand(1));
1507 bool isTrailing = ID == Intrinsic::cttz;
1508 unsigned Opcode = isTrailing
1509 ? Cst->isZero() ? TargetOpcode::G_CTTZ
1510 : TargetOpcode::G_CTTZ_ZERO_UNDEF
1511 : Cst->isZero() ? TargetOpcode::G_CTLZ
1512 : TargetOpcode::G_CTLZ_ZERO_UNDEF;
1513 MIRBuilder.buildInstr(Opcode)
1514 .addDef(getOrCreateVReg(CI))
1515 .addUse(getOrCreateVReg(*CI.getArgOperand(0)));
1516 return true;
1517 }
1518 case Intrinsic::invariant_start: {
1519 LLT PtrTy = getLLTForType(*CI.getArgOperand(0)->getType(), *DL);
1520 Register Undef = MRI->createGenericVirtualRegister(PtrTy);
1521 MIRBuilder.buildUndef(Undef);
1522 return true;
1523 }
1524 case Intrinsic::invariant_end:
1525 return true;
1526 case Intrinsic::assume:
1527 case Intrinsic::var_annotation:
1528 case Intrinsic::sideeffect:
1529 // Discard annotate attributes, assumptions, and artificial side-effects.
1530 return true;
1531 }
1532 return false;
1533 }
1534
1535 bool IRTranslator::translateInlineAsm(const CallInst &CI,
1536 MachineIRBuilder &MIRBuilder) {
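// Only inline asm with an empty constraint string (i.e. no operands) is
// supported for now; anything else is rejected so the caller can fall back.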
1537 const InlineAsm &IA = cast<InlineAsm>(*CI.getCalledValue());
1538 if (!IA.getConstraintString().empty())
1539 return false;
1540
1541 unsigned ExtraInfo = 0;
1542 if (IA.hasSideEffects())
1543 ExtraInfo |= InlineAsm::Extra_HasSideEffects;
1544 if (IA.getDialect() == InlineAsm::AD_Intel)
1545 ExtraInfo |= InlineAsm::Extra_AsmDialect;
1546
1547 MIRBuilder.buildInstr(TargetOpcode::INLINEASM)
1548 .addExternalSymbol(IA.getAsmString().c_str())
1549 .addImm(ExtraInfo);
1550
1551 return true;
1552 }
1553
1554 bool IRTranslator::translateCall(const User &U, MachineIRBuilder &MIRBuilder) {
1555 const CallInst &CI = cast<CallInst>(U);
1556 auto TII = MF->getTarget().getIntrinsicInfo();
1557 const Function *F = CI.getCalledFunction();
1558
1559 // FIXME: support Windows dllimport function calls.
1560 if (F && F->hasDLLImportStorageClass())
1561 return false;
1562
1563 if (CI.isInlineAsm())
1564 return translateInlineAsm(CI, MIRBuilder);
1565
1566 Intrinsic::ID ID = Intrinsic::not_intrinsic;
1567 if (F && F->isIntrinsic()) {
1568 ID = F->getIntrinsicID();
1569 if (TII && ID == Intrinsic::not_intrinsic)
1570 ID = static_cast<Intrinsic::ID>(TII->getIntrinsicID(F));
1571 }
1572
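// Calls that are not recognized intrinsics, whether direct or indirect, are
// handed off to the target's CallLowering implementation.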
1573 if (!F || !F->isIntrinsic() || ID == Intrinsic::not_intrinsic) {
1574 ArrayRef<Register> Res = getOrCreateVRegs(CI);
1575
1576 SmallVector<ArrayRef<Register>, 8> Args;
1577 Register SwiftInVReg = 0;
1578 Register SwiftErrorVReg = 0;
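// A swifterror argument is not passed by value: copy from the vreg currently
// holding the error value before the call, and create a new vreg for its
// definition after the call.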
1579 for (auto &Arg: CI.arg_operands()) {
1580 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
1581 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
1582 LLT Ty = getLLTForType(*Arg->getType(), *DL);
1583 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
1584 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
1585 &CI, &MIRBuilder.getMBB(), Arg));
1586 Args.emplace_back(makeArrayRef(SwiftInVReg));
1587 SwiftErrorVReg =
1588 SwiftError.getOrCreateVRegDefAt(&CI, &MIRBuilder.getMBB(), Arg);
1589 continue;
1590 }
1591 Args.push_back(getOrCreateVRegs(*Arg));
1592 }
1593
1594 MF->getFrameInfo().setHasCalls(true);
1595 bool Success =
1596 CLI->lowerCall(MIRBuilder, &CI, Res, Args, SwiftErrorVReg,
1597 [&]() { return getOrCreateVReg(*CI.getCalledValue()); });
1598
1599 return Success;
1600 }
1601
1602 assert(ID != Intrinsic::not_intrinsic && "unknown intrinsic");
1603
1604 if (translateKnownIntrinsic(CI, ID, MIRBuilder))
1605 return true;
1606
1607 ArrayRef<Register> ResultRegs;
1608 if (!CI.getType()->isVoidTy())
1609 ResultRegs = getOrCreateVRegs(CI);
1610
1611 // Ignore the callsite attributes. Backend code is most likely not expecting
1612 // an intrinsic to sometimes have side effects and sometimes not.
1613 MachineInstrBuilder MIB =
1614 MIRBuilder.buildIntrinsic(ID, ResultRegs, !F->doesNotAccessMemory());
1615 if (isa<FPMathOperator>(CI))
1616 MIB->copyIRFlags(CI);
1617
1618 for (auto &Arg : CI.arg_operands()) {
1619 // Some intrinsics take metadata parameters. Reject them.
1620 if (isa<MetadataAsValue>(Arg))
1621 return false;
1622 ArrayRef<Register> VRegs = getOrCreateVRegs(*Arg);
1623 if (VRegs.size() > 1)
1624 return false;
1625 MIB.addUse(VRegs[0]);
1626 }
1627
1628 // Add a MachineMemOperand if it is a target mem intrinsic.
1629 const TargetLowering &TLI = *MF->getSubtarget().getTargetLowering();
1630 TargetLowering::IntrinsicInfo Info;
1631 // TODO: Add a GlobalISel version of getTgtMemIntrinsic.
1632 if (TLI.getTgtMemIntrinsic(Info, CI, *MF, ID)) {
1633 unsigned Align = Info.align;
1634 if (Align == 0)
1635 Align = DL->getABITypeAlignment(Info.memVT.getTypeForEVT(F->getContext()));
1636
1637 uint64_t Size = Info.memVT.getStoreSize();
1638 MIB.addMemOperand(MF->getMachineMemOperand(MachinePointerInfo(Info.ptrVal),
1639 Info.flags, Size, Align));
1640 }
1641
1642 return true;
1643 }
1644
1645 bool IRTranslator::translateInvoke(const User &U,
1646 MachineIRBuilder &MIRBuilder) {
1647 const InvokeInst &I = cast<InvokeInst>(U);
1648 MCContext &Context = MF->getContext();
1649
1650 const BasicBlock *ReturnBB = I.getSuccessor(0);
1651 const BasicBlock *EHPadBB = I.getSuccessor(1);
1652
1653 const Value *Callee = I.getCalledValue();
1654 const Function *Fn = dyn_cast<Function>(Callee);
1655 if (isa<InlineAsm>(Callee))
1656 return false;
1657
1658 // FIXME: support invoking patchpoint and statepoint intrinsics.
1659 if (Fn && Fn->isIntrinsic())
1660 return false;
1661
1662 // FIXME: support whatever these are.
1663 if (I.countOperandBundlesOfType(LLVMContext::OB_deopt))
1664 return false;
1665
1666 // FIXME: support Windows exception handling.
1667 if (!isa<LandingPadInst>(EHPadBB->front()))
1668 return false;
1669
1670 // Emit the actual call, bracketed by EH_LABELs so that the MF knows about
1671 // the region covered by the try.
1672 MCSymbol *BeginSymbol = Context.createTempSymbol();
1673 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(BeginSymbol);
1674
1675 ArrayRef<Register> Res;
1676 if (!I.getType()->isVoidTy())
1677 Res = getOrCreateVRegs(I);
1678 SmallVector<ArrayRef<Register>, 8> Args;
1679 Register SwiftErrorVReg = 0;
1680 Register SwiftInVReg = 0;
1681 for (auto &Arg : I.arg_operands()) {
1682 if (CLI->supportSwiftError() && isSwiftError(Arg)) {
1683 assert(SwiftInVReg == 0 && "Expected only one swift error argument");
1684 LLT Ty = getLLTForType(*Arg->getType(), *DL);
1685 SwiftInVReg = MRI->createGenericVirtualRegister(Ty);
1686 MIRBuilder.buildCopy(SwiftInVReg, SwiftError.getOrCreateVRegUseAt(
1687 &I, &MIRBuilder.getMBB(), Arg));
1688 Args.push_back(makeArrayRef(SwiftInVReg));
1689 SwiftErrorVReg =
1690 SwiftError.getOrCreateVRegDefAt(&I, &MIRBuilder.getMBB(), Arg);
1691 continue;
1692 }
1693
1694 Args.push_back(getOrCreateVRegs(*Arg));
1695 }
1696
1697 if (!CLI->lowerCall(MIRBuilder, &I, Res, Args, SwiftErrorVReg,
1698 [&]() { return getOrCreateVReg(*I.getCalledValue()); }))
1699 return false;
1700
1701 MCSymbol *EndSymbol = Context.createTempSymbol();
1702 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL).addSym(EndSymbol);
1703
1704 // FIXME: track probabilities.
1705 MachineBasicBlock &EHPadMBB = getMBB(*EHPadBB),
1706 &ReturnMBB = getMBB(*ReturnBB);
1707 MF->addInvoke(&EHPadMBB, BeginSymbol, EndSymbol);
1708 MIRBuilder.getMBB().addSuccessor(&ReturnMBB);
1709 MIRBuilder.getMBB().addSuccessor(&EHPadMBB);
1710 MIRBuilder.buildBr(ReturnMBB);
1711
1712 return true;
1713 }
1714
1715 bool IRTranslator::translateCallBr(const User &U,
1716 MachineIRBuilder &MIRBuilder) {
1717 // FIXME: Implement this.
1718 return false;
1719 }
1720
1721 bool IRTranslator::translateLandingPad(const User &U,
1722 MachineIRBuilder &MIRBuilder) {
1723 const LandingPadInst &LP = cast<LandingPadInst>(U);
1724
1725 MachineBasicBlock &MBB = MIRBuilder.getMBB();
1726
1727 MBB.setIsEHPad();
1728
1729 // If there aren't registers to copy the values into (e.g., during SjLj
1730 // exceptions), then don't bother.
1731 auto &TLI = *MF->getSubtarget().getTargetLowering();
1732 const Constant *PersonalityFn = MF->getFunction().getPersonalityFn();
1733 if (TLI.getExceptionPointerRegister(PersonalityFn) == 0 &&
1734 TLI.getExceptionSelectorRegister(PersonalityFn) == 0)
1735 return true;
1736
1737 // If landingpad's return type is token type, we don't create instructions
1738 // for its exception pointer and selector value. The extraction of exception
1739 // pointer or selector value from token type landingpads is not currently
1740 // supported.
1741 if (LP.getType()->isTokenTy())
1742 return true;
1743
1744 // Add a label to mark the beginning of the landing pad. Deletion of the
1745 // landing pad can thus be detected via the MachineModuleInfo.
1746 MIRBuilder.buildInstr(TargetOpcode::EH_LABEL)
1747 .addSym(MF->addLandingPad(&MBB));
1748
1749 LLT Ty = getLLTForType(*LP.getType(), *DL);
1750 Register Undef = MRI->createGenericVirtualRegister(Ty);
1751 MIRBuilder.buildUndef(Undef);
1752
1753 SmallVector<LLT, 2> Tys;
1754 for (Type *Ty : cast<StructType>(LP.getType())->elements())
1755 Tys.push_back(getLLTForType(*Ty, *DL));
1756 assert(Tys.size() == 2 && "Only two-valued landingpads are supported");
1757
1758 // Mark exception register as live in.
1759 Register ExceptionReg = TLI.getExceptionPointerRegister(PersonalityFn);
1760 if (!ExceptionReg)
1761 return false;
1762
1763 MBB.addLiveIn(ExceptionReg);
1764 ArrayRef<Register> ResRegs = getOrCreateVRegs(LP);
1765 MIRBuilder.buildCopy(ResRegs[0], ExceptionReg);
1766
1767 Register SelectorReg = TLI.getExceptionSelectorRegister(PersonalityFn);
1768 if (!SelectorReg)
1769 return false;
1770
1771 MBB.addLiveIn(SelectorReg);
1772 Register PtrVReg = MRI->createGenericVirtualRegister(Tys[0]);
1773 MIRBuilder.buildCopy(PtrVReg, SelectorReg);
1774 MIRBuilder.buildCast(ResRegs[1], PtrVReg);
1775
1776 return true;
1777 }
1778
1779 bool IRTranslator::translateAlloca(const User &U,
1780 MachineIRBuilder &MIRBuilder) {
1781 auto &AI = cast<AllocaInst>(U);
1782
1783 if (AI.isSwiftError())
1784 return true;
1785
1786 if (AI.isStaticAlloca()) {
1787 Register Res = getOrCreateVReg(AI);
1788 int FI = getOrCreateFrameIndex(AI);
1789 MIRBuilder.buildFrameIndex(Res, FI);
1790 return true;
1791 }
1792
1793 // FIXME: support stack probing for Windows.
1794 if (MF->getTarget().getTargetTriple().isOSWindows())
1795 return false;
1796
1797 // Now we're in the harder dynamic case.
1798 Type *Ty = AI.getAllocatedType();
1799 unsigned Align =
1800 std::max((unsigned)DL->getPrefTypeAlignment(Ty), AI.getAlignment());
1801
1802 Register NumElts = getOrCreateVReg(*AI.getArraySize());
1803
1804 Type *IntPtrIRTy = DL->getIntPtrType(AI.getType());
1805 LLT IntPtrTy = getLLTForType(*IntPtrIRTy, *DL);
1806 if (MRI->getType(NumElts) != IntPtrTy) {
1807 Register ExtElts = MRI->createGenericVirtualRegister(IntPtrTy);
1808 MIRBuilder.buildZExtOrTrunc(ExtElts, NumElts);
1809 NumElts = ExtElts;
1810 }
1811
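// The type size below is negated so that the G_GEP off the stack pointer
// moves it downwards by NumElts * sizeof(Ty); this assumes the stack grows
// down, as in the equivalent SelectionDAG lowering.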
1812 Register AllocSize = MRI->createGenericVirtualRegister(IntPtrTy);
1813 Register TySize =
1814 getOrCreateVReg(*ConstantInt::get(IntPtrIRTy, -DL->getTypeAllocSize(Ty)));
1815 MIRBuilder.buildMul(AllocSize, NumElts, TySize);
1816
1817 LLT PtrTy = getLLTForType(*AI.getType(), *DL);
1818 auto &TLI = *MF->getSubtarget().getTargetLowering();
1819 Register SPReg = TLI.getStackPointerRegisterToSaveRestore();
1820
1821 Register SPTmp = MRI->createGenericVirtualRegister(PtrTy);
1822 MIRBuilder.buildCopy(SPTmp, SPReg);
1823
1824 Register AllocTmp = MRI->createGenericVirtualRegister(PtrTy);
1825 MIRBuilder.buildGEP(AllocTmp, SPTmp, AllocSize);
1826
1827 // Handle alignment. We have to realign if the allocation granule was smaller
1828 // than stack alignment, or the specific alloca requires more than stack
1829 // alignment.
1830 unsigned StackAlign =
1831 MF->getSubtarget().getFrameLowering()->getStackAlignment();
1832 Align = std::max(Align, StackAlign);
1833 if (Align > StackAlign || DL->getTypeAllocSize(Ty) % StackAlign != 0) {
1834 // Round the size of the allocation up to the stack alignment size
1835 // by adding SA-1 to the size. This doesn't overflow because we're computing
1836 // an address inside an alloca.
1837 Register AlignedAlloc = MRI->createGenericVirtualRegister(PtrTy);
1838 MIRBuilder.buildPtrMask(AlignedAlloc, AllocTmp, Log2_32(Align));
1839 AllocTmp = AlignedAlloc;
1840 }
1841
1842 MIRBuilder.buildCopy(SPReg, AllocTmp);
1843 MIRBuilder.buildCopy(getOrCreateVReg(AI), AllocTmp);
1844
1845 MF->getFrameInfo().CreateVariableSizedObject(Align ? Align : 1, &AI);
1846 assert(MF->getFrameInfo().hasVarSizedObjects());
1847 return true;
1848 }
1849
1850 bool IRTranslator::translateVAArg(const User &U, MachineIRBuilder &MIRBuilder) {
1851 // FIXME: We may need more info about the type. Because of how LLT works,
1852 // we're completely discarding the i64/double distinction here (amongst
1853 // others). Fortunately the ABIs I know of where that matters don't use va_arg
1854 // anyway but that's not guaranteed.
1855 MIRBuilder.buildInstr(TargetOpcode::G_VAARG)
1856 .addDef(getOrCreateVReg(U))
1857 .addUse(getOrCreateVReg(*U.getOperand(0)))
1858 .addImm(DL->getABITypeAlignment(U.getType()));
1859 return true;
1860 }
1861
1862 bool IRTranslator::translateInsertElement(const User &U,
1863 MachineIRBuilder &MIRBuilder) {
1864 // If it is a <1 x Ty> vector, use the scalar as it is
1865 // not a legal vector type in LLT.
1866 if (U.getType()->getVectorNumElements() == 1) {
1867 Register Elt = getOrCreateVReg(*U.getOperand(1));
1868 auto &Regs = *VMap.getVRegs(U);
1869 if (Regs.empty()) {
1870 Regs.push_back(Elt);
1871 VMap.getOffsets(U)->push_back(0);
1872 } else {
1873 MIRBuilder.buildCopy(Regs[0], Elt);
1874 }
1875 return true;
1876 }
1877
1878 Register Res = getOrCreateVReg(U);
1879 Register Val = getOrCreateVReg(*U.getOperand(0));
1880 Register Elt = getOrCreateVReg(*U.getOperand(1));
1881 Register Idx = getOrCreateVReg(*U.getOperand(2));
1882 MIRBuilder.buildInsertVectorElement(Res, Val, Elt, Idx);
1883 return true;
1884 }
1885
1886 bool IRTranslator::translateExtractElement(const User &U,
1887 MachineIRBuilder &MIRBuilder) {
1888 // If it is a <1 x Ty> vector, use the scalar as it is
1889 // not a legal vector type in LLT.
1890 if (U.getOperand(0)->getType()->getVectorNumElements() == 1) {
1891 Register Elt = getOrCreateVReg(*U.getOperand(0));
1892 auto &Regs = *VMap.getVRegs(U);
1893 if (Regs.empty()) {
1894 Regs.push_back(Elt);
1895 VMap.getOffsets(U)->push_back(0);
1896 } else {
1897 MIRBuilder.buildCopy(Regs[0], Elt);
1898 }
1899 return true;
1900 }
1901 Register Res = getOrCreateVReg(U);
1902 Register Val = getOrCreateVReg(*U.getOperand(0));
1903 const auto &TLI = *MF->getSubtarget().getTargetLowering();
1904 unsigned PreferredVecIdxWidth = TLI.getVectorIdxTy(*DL).getSizeInBits();
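// Canonicalize the index operand to the target's preferred vector index
// width, folding constant indices directly.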
1905 Register Idx;
1906 if (auto *CI = dyn_cast<ConstantInt>(U.getOperand(1))) {
1907 if (CI->getBitWidth() != PreferredVecIdxWidth) {
1908 APInt NewIdx = CI->getValue().sextOrTrunc(PreferredVecIdxWidth);
1909 auto *NewIdxCI = ConstantInt::get(CI->getContext(), NewIdx);
1910 Idx = getOrCreateVReg(*NewIdxCI);
1911 }
1912 }
1913 if (!Idx)
1914 Idx = getOrCreateVReg(*U.getOperand(1));
1915 if (MRI->getType(Idx).getSizeInBits() != PreferredVecIdxWidth) {
1916 const LLT &VecIdxTy = LLT::scalar(PreferredVecIdxWidth);
1917 Idx = MIRBuilder.buildSExtOrTrunc(VecIdxTy, Idx)->getOperand(0).getReg();
1918 }
1919 MIRBuilder.buildExtractVectorElement(Res, Val, Idx);
1920 return true;
1921 }
1922
1923 bool IRTranslator::translateShuffleVector(const User &U,
1924 MachineIRBuilder &MIRBuilder) {
1925 MIRBuilder.buildInstr(TargetOpcode::G_SHUFFLE_VECTOR)
1926 .addDef(getOrCreateVReg(U))
1927 .addUse(getOrCreateVReg(*U.getOperand(0)))
1928 .addUse(getOrCreateVReg(*U.getOperand(1)))
1929 .addUse(getOrCreateVReg(*U.getOperand(2)));
1930 return true;
1931 }
1932
1933 bool IRTranslator::translatePHI(const User &U, MachineIRBuilder &MIRBuilder) {
1934 const PHINode &PI = cast<PHINode>(U);
1935
1936 SmallVector<MachineInstr *, 4> Insts;
1937 for (auto Reg : getOrCreateVRegs(PI)) {
1938 auto MIB = MIRBuilder.buildInstr(TargetOpcode::G_PHI, {Reg}, {});
1939 Insts.push_back(MIB.getInstr());
1940 }
1941
1942 PendingPHIs.emplace_back(&PI, std::move(Insts));
1943 return true;
1944 }
1945
1946 bool IRTranslator::translateAtomicCmpXchg(const User &U,
1947 MachineIRBuilder &MIRBuilder) {
1948 const AtomicCmpXchgInst &I = cast<AtomicCmpXchgInst>(U);
1949
1950 if (I.isWeak())
1951 return false;
1952
1953 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1954 : MachineMemOperand::MONone;
1955 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1956
1957 Type *ResType = I.getType();
1958 Type *ValType = ResType->Type::getStructElementType(0);
1959
1960 auto Res = getOrCreateVRegs(I);
1961 Register OldValRes = Res[0];
1962 Register SuccessRes = Res[1];
1963 Register Addr = getOrCreateVReg(*I.getPointerOperand());
1964 Register Cmp = getOrCreateVReg(*I.getCompareOperand());
1965 Register NewVal = getOrCreateVReg(*I.getNewValOperand());
1966
1967 MIRBuilder.buildAtomicCmpXchgWithSuccess(
1968 OldValRes, SuccessRes, Addr, Cmp, NewVal,
1969 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
1970 Flags, DL->getTypeStoreSize(ValType),
1971 getMemOpAlignment(I), AAMDNodes(), nullptr,
1972 I.getSyncScopeID(), I.getSuccessOrdering(),
1973 I.getFailureOrdering()));
1974 return true;
1975 }
1976
1977 bool IRTranslator::translateAtomicRMW(const User &U,
1978 MachineIRBuilder &MIRBuilder) {
1979 const AtomicRMWInst &I = cast<AtomicRMWInst>(U);
1980
1981 auto Flags = I.isVolatile() ? MachineMemOperand::MOVolatile
1982 : MachineMemOperand::MONone;
1983 Flags |= MachineMemOperand::MOLoad | MachineMemOperand::MOStore;
1984
1985 Type *ResType = I.getType();
1986
1987 Register Res = getOrCreateVReg(I);
1988 Register Addr = getOrCreateVReg(*I.getPointerOperand());
1989 Register Val = getOrCreateVReg(*I.getValOperand());
1990
1991 unsigned Opcode = 0;
1992 switch (I.getOperation()) {
1993 default:
1994 llvm_unreachable("Unknown atomicrmw op");
1995 return false;
1996 case AtomicRMWInst::Xchg:
1997 Opcode = TargetOpcode::G_ATOMICRMW_XCHG;
1998 break;
1999 case AtomicRMWInst::Add:
2000 Opcode = TargetOpcode::G_ATOMICRMW_ADD;
2001 break;
2002 case AtomicRMWInst::Sub:
2003 Opcode = TargetOpcode::G_ATOMICRMW_SUB;
2004 break;
2005 case AtomicRMWInst::And:
2006 Opcode = TargetOpcode::G_ATOMICRMW_AND;
2007 break;
2008 case AtomicRMWInst::Nand:
2009 Opcode = TargetOpcode::G_ATOMICRMW_NAND;
2010 break;
2011 case AtomicRMWInst::Or:
2012 Opcode = TargetOpcode::G_ATOMICRMW_OR;
2013 break;
2014 case AtomicRMWInst::Xor:
2015 Opcode = TargetOpcode::G_ATOMICRMW_XOR;
2016 break;
2017 case AtomicRMWInst::Max:
2018 Opcode = TargetOpcode::G_ATOMICRMW_MAX;
2019 break;
2020 case AtomicRMWInst::Min:
2021 Opcode = TargetOpcode::G_ATOMICRMW_MIN;
2022 break;
2023 case AtomicRMWInst::UMax:
2024 Opcode = TargetOpcode::G_ATOMICRMW_UMAX;
2025 break;
2026 case AtomicRMWInst::UMin:
2027 Opcode = TargetOpcode::G_ATOMICRMW_UMIN;
2028 break;
2029 }
2030
2031 MIRBuilder.buildAtomicRMW(
2032 Opcode, Res, Addr, Val,
2033 *MF->getMachineMemOperand(MachinePointerInfo(I.getPointerOperand()),
2034 Flags, DL->getTypeStoreSize(ResType),
2035 getMemOpAlignment(I), AAMDNodes(), nullptr,
2036 I.getSyncScopeID(), I.getOrdering()));
2037 return true;
2038 }
2039
2040 bool IRTranslator::translateFence(const User &U,
2041 MachineIRBuilder &MIRBuilder) {
2042 const FenceInst &Fence = cast<FenceInst>(U);
2043 MIRBuilder.buildFence(static_cast<unsigned>(Fence.getOrdering()),
2044 Fence.getSyncScopeID());
2045 return true;
2046 }
2047
2048 void IRTranslator::finishPendingPhis() {
2049 #ifndef NDEBUG
2050 DILocationVerifier Verifier;
2051 GISelObserverWrapper WrapperObserver(&Verifier);
2052 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2053 #endif // ifndef NDEBUG
2054 for (auto &Phi : PendingPHIs) {
2055 const PHINode *PI = Phi.first;
2056 ArrayRef<MachineInstr *> ComponentPHIs = Phi.second;
2057 MachineBasicBlock *PhiMBB = ComponentPHIs[0]->getParent();
2058 EntryBuilder->setDebugLoc(PI->getDebugLoc());
2059 #ifndef NDEBUG
2060 Verifier.setCurrentInst(PI);
2061 #endif // ifndef NDEBUG
2062
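// Guard against adding duplicate PHI operands when several IR edges map to
// the same machine predecessor block.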
2063 SmallSet<const MachineBasicBlock *, 16> SeenPreds;
2064 for (unsigned i = 0; i < PI->getNumIncomingValues(); ++i) {
2065 auto IRPred = PI->getIncomingBlock(i);
2066 ArrayRef<Register> ValRegs = getOrCreateVRegs(*PI->getIncomingValue(i));
2067 for (auto Pred : getMachinePredBBs({IRPred, PI->getParent()})) {
2068 if (SeenPreds.count(Pred) || !PhiMBB->isPredecessor(Pred))
2069 continue;
2070 SeenPreds.insert(Pred);
2071 for (unsigned j = 0; j < ValRegs.size(); ++j) {
2072 MachineInstrBuilder MIB(*MF, ComponentPHIs[j]);
2073 MIB.addUse(ValRegs[j]);
2074 MIB.addMBB(Pred);
2075 }
2076 }
2077 }
2078 }
2079 }
2080
2081 bool IRTranslator::valueIsSplit(const Value &V,
2082 SmallVectorImpl<uint64_t> *Offsets) {
2083 SmallVector<LLT, 4> SplitTys;
2084 if (Offsets && !Offsets->empty())
2085 Offsets->clear();
2086 computeValueLLTs(*DL, *V.getType(), SplitTys, Offsets);
2087 return SplitTys.size() > 1;
2088 }
2089
2090 bool IRTranslator::translate(const Instruction &Inst) {
2091 CurBuilder->setDebugLoc(Inst.getDebugLoc());
2092 // We only emit constants into the entry block from here. To prevent jumpy
2093 // debug behaviour, set the line to 0.
2094 if (const DebugLoc &DL = Inst.getDebugLoc())
2095 EntryBuilder->setDebugLoc(
2096 DebugLoc::get(0, 0, DL.getScope(), DL.getInlinedAt()));
2097 else
2098 EntryBuilder->setDebugLoc(DebugLoc());
2099
2100 switch (Inst.getOpcode()) {
2101 #define HANDLE_INST(NUM, OPCODE, CLASS) \
2102 case Instruction::OPCODE: \
2103 return translate##OPCODE(Inst, *CurBuilder.get());
2104 #include "llvm/IR/Instruction.def"
2105 default:
2106 return false;
2107 }
2108 }
2109
2110 bool IRTranslator::translate(const Constant &C, Register Reg) {
2111 if (auto CI = dyn_cast<ConstantInt>(&C))
2112 EntryBuilder->buildConstant(Reg, *CI);
2113 else if (auto CF = dyn_cast<ConstantFP>(&C))
2114 EntryBuilder->buildFConstant(Reg, *CF);
2115 else if (isa<UndefValue>(C))
2116 EntryBuilder->buildUndef(Reg);
2117 else if (isa<ConstantPointerNull>(C)) {
2118 // As we are trying to build a constant value of 0 into a pointer,
2119 // insert a cast so the types are correct.
2120 unsigned NullSize = DL->getTypeSizeInBits(C.getType());
2121 auto *ZeroTy = Type::getIntNTy(C.getContext(), NullSize);
2122 auto *ZeroVal = ConstantInt::get(ZeroTy, 0);
2123 Register ZeroReg = getOrCreateVReg(*ZeroVal);
2124 EntryBuilder->buildCast(Reg, ZeroReg);
2125 } else if (auto GV = dyn_cast<GlobalValue>(&C))
2126 EntryBuilder->buildGlobalValue(Reg, GV);
2127 else if (auto CAZ = dyn_cast<ConstantAggregateZero>(&C)) {
2128 if (!CAZ->getType()->isVectorTy())
2129 return false;
2130 // Return the scalar if it is a <1 x Ty> vector.
2131 if (CAZ->getNumElements() == 1)
2132 return translate(*CAZ->getElementValue(0u), Reg);
2133 SmallVector<Register, 4> Ops;
2134 for (unsigned i = 0; i < CAZ->getNumElements(); ++i) {
2135 Constant &Elt = *CAZ->getElementValue(i);
2136 Ops.push_back(getOrCreateVReg(Elt));
2137 }
2138 EntryBuilder->buildBuildVector(Reg, Ops);
2139 } else if (auto CV = dyn_cast<ConstantDataVector>(&C)) {
2140 // Return the scalar if it is a <1 x Ty> vector.
2141 if (CV->getNumElements() == 1)
2142 return translate(*CV->getElementAsConstant(0), Reg);
2143 SmallVector<Register, 4> Ops;
2144 for (unsigned i = 0; i < CV->getNumElements(); ++i) {
2145 Constant &Elt = *CV->getElementAsConstant(i);
2146 Ops.push_back(getOrCreateVReg(Elt));
2147 }
2148 EntryBuilder->buildBuildVector(Reg, Ops);
2149 } else if (auto CE = dyn_cast<ConstantExpr>(&C)) {
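// Constant expressions are lowered by dispatching to the same per-opcode
// translate* handlers used for instructions, emitting into the entry block
// builder.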
2150 switch(CE->getOpcode()) {
2151 #define HANDLE_INST(NUM, OPCODE, CLASS) \
2152 case Instruction::OPCODE: \
2153 return translate##OPCODE(*CE, *EntryBuilder.get());
2154 #include "llvm/IR/Instruction.def"
2155 default:
2156 return false;
2157 }
2158 } else if (auto CV = dyn_cast<ConstantVector>(&C)) {
2159 if (CV->getNumOperands() == 1)
2160 return translate(*CV->getOperand(0), Reg);
2161 SmallVector<Register, 4> Ops;
2162 for (unsigned i = 0; i < CV->getNumOperands(); ++i) {
2163 Ops.push_back(getOrCreateVReg(*CV->getOperand(i)));
2164 }
2165 EntryBuilder->buildBuildVector(Reg, Ops);
2166 } else if (auto *BA = dyn_cast<BlockAddress>(&C)) {
2167 EntryBuilder->buildBlockAddress(Reg, BA);
2168 } else
2169 return false;
2170
2171 return true;
2172 }
2173
2174 void IRTranslator::finalizeBasicBlock() {
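// Emit the jump table headers and bodies recorded by switch lowering for
// this block.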
2175 for (auto &JTCase : SL->JTCases) {
2176 // Emit header first, if it wasn't already emitted.
2177 if (!JTCase.first.Emitted)
2178 emitJumpTableHeader(JTCase.second, JTCase.first, JTCase.first.HeaderBB);
2179
2180 emitJumpTable(JTCase.second, JTCase.second.MBB);
2181 }
2182 SL->JTCases.clear();
2183 }
2184
2185 void IRTranslator::finalizeFunction() {
2186 // Release the memory used by the different maps we
2187 // needed during the translation.
2188 PendingPHIs.clear();
2189 VMap.reset();
2190 FrameIndices.clear();
2191 MachinePreds.clear();
2192 // MachineIRBuilder::DebugLoc can outlive the DILocation it holds. Clear it
2193 // to avoid accessing freed memory (in runOnMachineFunction) and to avoid
2194 // destroying it twice (in ~IRTranslator() and ~LLVMContext()).
2195 EntryBuilder.reset();
2196 CurBuilder.reset();
2197 FuncInfo.clear();
2198 }
2199
2200 bool IRTranslator::runOnMachineFunction(MachineFunction &CurMF) {
2201 MF = &CurMF;
2202 const Function &F = MF->getFunction();
2203 if (F.empty())
2204 return false;
2205 GISelCSEAnalysisWrapper &Wrapper =
2206 getAnalysis<GISelCSEAnalysisWrapperPass>().getCSEWrapper();
2207 // Set the CSEConfig and run the analysis.
2208 GISelCSEInfo *CSEInfo = nullptr;
2209 TPC = &getAnalysis<TargetPassConfig>();
2210 bool EnableCSE = EnableCSEInIRTranslator.getNumOccurrences()
2211 ? EnableCSEInIRTranslator
2212 : TPC->isGISelCSEEnabled();
2213
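// With CSE enabled, CSEMIRBuilders deduplicate identical instructions
// (e.g. repeated constants) as they are emitted during translation.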
2214 if (EnableCSE) {
2215 EntryBuilder = make_unique<CSEMIRBuilder>(CurMF);
2216 CSEInfo = &Wrapper.get(TPC->getCSEConfig());
2217 EntryBuilder->setCSEInfo(CSEInfo);
2218 CurBuilder = make_unique<CSEMIRBuilder>(CurMF);
2219 CurBuilder->setCSEInfo(CSEInfo);
2220 } else {
2221 EntryBuilder = make_unique<MachineIRBuilder>();
2222 CurBuilder = make_unique<MachineIRBuilder>();
2223 }
2224 CLI = MF->getSubtarget().getCallLowering();
2225 CurBuilder->setMF(*MF);
2226 EntryBuilder->setMF(*MF);
2227 MRI = &MF->getRegInfo();
2228 DL = &F.getParent()->getDataLayout();
2229 ORE = llvm::make_unique<OptimizationRemarkEmitter>(&F);
2230 FuncInfo.MF = MF;
2231 FuncInfo.BPI = nullptr;
2232 const auto &TLI = *MF->getSubtarget().getTargetLowering();
2233 const TargetMachine &TM = MF->getTarget();
2234 SL = make_unique<GISelSwitchLowering>(this, FuncInfo);
2235 SL->init(TLI, TM, *DL);
2236
2237 EnableOpts = TM.getOptLevel() != CodeGenOpt::None && !skipFunction(F);
2238
2239 assert(PendingPHIs.empty() && "stale PHIs");
2240
2241 if (!DL->isLittleEndian()) {
2242 // Currently we don't properly handle big endian code.
2243 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2244 F.getSubprogram(), &F.getEntryBlock());
2245 R << "unable to translate in big endian mode";
2246 reportTranslationError(*MF, *TPC, *ORE, R);
2247 }
2248
2249 // Release the per-function state when we return, whether we succeeded or not.
2250 auto FinalizeOnReturn = make_scope_exit([this]() { finalizeFunction(); });
2251
2252 // Set up a separate basic-block for the arguments and constants
2253 MachineBasicBlock *EntryBB = MF->CreateMachineBasicBlock();
2254 MF->push_back(EntryBB);
2255 EntryBuilder->setMBB(*EntryBB);
2256
2257 DebugLoc DbgLoc = F.getEntryBlock().getFirstNonPHI()->getDebugLoc();
2258 SwiftError.setFunction(CurMF);
2259 SwiftError.createEntriesInEntryBlock(DbgLoc);
2260
2261 // Create all blocks, in IR order, to preserve the layout.
2262 for (const BasicBlock &BB: F) {
2263 auto *&MBB = BBToMBB[&BB];
2264
2265 MBB = MF->CreateMachineBasicBlock(&BB);
2266 MF->push_back(MBB);
2267
2268 if (BB.hasAddressTaken())
2269 MBB->setHasAddressTaken();
2270 }
2271
2272 // Make our arguments/constants entry block fall through to the IR entry block.
2273 EntryBB->addSuccessor(&getMBB(F.front()));
2274
2275 // Lower the actual args into this basic block.
2276 SmallVector<ArrayRef<Register>, 8> VRegArgs;
2277 for (const Argument &Arg: F.args()) {
2278 if (DL->getTypeStoreSize(Arg.getType()) == 0)
2279 continue; // Don't handle zero sized types.
2280 ArrayRef<Register> VRegs = getOrCreateVRegs(Arg);
2281 VRegArgs.push_back(VRegs);
2282
2283 if (Arg.hasSwiftErrorAttr()) {
2284 assert(VRegs.size() == 1 && "Too many vregs for Swift error");
2285 SwiftError.setCurrentVReg(EntryBB, SwiftError.getFunctionArg(), VRegs[0]);
2286 }
2287 }
2288
2289 // We don't currently support translating swifterror or swiftself functions.
2290 for (auto &Arg : F.args()) {
2291 if (Arg.hasSwiftSelfAttr()) {
2292 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2293 F.getSubprogram(), &F.getEntryBlock());
2294 R << "unable to lower arguments due to swiftself: "
2295 << ore::NV("Prototype", F.getType());
2296 reportTranslationError(*MF, *TPC, *ORE, R);
2297 return false;
2298 }
2299 }
2300
2301 if (!CLI->lowerFormalArguments(*EntryBuilder.get(), F, VRegArgs)) {
2302 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2303 F.getSubprogram(), &F.getEntryBlock());
2304 R << "unable to lower arguments: " << ore::NV("Prototype", F.getType());
2305 reportTranslationError(*MF, *TPC, *ORE, R);
2306 return false;
2307 }
2308
2309 // Need to visit defs before uses when translating instructions.
2310 GISelObserverWrapper WrapperObserver;
2311 if (EnableCSE && CSEInfo)
2312 WrapperObserver.addObserver(CSEInfo);
2313 {
2314 ReversePostOrderTraversal<const Function *> RPOT(&F);
2315 #ifndef NDEBUG
2316 DILocationVerifier Verifier;
2317 WrapperObserver.addObserver(&Verifier);
2318 #endif // ifndef NDEBUG
2319 RAIIDelegateInstaller DelInstall(*MF, &WrapperObserver);
2320 for (const BasicBlock *BB : RPOT) {
2321 MachineBasicBlock &MBB = getMBB(*BB);
2322 // Set the insertion point of all the following translations to
2323 // the end of this basic block.
2324 CurBuilder->setMBB(MBB);
2325
2326 for (const Instruction &Inst : *BB) {
2327 #ifndef NDEBUG
2328 Verifier.setCurrentInst(&Inst);
2329 #endif // ifndef NDEBUG
2330 if (translate(Inst))
2331 continue;
2332
2333 OptimizationRemarkMissed R("gisel-irtranslator", "GISelFailure",
2334 Inst.getDebugLoc(), BB);
2335 R << "unable to translate instruction: " << ore::NV("Opcode", &Inst);
2336
2337 if (ORE->allowExtraAnalysis("gisel-irtranslator")) {
2338 std::string InstStrStorage;
2339 raw_string_ostream InstStr(InstStrStorage);
2340 InstStr << Inst;
2341
2342 R << ": '" << InstStr.str() << "'";
2343 }
2344
2345 reportTranslationError(*MF, *TPC, *ORE, R);
2346 return false;
2347 }
2348
2349 finalizeBasicBlock();
2350 }
2351 #ifndef NDEBUG
2352 WrapperObserver.removeObserver(&Verifier);
2353 #endif
2354 }
2355
2356 finishPendingPhis();
2357
2358 SwiftError.propagateVRegs();
2359
2360 // Merge the argument lowering and constants block with its single
2361 // successor, the LLVM-IR entry block. We want the basic block to
2362 // be maximal.
2363 assert(EntryBB->succ_size() == 1 &&
2364 "Custom BB used for lowering should have only one successor");
2365 // Get the successor of the current entry block.
2366 MachineBasicBlock &NewEntryBB = **EntryBB->succ_begin();
2367 assert(NewEntryBB.pred_size() == 1 &&
2368 "LLVM-IR entry block has a predecessor!?");
2369 // Move all the instruction from the current entry block to the
2370 // new entry block.
2371 NewEntryBB.splice(NewEntryBB.begin(), EntryBB, EntryBB->begin(),
2372 EntryBB->end());
2373
2374 // Update the live-in information for the new entry block.
2375 for (const MachineBasicBlock::RegisterMaskPair &LiveIn : EntryBB->liveins())
2376 NewEntryBB.addLiveIn(LiveIn);
2377 NewEntryBB.sortUniqueLiveIns();
2378
2379 // Get rid of the now empty basic block.
2380 EntryBB->removeSuccessor(&NewEntryBB);
2381 MF->remove(EntryBB);
2382 MF->DeleteMachineBasicBlock(EntryBB);
2383
2384 assert(&MF->front() == &NewEntryBB &&
2385 "New entry wasn't next in the list of basic block!");
2386
2387 // Initialize stack protector information.
2388 StackProtector &SP = getAnalysis<StackProtector>();
2389 SP.copyToMachineFrameInfo(MF->getFrameInfo());
2390
2391 return false;
2392 }
2393