//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <cstdint>
#include <iterator>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

void FastISel::finishBasicBlock() { flushLocalValueMap(); }

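/// Lower the function arguments via the target's fastLowerArguments hook and
/// record each argument's virtual register in the ValueMap so uses in
/// non-entry blocks can find it. Returns false to fall back to SelectionDAG
/// argument lowering (e.g. for sret pointer parameters).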
bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
static Register findLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return Register();
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't delete it.
      return Register();
    }
  }
  return RegDef;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

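/// Flush the local value map: erase local value instructions that turned out
/// to be dead, make sure the first surviving local value instruction carries
/// a debug location, and reset the insertion point for the next instruction.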
void FastISel::flushLocalValueMap() {
  // If FastISel bails out, it could leave local value instructions behind
  // that aren't used for anything. Detect and erase those.
  if (LastLocalValue != EmitStartPt) {
    // Save the first instruction after local values, for later.
    MachineBasicBlock::iterator FirstNonValue(LastLocalValue);
    ++FirstNonValue;

    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);
    for (; RI != RE;) {
      MachineInstr &LocalMI = *RI;
      // Increment before erasing what it points to.
      ++RI;
      Register DefReg = findLocalRegDef(LocalMI);
      if (!DefReg)
        continue;
      if (FuncInfo.RegsWithFixups.count(DefReg))
        continue;
      bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
      if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
        if (EmitStartPt == &LocalMI)
          EmitStartPt = EmitStartPt->getPrevNode();
        LLVM_DEBUG(dbgs() << "removing dead local value materialization"
                          << LocalMI);
        LocalMI.eraseFromParent();
      }
    }

    if (FirstNonValue != FuncInfo.MBB->end()) {
      // See if there are any local value instructions left. If so, we want to
      // make sure the first one has a debug location; if it doesn't, use the
      // first non-value instruction's debug location.

      // If EmitStartPt is non-null, this block had copies at the top before
      // FastISel started doing anything; it points to the last one, so the
      // first local value instruction is the one after EmitStartPt.
      // If EmitStartPt is null, the first local value instruction is at the
      // top of the block.
      MachineBasicBlock::iterator FirstLocalValue =
          EmitStartPt ? ++MachineBasicBlock::iterator(EmitStartPt)
                      : FuncInfo.MBB->begin();
      if (FirstLocalValue != FirstNonValue && !FirstLocalValue->getDebugLoc())
        FirstLocalValue->setDebugLoc(FirstNonValue->getDebugLoc());
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

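/// Target-independent fallback for constant materialization. Handles integer
/// constants, static allocas, null pointers, floating-point constants
/// (directly, or via an integer constant plus a sint-to-fp conversion),
/// constant operators, and undef values.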
Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP,
                           IntegerReg);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return Register();
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

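/// Record that IR value I lives in Reg (and, for multi-register values, in
/// the NumRegs consecutive registers starting at Reg). If I already had a
/// register assigned, queue fixups so earlier references to the old register
/// are rewritten to the new one.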
void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

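/// Materialize a GEP index operand in a register, sign-extending or
/// truncating it to pointer width so it can be used in address arithmetic.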
Register FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return Register();

  // If the index is smaller or larger than intptr_t, truncate or extend it.
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN);
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN);
  }
  return IdxN;
}

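/// Position the insertion point immediately after the last local value
/// instruction (or at the first non-PHI if there is none), then skip past any
/// EH_LABELs, which must remain at the top of the block.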
void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

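/// Erase the machine instructions in the half-open range [I, E), updating
/// SavedInsertPt, EmitStartPt, and LastLocalValue if they point into the
/// erased range, and recompute the insertion point afterwards.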
void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

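/// Switch the insertion point to the end of the local value area, returning
/// the previous insertion point so the caller can restore it afterwards.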
FastISel::SavePoint FastISel::enterLocalValueArea() {
  SavePoint OldInsertPt = FuncInfo.InsertPt;
  recomputeInsertPt();
  return OldInsertPt;
}

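/// Record the last instruction emitted in the local value area and restore
/// the insertion point that was saved by the matching enterLocalValueArea().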
void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt;
}

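/// Select a binary operator, preferring register-immediate forms when one
/// operand is a constant and applying two cheap strength reductions: an exact
/// sdiv by a power of two becomes a shift, and a urem by a power of two
/// becomes an and-mask.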
bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, CI->getZExtValue(),
                       VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0, Imm,
                                      VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op1);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

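/// Select a getelementptr. Constant offsets are accumulated into a running
/// total so that consecutive struct and array offsets fold into a single add,
/// while each variable index is emitted as a multiply by the element size
/// followed by an add.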
bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      Register IdxN = getRegForGEPIndex(Idx);
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, IdxN);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

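/// Append the live-variable operands of a stackmap or patchpoint intrinsic to
/// Ops. Integer and null-pointer constants are encoded behind a
/// StackMaps::ConstantOp marker, static allocas become frame indices, and
/// everything else is materialized into a register.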
bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPS (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee = I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

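/// Lower llvm.xray.customevent to a PATCHABLE_EVENT_CALL pseudo instruction.
/// Per the check below, this is only emitted for x86_64 Linux; on other
/// targets the intrinsic is treated as a no-op.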
bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Event Call instruction, that gets lowered properly.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // Insert the Patchable Typed Event Call instruction, that gets lowered
  // properly.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

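/// Target-independent part of call lowering: compute the registers needed for
/// the return value, translate each argument's attributes into ISD argument
/// flags, and then let the target's fastLowerCall hook emit the actual call.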
bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = Arg.IndirectType;
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg, DL);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftAsync)
      Flags.setSwiftAsync();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    MaybeAlign MemAlign = Arg.Alignment;
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      unsigned FrameSize = DL.getTypeAllocSize(Arg.IndirectType);

      // For ByVal, alignment should come from FE. BE will guess if this info
      // is not there, but there are cases it cannot get right.
      if (!MemAlign)
        MemAlign = Align(TLI.getByValTypeAlignment(Arg.IndirectType, DL));
      Flags.setByValSize(FrameSize);
    } else if (!MemAlign) {
      MemAlign = DL.getABITypeAlign(Arg.Ty);
    }
    Flags.setMemAlign(*MemAlign);
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));
    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

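/// Lower an ordinary IR call: build the argument list, decide whether the
/// target-independent tail-call constraints allow a tail call, and delegate
/// to lowerCallTo.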
bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && MF->getFunction()
                        .getFnAttribute("disable-tail-calls")
                        .getValueAsBool())
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

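/// Select a call instruction. Constraint-free inline assembly is emitted
/// directly as an INLINEASM instruction, intrinsics are dispatched to
/// selectIntrinsicCall, and everything else goes through lowerCall.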
bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  return lowerCall(Call);
}

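/// Select a call to an intrinsic. No-op intrinsics (lifetime markers, assume,
/// and friends) are simply discarded, the debug intrinsics are lowered to
/// DBG_* pseudo instructions, and anything not handled here falls through to
/// the target's fastLowerIntrinsicCall hook.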
bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
  switch (II->getIntrinsicID()) {
  default:
    break;
  // At -O0 we don't care about the lifetime intrinsics.
  case Intrinsic::lifetime_start:
  case Intrinsic::lifetime_end:
  // The donothing intrinsic does, well, nothing.
  case Intrinsic::donothing:
  // Neither does the sideeffect intrinsic.
  case Intrinsic::sideeffect:
  // Neither does the assume intrinsic; it's also OK not to codegen its
  // operand.
  case Intrinsic::assume:
  // Neither does the llvm.experimental.noalias.scope.decl intrinsic.
  case Intrinsic::experimental_noalias_scope_decl:
    return true;
  case Intrinsic::dbg_declare: {
    const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
    assert(DI->getVariable() && "Missing variable");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (!hasDebugInfo)\n");
      return true;
    }

    const Value *Address = DI->getAddress();
    if (!Address || isa<UndefValue>(Address)) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (bad/undef address)\n");
      return true;
    }

    // Byval arguments with frame indices were already handled after argument
    // lowering and before isel.
    const auto *Arg =
        dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
    if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
      return true;

    Optional<MachineOperand> Op;
    if (Register Reg = lookUpRegForValue(Address))
      Op = MachineOperand::CreateReg(Reg, false);

    // If we have a VLA that has a "use" in a metadata node that's then used
    // here but it has no other uses, then we have a problem. E.g.,
    //
    //   int foo (const int *x) {
    //     char a[*x];
    //     return 0;
    //   }
    //
    // If we assign 'a' a vreg and fast isel later on has to use the selection
    // DAG isel, it will want to copy the value to the vreg. However, there are
    // no uses, which goes counter to what selection DAG isel expects.
    if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
        (!isa<AllocaInst>(Address) ||
         !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
      Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
                                     false);

    if (Op) {
      assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
             "Expected inlined-at fields to agree");
      // A dbg.declare describes the address of a source variable, so lower it
      // into an indirect DBG_VALUE.
      auto Builder =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                  TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true, *Op,
                  DI->getVariable(), DI->getExpression());

      // If using instruction referencing, mutate this into a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs. Tack a deref onto
      // the expression, as we don't have an "indirect" flag in DBG_INSTR_REF.
      if (TM.Options.ValueTrackingVariableLocations && Op->isReg()) {
        Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
        Builder->getOperand(1).ChangeToImmediate(0);
        auto *NewExpr = DIExpression::prepend(DI->getExpression(),
                                              DIExpression::DerefBefore);
        Builder->getOperand(3).setMetadata(NewExpr);
      }
    } else {
      // We can't yet handle anything else here because it would require
      // generating code, thus altering codegen because of debug info.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
                        << " (no materialized reg for address)\n");
    }
    return true;
  }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V) || DI->hasArgList()) {
      // DI is either undef or cannot produce a valid DBG_VALUE, so produce an
      // undef DBG_VALUE to terminate any prior location.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (Register Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      auto Builder =
          BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
                  DI->getVariable(), DI->getExpression());

      // If using instruction referencing, mutate this into a DBG_INSTR_REF,
      // to be later patched up by finalizeDebugInstrRefs.
      if (TM.Options.ValueTrackingVariableLocations) {
        Builder->setDesc(TII.get(TargetOpcode::DBG_INSTR_REF));
        Builder->getOperand(1).ChangeToImmediate(0);
      }
    } else {
      // We don't know how to handle other cases, so we drop.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);
  }

  return fastLowerIntrinsicCall(II);
}

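/// Select a cast instruction (extension, truncation, or conversion) as a
/// single register-to-register operation for the given ISD opcode, provided
/// both the source and destination types are legal.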
bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

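/// Select a bitcast. A no-op bitcast reuses the operand's register; a
/// same-size bitcast is first attempted as a register-to-register copy and
/// otherwise emitted as a target BITCAST operation.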
bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // First, try to perform the bitcast by inserting a reg-reg copy.
  Register ResultReg;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

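/// Select a freeze instruction: the frozen value is simply copied into a
/// fresh register of the same register class.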
bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue) {
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

bool FastISel::selectInstruction(const Instruction *I) {
  // Flush the local value map before starting each instruction.
  // This improves locality and debugging, and can reduce spills.
  // Reuse of values across IR instructions is relatively uncommon.
  flushLocalValueMap();

  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
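/// For example (illustrative), given the block layout [BB0, BB1], a branch in
/// BB0 targeting BB1 normally emits no instruction (fall-through), whereas a
/// branch to a non-adjacent block is emitted via TII.insertBranch. The one
/// exception, for nearly empty blocks, is described in the body below.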
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions.
    // Note that if the branch were the only non-debug instruction in the
    // block, the size check above would fail and we would emit the branch
    // below anyway, for more accurate line information.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block appear
  // twice in the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
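/// As a sketch of the fallback strategy used below when the target has no
/// FNEG: a 64-bit fneg is emitted roughly as (illustrative, shown as IR)
///   %i   = bitcast double %x to i64
///   %neg = xor i64 %i, 0x8000000000000000  ; flip the sign bit
///   %r   = bitcast i64 %neg to double
/// except using target instructions rather than IR.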
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, call lowering uses the DAG-ISEL path currently so that the
    // callee of the direct function call instruction will be mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    if (TM.getTargetTriple().isOSAIX())
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel),
      LastLocalValue(nullptr), EmitStartPt(nullptr) {}

FastISel::~FastISel() = default;

bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               unsigned /*Op1*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
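/// For example (illustrative), a power-of-two multiply such as (mul x, 8) is
/// strength-reduced below to (shl x, 3) before any emission is attempted.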
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // udiv x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed), check to make sure shift amounts are
  // in-range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, MaterialReg);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

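/// The fastEmitInst_* helpers that follow all share one pattern, sketched
/// here for the one-register form: each register operand is constrained to
/// the class required by the instruction description, and then either
/// ResultReg is defined directly (when the MCInstrDesc has an explicit def)
/// or the instruction is emitted followed by a COPY from its first implicit
/// def. Roughly (illustrative MIR):
///   %res = OPC %op0              ; explicit def
/// versus
///   OPC %op0                     ; implicitly defines a physical register
///   %res = COPY $implicit-def0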
Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   unsigned Op1) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, unsigned Op2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1)
        .addReg(Op2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    uint64_t Imm1, uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    unsigned Op1, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0)
        .addReg(Op1)
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, 0, Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
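/// In effect this emits (and Op0, 1): e.g. (illustrative) an i1 held in the
/// low bit of a general-purpose register is zero-extended by masking off the
/// upper bits.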
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, 1);
}

/// handlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
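/// As a sketch (illustrative): for a successor PHI such as
///   %p = phi i32 [ 7, %thisbb ], [ %x, %otherbb ]
/// this records the pair (machine PHI, vreg holding 7) in PHINodesToUpdate;
/// the PHI's machine operands are filled in later, once every block has been
/// selected.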
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead PHIs.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Use the location of the operand if
      // there is one; otherwise no location, flushLocalValueMap will fix it.
      DbgLoc = DebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

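/// Try to fold a single-use load into its user. For example (an illustrative
/// sketch, not a guarantee for any particular target):
///   %v = load i32, i32* %p
///   %s = add i32 %v, %y
/// may be selected as a single load-op instruction (such as x86's ADD32rm)
/// instead of a separate load and add. The legality checks live in the body
/// below, and the actual folding is delegated to tryToFoldLoadIntoMI.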
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan up until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

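/// Check whether an add can be folded into a GEP's address computation.
/// Illustrative example of the shape being matched (a sketch; exact usage is
/// target-dependent):
///   %sum = add i64 %idx, 16
///   %gep = getelementptr i8, i8* %p, i64 %sum
/// where the add's second operand must be a constant.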
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           *Alignment, AAInfo, Ranges);
}

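/// When both operands of a compare are the same value, the predicate can be
/// simplified to one that depends only on NaN-ness, e.g. (illustrative)
///   fcmp oeq %x, %x  ->  fcmp ord %x, %x   (true unless %x is NaN)
///   icmp ult %x, %x  ->  false
/// This returns the simplified predicate; it does not modify the instruction.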
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_OLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_ONE:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD:   Predicate = CmpInst::FCMP_ORD;   break;
  case CmpInst::FCMP_UNO:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UEQ:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UGT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_ULT:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::FCMP_UNE:   Predicate = CmpInst::FCMP_UNO;   break;
  case CmpInst::FCMP_TRUE:  Predicate = CmpInst::FCMP_TRUE;  break;

  case CmpInst::ICMP_EQ:    Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_NE:    Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_ULT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SGT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE:   Predicate = CmpInst::FCMP_TRUE;  break;
  case CmpInst::ICMP_SLT:   Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE:   Predicate = CmpInst::FCMP_TRUE;  break;
  }

  return Predicate;
}