//===- FastISel.cpp - Implementation of the FastISel class ----------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file contains the implementation of the FastISel class.
//
// "Fast" instruction selection is designed to emit very poor code quickly.
// Also, it is not designed to be able to do much lowering, so most illegal
// types (e.g. i64 on 32-bit targets) and operations are not supported. It is
// also not intended to be able to do much optimization, except in a few cases
// where doing optimizations reduces overall compile time. For example, folding
// constants into immediate fields is often done, because it's cheap and it
// reduces the number of instructions later phases have to examine.
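// (Illustrative example: an IR-level "add i32 %x, 42" can typically be
// selected directly to a single add-with-immediate machine instruction.)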
//
// "Fast" instruction selection is able to fail gracefully and transfer
// control to the SelectionDAG selector for operations that it doesn't
// support. In many cases, this allows us to avoid duplicating a lot of
// the complicated lowering logic that SelectionDAG currently has.
//
// The intended use for "fast" instruction selection is "-O0" mode
// compilation, where the quality of the generated code is irrelevant when
// weighed against the speed at which the code can be generated. Also,
// at -O0, the LLVM optimizers are not running, and this makes the
// compile time of codegen a much higher portion of the overall compile
// time. Despite its limitations, "fast" instruction selection is able to
// handle enough code on its own to provide noticeable overall speedups
// in -O0 compiles.
//
// Basic operations are supported in a target-independent way, by reading
// the same instruction descriptions that the SelectionDAG selector reads,
// and identifying simple arithmetic operations that can be directly selected
// from simple operators. More complicated operations currently require
// target-specific code.
//
//===----------------------------------------------------------------------===//

#include "llvm/CodeGen/FastISel.h"
#include "llvm/ADT/APFloat.h"
#include "llvm/ADT/APSInt.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/Optional.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/BranchProbabilityInfo.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/CodeGen/Analysis.h"
#include "llvm/CodeGen/FunctionLoweringInfo.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/MachineBasicBlock.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineInstr.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineMemOperand.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/MachineOperand.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/StackMaps.h"
#include "llvm/CodeGen/TargetInstrInfo.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/TargetSubtargetInfo.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DebugInfo.h"
#include "llvm/IR/DebugLoc.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GetElementPtrTypeIterator.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/Mangler.h"
#include "llvm/IR/Metadata.h"
#include "llvm/IR/Operator.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/User.h"
#include "llvm/IR/Value.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCInstrDesc.h"
#include "llvm/MC/MCRegisterInfo.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MachineValueType.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetMachine.h"
#include "llvm/Target/TargetOptions.h"
#include <algorithm>
#include <cassert>
#include <climits>
#include <cstdint>
#include <iterator>
#include <limits>
#include <utility>

using namespace llvm;
using namespace PatternMatch;

#define DEBUG_TYPE "isel"

// FIXME: Remove this after the feature has proven reliable.
static cl::opt<bool> SinkLocalValues("fast-isel-sink-local-values",
                                     cl::init(true), cl::Hidden,
                                     cl::desc("Sink local values in FastISel"));

STATISTIC(NumFastIselSuccessIndependent, "Number of insts selected by "
                                         "target-independent selector");
STATISTIC(NumFastIselSuccessTarget, "Number of insts selected by "
                                    "target-specific selector");
STATISTIC(NumFastIselDead, "Number of dead insts removed on failure");

/// Set the current block to which generated machine instructions will be
/// appended.
void FastISel::startNewBlock() {
  assert(LocalValueMap.empty() &&
         "local values should be cleared after finishing a BB");

  // Instructions are appended to FuncInfo.MBB. If the basic block already
  // contains labels or copies, use the last instruction as the last local
  // value.
  EmitStartPt = nullptr;
  if (!FuncInfo.MBB->empty())
    EmitStartPt = &FuncInfo.MBB->back();
  LastLocalValue = EmitStartPt;
}

/// Flush the local CSE map and sink anything we can.
void FastISel::finishBasicBlock() { flushLocalValueMap(); }

bool FastISel::lowerArguments() {
  if (!FuncInfo.CanLowerReturn)
    // Fallback to SDISel argument lowering code to deal with sret pointer
    // parameter.
    return false;

  if (!fastLowerArguments())
    return false;

  // Enter arguments into ValueMap for uses in non-entry BBs.
  for (Function::const_arg_iterator I = FuncInfo.Fn->arg_begin(),
                                    E = FuncInfo.Fn->arg_end();
       I != E; ++I) {
    DenseMap<const Value *, Register>::iterator VI = LocalValueMap.find(&*I);
    assert(VI != LocalValueMap.end() && "Missed an argument?");
    FuncInfo.ValueMap[&*I] = VI->second;
  }
  return true;
}

/// Return the defined register if this instruction defines exactly one
/// virtual register and uses no other virtual registers. Otherwise return 0.
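/// Illustrative sketch: a constant materialization like "%0 = MOV32ri 42"
/// (x86) defines one vreg and uses no others, so %0 is returned, whereas
/// "%1 = COPY %0" reads another vreg and is not considered sinkable.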
static Register findSinkableLocalRegDef(MachineInstr &MI) {
  Register RegDef;
  for (const MachineOperand &MO : MI.operands()) {
    if (!MO.isReg())
      continue;
    if (MO.isDef()) {
      if (RegDef)
        return 0;
      RegDef = MO.getReg();
    } else if (MO.getReg().isVirtual()) {
      // This is another use of a vreg. Don't try to sink it.
      return Register();
    }
  }
  return RegDef;
}

void FastISel::flushLocalValueMap() {
  // Try to sink local values down to their first use so that we can give them
  // a better debug location. This has the side effect of shrinking local value
  // live ranges, which helps out fast regalloc.
  if (SinkLocalValues && LastLocalValue != EmitStartPt) {
    // Sink local value materialization instructions between EmitStartPt and
    // LastLocalValue. Visit them bottom-up, starting from LastLocalValue, to
    // avoid inserting into the range that we're iterating over.
    MachineBasicBlock::reverse_iterator RE =
        EmitStartPt ? MachineBasicBlock::reverse_iterator(EmitStartPt)
                    : FuncInfo.MBB->rend();
    MachineBasicBlock::reverse_iterator RI(LastLocalValue);

    InstOrderMap OrderMap;
    for (; RI != RE;) {
      MachineInstr &LocalMI = *RI;
      ++RI;
      bool Store = true;
      if (!LocalMI.isSafeToMove(nullptr, Store))
        continue;
      Register DefReg = findSinkableLocalRegDef(LocalMI);
      if (DefReg == 0)
        continue;

      sinkLocalValueMaterialization(LocalMI, DefReg, OrderMap);
    }
  }

  LocalValueMap.clear();
  LastLocalValue = EmitStartPt;
  recomputeInsertPt();
  SavedInsertPt = FuncInfo.InsertPt;
  LastFlushPoint = FuncInfo.InsertPt;
}

static bool isRegUsedByPhiNodes(Register DefReg,
                                FunctionLoweringInfo &FuncInfo) {
  for (auto &P : FuncInfo.PHINodesToUpdate)
    if (P.second == DefReg)
      return true;
  return false;
}

static bool isTerminatingEHLabel(MachineBasicBlock *MBB, MachineInstr &MI) {
  // Ignore non-EH labels.
  if (!MI.isEHLabel())
    return false;

  // Any EH label outside a landing pad must be for an invoke. Consider it a
  // terminator.
  if (!MBB->isEHPad())
    return true;

  // If this is a landingpad, the first non-phi instruction will be an EH_LABEL.
  // Don't consider that label to be a terminator.
  return MI.getIterator() != MBB->getFirstNonPHI();
}

/// Build a map of instruction orders. Return the first terminator and its
/// order. Consider EH_LABEL instructions to be terminators as well, since local
/// values for phis after invokes must be materialized before the call.
void FastISel::InstOrderMap::initialize(
    MachineBasicBlock *MBB, MachineBasicBlock::iterator LastFlushPoint) {
  unsigned Order = 0;
  for (MachineInstr &I : *MBB) {
    if (!FirstTerminator &&
        (I.isTerminator() || isTerminatingEHLabel(MBB, I))) {
      FirstTerminator = &I;
      FirstTerminatorOrder = Order;
    }
    Orders[&I] = Order++;

    // We don't need to order instructions past the last flush point.
    if (I.getIterator() == LastFlushPoint)
      break;
  }
}

void FastISel::sinkLocalValueMaterialization(MachineInstr &LocalMI,
                                             Register DefReg,
                                             InstOrderMap &OrderMap) {
  // If this register is used by a register fixup, MRI will not contain all
  // the uses until after register fixups, so don't attempt to sink or DCE
  // this instruction. Register fixups typically come from no-op cast
  // instructions, which replace the cast instruction vreg with the local
  // value vreg.
  if (FuncInfo.RegsWithFixups.count(DefReg))
    return;

  // We can DCE this instruction if there are no uses and it wasn't
  // materialized for a successor PHI node.
  bool UsedByPHI = isRegUsedByPhiNodes(DefReg, FuncInfo);
  if (!UsedByPHI && MRI.use_nodbg_empty(DefReg)) {
    if (EmitStartPt == &LocalMI)
      EmitStartPt = EmitStartPt->getPrevNode();
    LLVM_DEBUG(dbgs() << "removing dead local value materialization "
                      << LocalMI);
    OrderMap.Orders.erase(&LocalMI);
    LocalMI.eraseFromParent();
    return;
  }

  // Number the instructions if we haven't yet so we can efficiently find the
  // earliest use.
  if (OrderMap.Orders.empty())
    OrderMap.initialize(FuncInfo.MBB, LastFlushPoint);

  // Find the first user in the BB.
  MachineInstr *FirstUser = nullptr;
  unsigned FirstOrder = std::numeric_limits<unsigned>::max();
  for (MachineInstr &UseInst : MRI.use_nodbg_instructions(DefReg)) {
    auto I = OrderMap.Orders.find(&UseInst);
    assert(I != OrderMap.Orders.end() &&
           "local value used by instruction outside local region");
    unsigned UseOrder = I->second;
    if (UseOrder < FirstOrder) {
      FirstOrder = UseOrder;
      FirstUser = &UseInst;
    }
  }

  // The insertion point will be the first terminator or the first user,
  // whichever came first. If there was no terminator, this must be a
  // fallthrough block and the insertion point is the end of the block.
  MachineBasicBlock::instr_iterator SinkPos;
  if (UsedByPHI && OrderMap.FirstTerminatorOrder < FirstOrder) {
    FirstOrder = OrderMap.FirstTerminatorOrder;
    SinkPos = OrderMap.FirstTerminator->getIterator();
  } else if (FirstUser) {
    SinkPos = FirstUser->getIterator();
  } else {
    assert(UsedByPHI && "must have users if not used by a phi");
    SinkPos = FuncInfo.MBB->instr_end();
  }

  // Collect all DBG_VALUEs before the new insertion position so that we can
  // sink them.
  SmallVector<MachineInstr *, 1> DbgValues;
  for (MachineInstr &DbgVal : MRI.use_instructions(DefReg)) {
    if (!DbgVal.isDebugValue())
      continue;
    unsigned UseOrder = OrderMap.Orders[&DbgVal];
    if (UseOrder < FirstOrder)
      DbgValues.push_back(&DbgVal);
  }

  // Sink LocalMI before SinkPos and assign it the same DebugLoc.
  LLVM_DEBUG(dbgs() << "sinking local value to first use " << LocalMI);
  FuncInfo.MBB->remove(&LocalMI);
  FuncInfo.MBB->insert(SinkPos, &LocalMI);
  if (SinkPos != FuncInfo.MBB->end())
    LocalMI.setDebugLoc(SinkPos->getDebugLoc());

  // Sink any debug values that we've collected.
  for (MachineInstr *DI : DbgValues) {
    FuncInfo.MBB->remove(DI);
    FuncInfo.MBB->insert(SinkPos, DI);
  }
}

bool FastISel::hasTrivialKill(const Value *V) {
  // Don't consider constants or arguments to have trivial kills.
  const Instruction *I = dyn_cast<Instruction>(V);
  if (!I)
    return false;

  // No-op casts are trivially coalesced by fast-isel.
  if (const auto *Cast = dyn_cast<CastInst>(I))
    if (Cast->isNoopCast(DL) && !hasTrivialKill(Cast->getOperand(0)))
      return false;

  // Even if the value has only one use in the LLVM IR, FastISel might fold
  // that use into another instruction, so there can be more than one use at
  // the machine-instruction level.
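  // (One illustrative scenario, not the only one: when a use is folded as an
  // operand into two separately selected instructions, both emitted
  // instructions end up reading the same vreg.)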
  Register Reg = lookUpRegForValue(V);
  if (Reg && !MRI.use_empty(Reg))
    return false;

  // GEPs with all zero indices are trivially coalesced by fast-isel.
  if (const auto *GEP = dyn_cast<GetElementPtrInst>(I))
    if (GEP->hasAllZeroIndices() && !hasTrivialKill(GEP->getOperand(0)))
      return false;

  // Only instructions with a single use in the same basic block are considered
  // to have trivial kills.
  return I->hasOneUse() &&
         !(I->getOpcode() == Instruction::BitCast ||
           I->getOpcode() == Instruction::PtrToInt ||
           I->getOpcode() == Instruction::IntToPtr) &&
         cast<Instruction>(*I->user_begin())->getParent() == I->getParent();
}

Register FastISel::getRegForValue(const Value *V) {
  EVT RealVT = TLI.getValueType(DL, V->getType(), /*AllowUnknown=*/true);
  // Don't handle non-simple values in FastISel.
  if (!RealVT.isSimple())
    return Register();

  // Ignore illegal types. We must do this before looking up the value
  // in ValueMap because Arguments are given virtual registers regardless
  // of whether FastISel can handle them.
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT)) {
    // Handle integer promotions, though, because they're common and easy.
    if (VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)
      VT = TLI.getTypeToTransformTo(V->getContext(), VT).getSimpleVT();
    else
      return Register();
  }

  // Look up the value to see if we already have a register for it.
  Register Reg = lookUpRegForValue(V);
  if (Reg)
    return Reg;

  // In bottom-up mode, just create the virtual register which will be used
  // to hold the value. It will be materialized later.
  if (isa<Instruction>(V) &&
      (!isa<AllocaInst>(V) ||
       !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(V))))
    return FuncInfo.InitializeRegForValue(V);

  SavePoint SaveInsertPt = enterLocalValueArea();

  // Materialize the value in a register. Emit any instructions in the
  // local value area.
  Reg = materializeRegForValue(V, VT);

  leaveLocalValueArea(SaveInsertPt);

  return Reg;
}

Register FastISel::materializeConstant(const Value *V, MVT VT) {
  Register Reg;
  if (const auto *CI = dyn_cast<ConstantInt>(V)) {
    if (CI->getValue().getActiveBits() <= 64)
      Reg = fastEmit_i(VT, VT, ISD::Constant, CI->getZExtValue());
  } else if (isa<AllocaInst>(V))
    Reg = fastMaterializeAlloca(cast<AllocaInst>(V));
  else if (isa<ConstantPointerNull>(V))
    // Translate this as an integer zero so that it can be
    // local-CSE'd with actual integer zeros.
    Reg =
        getRegForValue(Constant::getNullValue(DL.getIntPtrType(V->getType())));
  else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
    if (CF->isNullValue())
      Reg = fastMaterializeFloatZero(CF);
    else
      // Try to emit the constant directly.
      Reg = fastEmit_f(VT, VT, ISD::ConstantFP, CF);

    if (!Reg) {
      // Try to emit the constant by using an integer constant with a cast.
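      // (For example, +2.0 converts exactly to the integer 2, so 2 is
      // materialized and converted back with SINT_TO_FP, while a value such
      // as 0.5 fails the isExact check below.)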
      const APFloat &Flt = CF->getValueAPF();
      EVT IntVT = TLI.getPointerTy(DL);
      uint32_t IntBitWidth = IntVT.getSizeInBits();
      APSInt SIntVal(IntBitWidth, /*isUnsigned=*/false);
      bool isExact;
      (void)Flt.convertToInteger(SIntVal, APFloat::rmTowardZero, &isExact);
      if (isExact) {
        Register IntegerReg =
            getRegForValue(ConstantInt::get(V->getContext(), SIntVal));
        if (IntegerReg)
          Reg = fastEmit_r(IntVT.getSimpleVT(), VT, ISD::SINT_TO_FP, IntegerReg,
                           /*Op0IsKill=*/false);
      }
    }
  } else if (const auto *Op = dyn_cast<Operator>(V)) {
    if (!selectOperator(Op, Op->getOpcode()))
      if (!isa<Instruction>(Op) ||
          !fastSelectInstruction(cast<Instruction>(Op)))
        return 0;
    Reg = lookUpRegForValue(Op);
  } else if (isa<UndefValue>(V)) {
    Reg = createResultReg(TLI.getRegClassFor(VT));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::IMPLICIT_DEF), Reg);
  }
  return Reg;
}

/// Helper for getRegForValue. This function is called when the value isn't
/// already available in a register and must be materialized with new
/// instructions.
Register FastISel::materializeRegForValue(const Value *V, MVT VT) {
  Register Reg;
  // Give the target-specific code a try first.
  if (isa<Constant>(V))
    Reg = fastMaterializeConstant(cast<Constant>(V));

  // If target-specific code couldn't or didn't want to handle the value, then
  // give target-independent code a try.
  if (!Reg)
    Reg = materializeConstant(V, VT);

  // Don't cache constant materializations in the general ValueMap.
  // To do so would require tracking what uses they dominate.
  if (Reg) {
    LocalValueMap[V] = Reg;
    LastLocalValue = MRI.getVRegDef(Reg);
  }
  return Reg;
}

Register FastISel::lookUpRegForValue(const Value *V) {
  // Look up the value to see if we already have a register for it. We
  // cache values defined by Instructions across blocks, and other values
  // only locally. This is because Instructions already have the SSA
  // def-dominates-use requirement enforced.
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(V);
  if (I != FuncInfo.ValueMap.end())
    return I->second;
  return LocalValueMap[V];
}

void FastISel::updateValueMap(const Value *I, Register Reg, unsigned NumRegs) {
  if (!isa<Instruction>(I)) {
    LocalValueMap[I] = Reg;
    return;
  }

  Register &AssignedReg = FuncInfo.ValueMap[I];
  if (!AssignedReg)
    // Use the new register.
    AssignedReg = Reg;
  else if (Reg != AssignedReg) {
    // Arrange for uses of AssignedReg to be replaced by uses of Reg.
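    // (The rewrite itself happens later, when the recorded fixups are
    // applied; RegsWithFixups is checked in sinkLocalValueMaterialization
    // above so such registers are not sunk or DCE'd before then.)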
    for (unsigned i = 0; i < NumRegs; i++) {
      FuncInfo.RegFixups[AssignedReg + i] = Reg + i;
      FuncInfo.RegsWithFixups.insert(Reg + i);
    }

    AssignedReg = Reg;
  }
}

std::pair<Register, bool> FastISel::getRegForGEPIndex(const Value *Idx) {
  Register IdxN = getRegForValue(Idx);
  if (!IdxN)
    // Unhandled operand. Halt "fast" selection and bail.
    return std::pair<Register, bool>(Register(), false);

  bool IdxNIsKill = hasTrivialKill(Idx);

  // If the index is smaller or larger than intptr_t, truncate or extend it.
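  // (For example, on a 64-bit target an i32 index is sign-extended to i64,
  // while an i128 index is truncated down to i64.)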
  MVT PtrVT = TLI.getPointerTy(DL);
  EVT IdxVT = EVT::getEVT(Idx->getType(), /*HandleUnknown=*/false);
  if (IdxVT.bitsLT(PtrVT)) {
    IdxN = fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::SIGN_EXTEND, IdxN,
                      IdxNIsKill);
    IdxNIsKill = true;
  } else if (IdxVT.bitsGT(PtrVT)) {
    IdxN =
        fastEmit_r(IdxVT.getSimpleVT(), PtrVT, ISD::TRUNCATE, IdxN, IdxNIsKill);
    IdxNIsKill = true;
  }
  return std::pair<Register, bool>(IdxN, IdxNIsKill);
}

void FastISel::recomputeInsertPt() {
  if (getLastLocalValue()) {
    FuncInfo.InsertPt = getLastLocalValue();
    FuncInfo.MBB = FuncInfo.InsertPt->getParent();
    ++FuncInfo.InsertPt;
  } else
    FuncInfo.InsertPt = FuncInfo.MBB->getFirstNonPHI();

  // Now skip past any EH_LABELs, which must remain at the beginning.
  while (FuncInfo.InsertPt != FuncInfo.MBB->end() &&
         FuncInfo.InsertPt->getOpcode() == TargetOpcode::EH_LABEL)
    ++FuncInfo.InsertPt;
}

void FastISel::removeDeadCode(MachineBasicBlock::iterator I,
                              MachineBasicBlock::iterator E) {
  assert(I.isValid() && E.isValid() && std::distance(I, E) > 0 &&
         "Invalid iterator!");
  while (I != E) {
    if (LastFlushPoint == I)
      LastFlushPoint = E;
    if (SavedInsertPt == I)
      SavedInsertPt = E;
    if (EmitStartPt == I)
      EmitStartPt = E.isValid() ? &*E : nullptr;
    if (LastLocalValue == I)
      LastLocalValue = E.isValid() ? &*E : nullptr;

    MachineInstr *Dead = &*I;
    ++I;
    Dead->eraseFromParent();
    ++NumFastIselDead;
  }
  recomputeInsertPt();
}

FastISel::SavePoint FastISel::enterLocalValueArea() {
  MachineBasicBlock::iterator OldInsertPt = FuncInfo.InsertPt;
  DebugLoc OldDL = DbgLoc;
  recomputeInsertPt();
  DbgLoc = DebugLoc();
  SavePoint SP = {OldInsertPt, OldDL};
  return SP;
}

void FastISel::leaveLocalValueArea(SavePoint OldInsertPt) {
  if (FuncInfo.InsertPt != FuncInfo.MBB->begin())
    LastLocalValue = &*std::prev(FuncInfo.InsertPt);

  // Restore the previous insert position.
  FuncInfo.InsertPt = OldInsertPt.InsertPt;
  DbgLoc = OldInsertPt.DL;
}

bool FastISel::selectBinaryOp(const User *I, unsigned ISDOpcode) {
  EVT VT = EVT::getEVT(I->getType(), /*HandleUnknown=*/true);
  if (VT == MVT::Other || !VT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // We only handle legal types. For example, on x86-32 the instruction
  // selector contains all of the 64-bit instructions from x86-64,
  // under the assumption that i64 won't be used if the target doesn't
  // support it.
  if (!TLI.isTypeLegal(VT)) {
    // MVT::i1 is special. Allow AND, OR, or XOR because they
    // don't require additional zeroing, which makes them easy.
    if (VT == MVT::i1 && (ISDOpcode == ISD::AND || ISDOpcode == ISD::OR ||
                          ISDOpcode == ISD::XOR))
      VT = TLI.getTypeToTransformTo(I->getContext(), VT);
    else
      return false;
  }

  // Check if the first operand is a constant, and handle it as "ri". At -O0,
  // we don't have anything that canonicalizes operand order.
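  // (Illustrative example: "add i32 4, %x" is handled as if it were
  // "add i32 %x, 4", so the constant can still land in the immediate field;
  // this is only done when the operation is commutative.)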
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(0)))
    if (isa<Instruction>(I) && cast<Instruction>(I)->isCommutative()) {
      Register Op1 = getRegForValue(I->getOperand(1));
      if (!Op1)
        return false;
      bool Op1IsKill = hasTrivialKill(I->getOperand(1));

      Register ResultReg =
          fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op1, Op1IsKill,
                       CI->getZExtValue(), VT.getSimpleVT());
      if (!ResultReg)
        return false;

      // We successfully emitted code for the given LLVM Instruction.
      updateValueMap(I, ResultReg);
      return true;
    }

  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // Check if the second operand is a constant and handle it appropriately.
  if (const auto *CI = dyn_cast<ConstantInt>(I->getOperand(1))) {
    uint64_t Imm = CI->getSExtValue();

    // Transform "sdiv exact X, 8" -> "sra X, 3".
    if (ISDOpcode == ISD::SDIV && isa<BinaryOperator>(I) &&
        cast<BinaryOperator>(I)->isExact() && isPowerOf2_64(Imm)) {
      Imm = Log2_64(Imm);
      ISDOpcode = ISD::SRA;
    }

    // Transform "urem x, pow2" -> "and x, pow2-1".
    if (ISDOpcode == ISD::UREM && isa<BinaryOperator>(I) &&
        isPowerOf2_64(Imm)) {
      --Imm;
      ISDOpcode = ISD::AND;
    }

    Register ResultReg = fastEmit_ri_(VT.getSimpleVT(), ISDOpcode, Op0,
                                      Op0IsKill, Imm, VT.getSimpleVT());
    if (!ResultReg)
      return false;

    // We successfully emitted code for the given LLVM Instruction.
    updateValueMap(I, ResultReg);
    return true;
  }

  Register Op1 = getRegForValue(I->getOperand(1));
  if (!Op1) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op1IsKill = hasTrivialKill(I->getOperand(1));

  // Now we have both operands in registers. Emit the instruction.
  Register ResultReg = fastEmit_rr(VT.getSimpleVT(), VT.getSimpleVT(),
                                   ISDOpcode, Op0, Op0IsKill, Op1, Op1IsKill);
  if (!ResultReg)
    // Target-specific code wasn't able to find a machine opcode for
    // the given ISD opcode and type. Halt "fast" selection and bail.
    return false;

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, ResultReg);
  return true;
}

bool FastISel::selectGetElementPtr(const User *I) {
  Register N = getRegForValue(I->getOperand(0));
  if (!N) // Unhandled operand. Halt "fast" selection and bail.
    return false;

  // FIXME: The code below does not handle vector GEPs. Halt "fast" selection
  // and bail.
  if (isa<VectorType>(I->getType()))
    return false;

  bool NIsKill = hasTrivialKill(I->getOperand(0));

  // Keep a running tab of the total offset to coalesce multiple N = N + Offset
  // into a single N = N + TotalOffset.
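  // (For example, several constant struct-field and array offsets accumulate
  // in TotalOffs and are emitted as one ADD at the end, rather than one ADD
  // per GEP index.)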
  uint64_t TotalOffs = 0;
  // FIXME: What's a good SWAG number for MaxOffs?
  uint64_t MaxOffs = 2048;
  MVT VT = TLI.getPointerTy(DL);
  for (gep_type_iterator GTI = gep_type_begin(I), E = gep_type_end(I);
       GTI != E; ++GTI) {
    const Value *Idx = GTI.getOperand();
    if (StructType *StTy = GTI.getStructTypeOrNull()) {
      uint64_t Field = cast<ConstantInt>(Idx)->getZExtValue();
      if (Field) {
        // N = N + Offset
        TotalOffs += DL.getStructLayout(StTy)->getElementOffset(Field);
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
      }
    } else {
      Type *Ty = GTI.getIndexedType();

      // If this is a constant subscript, handle it quickly.
      if (const auto *CI = dyn_cast<ConstantInt>(Idx)) {
        if (CI->isZero())
          continue;
        // N = N + Offset
        uint64_t IdxN = CI->getValue().sextOrTrunc(64).getSExtValue();
        TotalOffs += DL.getTypeAllocSize(Ty) * IdxN;
        if (TotalOffs >= MaxOffs) {
          N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
          if (!N) // Unhandled operand. Halt "fast" selection and bail.
            return false;
          NIsKill = true;
          TotalOffs = 0;
        }
        continue;
      }
      if (TotalOffs) {
        N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
        if (!N) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        NIsKill = true;
        TotalOffs = 0;
      }

      // N = N + Idx * ElementSize;
      uint64_t ElementSize = DL.getTypeAllocSize(Ty);
      std::pair<Register, bool> Pair = getRegForGEPIndex(Idx);
      Register IdxN = Pair.first;
      bool IdxNIsKill = Pair.second;
      if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
        return false;

      if (ElementSize != 1) {
        IdxN = fastEmit_ri_(VT, ISD::MUL, IdxN, IdxNIsKill, ElementSize, VT);
        if (!IdxN) // Unhandled operand. Halt "fast" selection and bail.
          return false;
        IdxNIsKill = true;
      }
      N = fastEmit_rr(VT, VT, ISD::ADD, N, NIsKill, IdxN, IdxNIsKill);
      if (!N) // Unhandled operand. Halt "fast" selection and bail.
        return false;
    }
  }
  if (TotalOffs) {
    N = fastEmit_ri_(VT, ISD::ADD, N, NIsKill, TotalOffs, VT);
    if (!N) // Unhandled operand. Halt "fast" selection and bail.
      return false;
  }

  // We successfully emitted code for the given LLVM Instruction.
  updateValueMap(I, N);
  return true;
}

bool FastISel::addStackMapLiveVars(SmallVectorImpl<MachineOperand> &Ops,
                                   const CallInst *CI, unsigned StartIdx) {
  for (unsigned i = StartIdx, e = CI->getNumArgOperands(); i != e; ++i) {
    Value *Val = CI->getArgOperand(i);
    // Check for constants and encode them with a StackMaps::ConstantOp prefix.
    if (const auto *C = dyn_cast<ConstantInt>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(C->getSExtValue()));
    } else if (isa<ConstantPointerNull>(Val)) {
      Ops.push_back(MachineOperand::CreateImm(StackMaps::ConstantOp));
      Ops.push_back(MachineOperand::CreateImm(0));
    } else if (auto *AI = dyn_cast<AllocaInst>(Val)) {
      // Values coming from a stack location also require a special encoding,
      // but that is added later on by the target specific frame index
      // elimination implementation.
      auto SI = FuncInfo.StaticAllocaMap.find(AI);
      if (SI != FuncInfo.StaticAllocaMap.end())
        Ops.push_back(MachineOperand::CreateFI(SI->second));
      else
        return false;
    } else {
      Register Reg = getRegForValue(Val);
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }
  return true;
}

bool FastISel::selectStackmap(const CallInst *I) {
  // void @llvm.experimental.stackmap(i64 <id>, i32 <numShadowBytes>,
  //                                  [live variables...])
  assert(I->getCalledFunction()->getReturnType()->isVoidTy() &&
         "Stackmap cannot return a value.");

  // The stackmap intrinsic only records the live variables (the arguments
  // passed to it) and emits NOPs (if requested). Unlike the patchpoint
  // intrinsic, this won't be lowered to a function call. This means we don't
  // have to worry about calling conventions and target-specific lowering code.
  // Instead we perform the call lowering right here.
  //
  // CALLSEQ_START(0, 0...)
  // STACKMAP(id, nbytes, ...)
  // CALLSEQ_END(0, 0)
  //
  SmallVector<MachineOperand, 32> Ops;

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Push live variables for the stack map (skipping the first two arguments
  // <id> and <numBytes>).
  if (!addStackMapLiveVars(Ops, I, 2))
    return false;

  // We are not adding any register mask info here, because the stackmap
  // doesn't clobber anything.

  // Add scratch registers as implicit def and early clobber.
  CallingConv::ID CC = I->getCallingConv();
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Issue CALLSEQ_START
  unsigned AdjStackDown = TII.getCallFrameSetupOpcode();
  auto Builder =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackDown));
  const MCInstrDesc &MCID = Builder.getInstr()->getDesc();
  for (unsigned I = 0, E = MCID.getNumOperands(); I < E; ++I)
    Builder.addImm(0);

  // Issue STACKMAP.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                    TII.get(TargetOpcode::STACKMAP));
  for (auto const &MO : Ops)
    MIB.add(MO);

  // Issue CALLSEQ_END
  unsigned AdjStackUp = TII.getCallFrameDestroyOpcode();
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(AdjStackUp))
      .addImm(0)
      .addImm(0);

  // Inform the Frame Information that we have a stackmap in this function.
  FuncInfo.MF->getFrameInfo().setHasStackMap();

  return true;
}

/// Lower an argument list according to the target calling convention.
///
/// This is a helper for lowering intrinsics that follow a target calling
/// convention or require stack pointer adjustment. Only a subset of the
/// intrinsic's operands need to participate in the calling convention.
bool FastISel::lowerCallOperands(const CallInst *CI, unsigned ArgIdx,
                                 unsigned NumArgs, const Value *Callee,
                                 bool ForceRetVoidTy, CallLoweringInfo &CLI) {
  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  for (unsigned ArgI = ArgIdx, ArgE = ArgIdx + NumArgs; ArgI != ArgE; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }

  Type *RetTy = ForceRetVoidTy ? Type::getVoidTy(CI->getType()->getContext())
                               : CI->getType();
  CLI.setCallee(CI->getCallingConv(), RetTy, Callee, std::move(Args), NumArgs);

  return lowerCallTo(CLI);
}

FastISel::CallLoweringInfo &FastISel::CallLoweringInfo::setCallee(
    const DataLayout &DL, MCContext &Ctx, CallingConv::ID CC, Type *ResultTy,
    StringRef Target, ArgListTy &&ArgsList, unsigned FixedArgs) {
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, Target, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return setCallee(CC, ResultTy, Sym, std::move(ArgsList), FixedArgs);
}

bool FastISel::selectPatchpoint(const CallInst *I) {
  // void|i64 @llvm.experimental.patchpoint.void|i64(i64 <id>,
  //                                                 i32 <numBytes>,
  //                                                 i8* <target>,
  //                                                 i32 <numArgs>,
  //                                                 [Args...],
  //                                                 [live variables...])
  CallingConv::ID CC = I->getCallingConv();
  bool IsAnyRegCC = CC == CallingConv::AnyReg;
  bool HasDef = !I->getType()->isVoidTy();
  Value *Callee =
      I->getOperand(PatchPointOpers::TargetPos)->stripPointerCasts();

  // Get the real number of arguments participating in the call <numArgs>
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos)) &&
         "Expected a constant integer.");
  const auto *NumArgsVal =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NArgPos));
  unsigned NumArgs = NumArgsVal->getZExtValue();

  // Skip the four meta args: <id>, <numNopBytes>, <target>, <numArgs>
  // This includes all meta-operands up to but not including CC.
  unsigned NumMetaOpers = PatchPointOpers::CCPos;
  assert(I->getNumArgOperands() >= NumMetaOpers + NumArgs &&
         "Not enough arguments provided to the patchpoint intrinsic");

  // For AnyRegCC the arguments are lowered later on manually.
  unsigned NumCallArgs = IsAnyRegCC ? 0 : NumArgs;
  CallLoweringInfo CLI;
  CLI.setIsPatchPoint();
  if (!lowerCallOperands(I, NumMetaOpers, NumCallArgs, Callee, IsAnyRegCC, CLI))
    return false;

  assert(CLI.Call && "No call instruction specified.");

  SmallVector<MachineOperand, 32> Ops;

  // Add an explicit result reg if we use the anyreg calling convention.
  if (IsAnyRegCC && HasDef) {
    assert(CLI.NumResultRegs == 0 && "Unexpected result register.");
    CLI.ResultReg = createResultReg(TLI.getRegClassFor(MVT::i64));
    CLI.NumResultRegs = 1;
    Ops.push_back(MachineOperand::CreateReg(CLI.ResultReg, /*isDef=*/true));
  }

  // Add the <id> and <numBytes> constants.
  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::IDPos)) &&
         "Expected a constant integer.");
  const auto *ID = cast<ConstantInt>(I->getOperand(PatchPointOpers::IDPos));
  Ops.push_back(MachineOperand::CreateImm(ID->getZExtValue()));

  assert(isa<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos)) &&
         "Expected a constant integer.");
  const auto *NumBytes =
      cast<ConstantInt>(I->getOperand(PatchPointOpers::NBytesPos));
  Ops.push_back(MachineOperand::CreateImm(NumBytes->getZExtValue()));

  // Add the call target.
  if (const auto *C = dyn_cast<IntToPtrInst>(Callee)) {
    uint64_t CalleeConstAddr =
        cast<ConstantInt>(C->getOperand(0))->getZExtValue();
    Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
  } else if (const auto *C = dyn_cast<ConstantExpr>(Callee)) {
    if (C->getOpcode() == Instruction::IntToPtr) {
      uint64_t CalleeConstAddr =
          cast<ConstantInt>(C->getOperand(0))->getZExtValue();
      Ops.push_back(MachineOperand::CreateImm(CalleeConstAddr));
    } else
      llvm_unreachable("Unsupported ConstantExpr.");
  } else if (const auto *GV = dyn_cast<GlobalValue>(Callee)) {
    Ops.push_back(MachineOperand::CreateGA(GV, 0));
  } else if (isa<ConstantPointerNull>(Callee))
    Ops.push_back(MachineOperand::CreateImm(0));
  else
    llvm_unreachable("Unsupported callee address.");

  // Adjust <numArgs> to account for any arguments that have been passed on
  // the stack instead.
  unsigned NumCallRegArgs = IsAnyRegCC ? NumArgs : CLI.OutRegs.size();
  Ops.push_back(MachineOperand::CreateImm(NumCallRegArgs));

  // Add the calling convention
  Ops.push_back(MachineOperand::CreateImm((unsigned)CC));

  // Add the arguments we omitted previously. The register allocator should
  // place these in any free register.
  if (IsAnyRegCC) {
    for (unsigned i = NumMetaOpers, e = NumMetaOpers + NumArgs; i != e; ++i) {
      Register Reg = getRegForValue(I->getArgOperand(i));
      if (!Reg)
        return false;
      Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));
    }
  }

  // Push the arguments from the call instruction.
  for (auto Reg : CLI.OutRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/false));

  // Push live variables for the stack map.
  if (!addStackMapLiveVars(Ops, I, NumMetaOpers + NumArgs))
    return false;

  // Push the register mask info.
  Ops.push_back(MachineOperand::CreateRegMask(
      TRI.getCallPreservedMask(*FuncInfo.MF, CC)));

  // Add scratch registers as implicit def and early clobber.
  const MCPhysReg *ScratchRegs = TLI.getScratchRegisters(CC);
  for (unsigned i = 0; ScratchRegs[i]; ++i)
    Ops.push_back(MachineOperand::CreateReg(
        ScratchRegs[i], /*isDef=*/true, /*isImp=*/true, /*isKill=*/false,
        /*isDead=*/false, /*isUndef=*/false, /*isEarlyClobber=*/true));

  // Add implicit defs (return values).
  for (auto Reg : CLI.InRegs)
    Ops.push_back(MachineOperand::CreateReg(Reg, /*isDef=*/true,
                                            /*isImp=*/true));

  // Insert the patchpoint instruction before the call generated by the target.
  MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, CLI.Call, DbgLoc,
                                    TII.get(TargetOpcode::PATCHPOINT));

  for (auto &MO : Ops)
    MIB.add(MO);

  MIB->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  // Delete the original call instruction.
  CLI.Call->eraseFromParent();

  // Inform the Frame Information that we have a patchpoint in this function.
  FuncInfo.MF->getFrameInfo().setHasPatchPoint();

  if (CLI.NumResultRegs)
    updateValueMap(I, CLI.ResultReg, CLI.NumResultRegs);
  return true;
}

bool FastISel::selectXRayCustomEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // The PATCHABLE_EVENT_CALL instruction was inserted above; it gets lowered
  // properly later.
  return true;
}

bool FastISel::selectXRayTypedEvent(const CallInst *I) {
  const auto &Triple = TM.getTargetTriple();
  if (Triple.getArch() != Triple::x86_64 || !Triple.isOSLinux())
    return true; // don't do anything to this instruction.
  SmallVector<MachineOperand, 8> Ops;
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(0)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(1)),
                                          /*isDef=*/false));
  Ops.push_back(MachineOperand::CreateReg(getRegForValue(I->getArgOperand(2)),
                                          /*isDef=*/false));
  MachineInstrBuilder MIB =
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::PATCHABLE_TYPED_EVENT_CALL));
  for (auto &MO : Ops)
    MIB.add(MO);

  // The PATCHABLE_TYPED_EVENT_CALL instruction was inserted above; it gets
  // lowered properly later.
  return true;
}

/// Returns an AttributeList representing the attributes applied to the return
/// value of the given call.
static AttributeList getReturnAttrs(FastISel::CallLoweringInfo &CLI) {
  SmallVector<Attribute::AttrKind, 2> Attrs;
  if (CLI.RetSExt)
    Attrs.push_back(Attribute::SExt);
  if (CLI.RetZExt)
    Attrs.push_back(Attribute::ZExt);
  if (CLI.IsInReg)
    Attrs.push_back(Attribute::InReg);

  return AttributeList::get(CLI.RetTy->getContext(), AttributeList::ReturnIndex,
                            Attrs);
}

bool FastISel::lowerCallTo(const CallInst *CI, const char *SymName,
                           unsigned NumArgs) {
  MCContext &Ctx = MF->getContext();
  SmallString<32> MangledName;
  Mangler::getNameWithPrefix(MangledName, SymName, DL);
  MCSymbol *Sym = Ctx.getOrCreateSymbol(MangledName);
  return lowerCallTo(CI, Sym, NumArgs);
}

bool FastISel::lowerCallTo(const CallInst *CI, MCSymbol *Symbol,
                           unsigned NumArgs) {
  FunctionType *FTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  Args.reserve(NumArgs);

  // Populate the argument list.
  // Attributes for args start at offset 1, after the return attribute.
  for (unsigned ArgI = 0; ArgI != NumArgs; ++ArgI) {
    Value *V = CI->getOperand(ArgI);

    assert(!V->getType()->isEmptyTy() && "Empty type passed to intrinsic.");

    ArgListEntry Entry;
    Entry.Val = V;
    Entry.Ty = V->getType();
    Entry.setAttributes(CI, ArgI);
    Args.push_back(Entry);
  }
  TLI.markLibCallAttributes(MF, CI->getCallingConv(), Args);

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FTy, Symbol, std::move(Args), *CI, NumArgs);

  return lowerCallTo(CLI);
}

bool FastISel::lowerCallTo(CallLoweringInfo &CLI) {
  // Handle the incoming return values from the call.
  CLI.clearIns();
  SmallVector<EVT, 4> RetTys;
  ComputeValueVTs(TLI, DL, CLI.RetTy, RetTys);

  SmallVector<ISD::OutputArg, 4> Outs;
  GetReturnInfo(CLI.CallConv, CLI.RetTy, getReturnAttrs(CLI), Outs, TLI, DL);

  bool CanLowerReturn = TLI.CanLowerReturn(
      CLI.CallConv, *FuncInfo.MF, CLI.IsVarArg, Outs, CLI.RetTy->getContext());

  // FIXME: sret demotion isn't supported yet - bail out.
  if (!CanLowerReturn)
    return false;

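  // Each legal register piece of the return type becomes one ISD::InputArg
  // below. (Illustrative example: an i64 return on a 32-bit target is
  // typically split into two i32 pieces, so NumRegs is 2.)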
  for (unsigned I = 0, E = RetTys.size(); I != E; ++I) {
    EVT VT = RetTys[I];
    MVT RegisterVT = TLI.getRegisterType(CLI.RetTy->getContext(), VT);
    unsigned NumRegs = TLI.getNumRegisters(CLI.RetTy->getContext(), VT);
    for (unsigned i = 0; i != NumRegs; ++i) {
      ISD::InputArg MyFlags;
      MyFlags.VT = RegisterVT;
      MyFlags.ArgVT = VT;
      MyFlags.Used = CLI.IsReturnValueUsed;
      if (CLI.RetSExt)
        MyFlags.Flags.setSExt();
      if (CLI.RetZExt)
        MyFlags.Flags.setZExt();
      if (CLI.IsInReg)
        MyFlags.Flags.setInReg();
      CLI.Ins.push_back(MyFlags);
    }
  }

  // Handle all of the outgoing arguments.
  CLI.clearOuts();
  for (auto &Arg : CLI.getArgs()) {
    Type *FinalType = Arg.Ty;
    if (Arg.IsByVal)
      FinalType = cast<PointerType>(Arg.Ty)->getElementType();
    bool NeedsRegBlock = TLI.functionArgumentNeedsConsecutiveRegisters(
        FinalType, CLI.CallConv, CLI.IsVarArg);

    ISD::ArgFlagsTy Flags;
    if (Arg.IsZExt)
      Flags.setZExt();
    if (Arg.IsSExt)
      Flags.setSExt();
    if (Arg.IsInReg)
      Flags.setInReg();
    if (Arg.IsSRet)
      Flags.setSRet();
    if (Arg.IsSwiftSelf)
      Flags.setSwiftSelf();
    if (Arg.IsSwiftError)
      Flags.setSwiftError();
    if (Arg.IsCFGuardTarget)
      Flags.setCFGuardTarget();
    if (Arg.IsByVal)
      Flags.setByVal();
    if (Arg.IsInAlloca) {
      Flags.setInAlloca();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // inalloca. This way we can know how many bytes we should've allocated
      // and how many bytes a callee cleanup function will pop. If we port
      // inalloca to more targets, we'll have to add custom inalloca handling
      // in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsPreallocated) {
      Flags.setPreallocated();
      // Set the byval flag for CCAssignFn callbacks that don't know about
      // preallocated. This way we can know how many bytes we should've
      // allocated and how many bytes a callee cleanup function will pop. If we
      // port preallocated to more targets, we'll have to add custom
      // preallocated handling in the various CC lowering callbacks.
      Flags.setByVal();
    }
    if (Arg.IsByVal || Arg.IsInAlloca || Arg.IsPreallocated) {
      PointerType *Ty = cast<PointerType>(Arg.Ty);
      Type *ElementTy = Ty->getElementType();
      unsigned FrameSize =
          DL.getTypeAllocSize(Arg.ByValType ? Arg.ByValType : ElementTy);

      // For ByVal, the alignment should come from the frontend. The backend
      // will guess if this info is not there, but there are cases it cannot
      // get right.
      MaybeAlign FrameAlign = Arg.Alignment;
      if (!FrameAlign)
        FrameAlign = Align(TLI.getByValTypeAlignment(ElementTy, DL));
      Flags.setByValSize(FrameSize);
      Flags.setByValAlign(*FrameAlign);
    }
    if (Arg.IsNest)
      Flags.setNest();
    if (NeedsRegBlock)
      Flags.setInConsecutiveRegs();
    Flags.setOrigAlign(DL.getABITypeAlign(Arg.Ty));

    CLI.OutVals.push_back(Arg.Val);
    CLI.OutFlags.push_back(Flags);
  }

  if (!fastLowerCall(CLI))
    return false;

  // Set all unused physreg defs as dead.
  assert(CLI.Call && "No call instruction specified.");
  CLI.Call->setPhysRegsDeadExcept(CLI.InRegs, TRI);

  if (CLI.NumResultRegs && CLI.CB)
    updateValueMap(CLI.CB, CLI.ResultReg, CLI.NumResultRegs);

  // Set labels for heapallocsite call.
  if (CLI.CB)
    if (MDNode *MD = CLI.CB->getMetadata("heapallocsite"))
      CLI.Call->setHeapAllocMarker(*MF, MD);

  return true;
}

bool FastISel::lowerCall(const CallInst *CI) {
  FunctionType *FuncTy = CI->getFunctionType();
  Type *RetTy = CI->getType();

  ArgListTy Args;
  ArgListEntry Entry;
  Args.reserve(CI->arg_size());

  for (auto i = CI->arg_begin(), e = CI->arg_end(); i != e; ++i) {
    Value *V = *i;

    // Skip empty types
    if (V->getType()->isEmptyTy())
      continue;

    Entry.Val = V;
    Entry.Ty = V->getType();

    // Skip the first return-type Attribute to get to params.
    Entry.setAttributes(CI, i - CI->arg_begin());
    Args.push_back(Entry);
  }

  // Check if target-independent constraints permit a tail call here.
  // Target-dependent constraints are checked within fastLowerCall.
  bool IsTailCall = CI->isTailCall();
  if (IsTailCall && !isInTailCallPosition(*CI, TM))
    IsTailCall = false;
  if (IsTailCall && MF->getFunction()
                        .getFnAttribute("disable-tail-calls")
                        .getValueAsString() == "true")
    IsTailCall = false;

  CallLoweringInfo CLI;
  CLI.setCallee(RetTy, FuncTy, CI->getCalledOperand(), std::move(Args), *CI)
      .setTailCall(IsTailCall);

  return lowerCallTo(CLI);
}

bool FastISel::selectCall(const User *I) {
  const CallInst *Call = cast<CallInst>(I);

  // Handle simple inline asms.
  if (const InlineAsm *IA = dyn_cast<InlineAsm>(Call->getCalledOperand())) {
    // If the inline asm has side effects, then make sure that no local value
    // lives across by flushing the local value map.
    if (IA->hasSideEffects())
      flushLocalValueMap();

    // Don't attempt to handle constraints.
    if (!IA->getConstraintString().empty())
      return false;

    unsigned ExtraInfo = 0;
    if (IA->hasSideEffects())
      ExtraInfo |= InlineAsm::Extra_HasSideEffects;
    if (IA->isAlignStack())
      ExtraInfo |= InlineAsm::Extra_IsAlignStack;
    if (Call->isConvergent())
      ExtraInfo |= InlineAsm::Extra_IsConvergent;
    ExtraInfo |= IA->getDialect() * InlineAsm::Extra_AsmDialect;

    MachineInstrBuilder MIB = BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
                                      TII.get(TargetOpcode::INLINEASM));
    MIB.addExternalSymbol(IA->getAsmString().c_str());
    MIB.addImm(ExtraInfo);

    const MDNode *SrcLoc = Call->getMetadata("srcloc");
    if (SrcLoc)
      MIB.addMetadata(SrcLoc);

    return true;
  }

  // Handle intrinsic function calls.
  if (const auto *II = dyn_cast<IntrinsicInst>(Call))
    return selectIntrinsicCall(II);

  // Usually, it does not make sense to initialize a value, make an unrelated
  // function call, and then use the value, because the value tends to be
  // spilled on the stack. So we move the pointer to the last local value to
  // the beginning of the block, so that all values which have already been
  // materialized appear after the call. It also makes sense to skip
  // intrinsics, since they tend to be inlined.
  flushLocalValueMap();

  return lowerCall(Call);
}

selectIntrinsicCall(const IntrinsicInst * II)1365 bool FastISel::selectIntrinsicCall(const IntrinsicInst *II) {
1366 switch (II->getIntrinsicID()) {
1367 default:
1368 break;
1369 // At -O0 we don't care about the lifetime intrinsics.
1370 case Intrinsic::lifetime_start:
1371 case Intrinsic::lifetime_end:
1372 // The donothing intrinsic does, well, nothing.
1373 case Intrinsic::donothing:
1374 // Neither does the sideeffect intrinsic.
1375 case Intrinsic::sideeffect:
1376 // Neither does the assume intrinsic; it's also OK not to codegen its operand.
1377 case Intrinsic::assume:
1378 return true;
1379 case Intrinsic::dbg_declare: {
1380 const DbgDeclareInst *DI = cast<DbgDeclareInst>(II);
1381 assert(DI->getVariable() && "Missing variable");
1382 if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
1383 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1384 << " (!hasDebugInfo)\n");
1385 return true;
1386 }
1387
1388 const Value *Address = DI->getAddress();
1389 if (!Address || isa<UndefValue>(Address)) {
1390 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1391 << " (bad/undef address)\n");
1392 return true;
1393 }
1394
1395 // Byval arguments with frame indices were already handled after argument
1396 // lowering and before isel.
1397 const auto *Arg =
1398 dyn_cast<Argument>(Address->stripInBoundsConstantOffsets());
1399 if (Arg && FuncInfo.getArgumentFrameIndex(Arg) != INT_MAX)
1400 return true;
1401
1402 Optional<MachineOperand> Op;
1403 if (Register Reg = lookUpRegForValue(Address))
1404 Op = MachineOperand::CreateReg(Reg, false);
1405
1406 // If we have a VLA that has a "use" in a metadata node that's then used
1407 // here but it has no other uses, then we have a problem. E.g.,
1408 //
1409 // int foo (const int *x) {
1410 // char a[*x];
1411 // return 0;
1412 // }
1413 //
1414 // If we assign 'a' a vreg and fast isel later on has to use the selection
1415 // DAG isel, it will want to copy the value to the vreg. However, there are
1416 // no uses, which goes counter to what selection DAG isel expects.
1417 if (!Op && !Address->use_empty() && isa<Instruction>(Address) &&
1418 (!isa<AllocaInst>(Address) ||
1419 !FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(Address))))
1420 Op = MachineOperand::CreateReg(FuncInfo.InitializeRegForValue(Address),
1421 false);
1422
1423 if (Op) {
1424 assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
1425 "Expected inlined-at fields to agree");
1426 // A dbg.declare describes the address of a source variable, so lower it
1427 // into an indirect DBG_VALUE.
1428 BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
1429 TII.get(TargetOpcode::DBG_VALUE), /*IsIndirect*/ true,
1430 *Op, DI->getVariable(), DI->getExpression());
1431 } else {
1432 // We can't yet handle anything else here because it would require
1433 // generating code, thus altering codegen because of debug info.
1434 LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI
1435 << " (no materialized reg for address)\n");
1436 }
1437 return true;
1438 }
  case Intrinsic::dbg_value: {
    // This form of DBG_VALUE is target-independent.
    const DbgValueInst *DI = cast<DbgValueInst>(II);
    const MCInstrDesc &II = TII.get(TargetOpcode::DBG_VALUE);
    const Value *V = DI->getValue();
    assert(DI->getVariable()->isValidLocationForIntrinsic(DbgLoc) &&
           "Expected inlined-at fields to agree");
    if (!V || isa<UndefValue>(V)) {
      // Currently the optimizer can produce this; insert an undef to
      // help debugging.
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, false, 0U,
              DI->getVariable(), DI->getExpression());
    } else if (const auto *CI = dyn_cast<ConstantInt>(V)) {
      if (CI->getBitWidth() > 64)
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addCImm(CI)
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
      else
        BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
            .addImm(CI->getZExtValue())
            .addImm(0U)
            .addMetadata(DI->getVariable())
            .addMetadata(DI->getExpression());
    } else if (const auto *CF = dyn_cast<ConstantFP>(V)) {
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
          .addFPImm(CF)
          .addImm(0U)
          .addMetadata(DI->getVariable())
          .addMetadata(DI->getExpression());
    } else if (Register Reg = lookUpRegForValue(V)) {
      // FIXME: This does not handle register-indirect values at offset 0.
      bool IsIndirect = false;
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, IsIndirect, Reg,
              DI->getVariable(), DI->getExpression());
    } else {
      // We don't know how to handle other cases, so we drop.
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
    }
    return true;
  }
  case Intrinsic::dbg_label: {
    const DbgLabelInst *DI = cast<DbgLabelInst>(II);
    assert(DI->getLabel() && "Missing label");
    if (!FuncInfo.MF->getMMI().hasDebugInfo()) {
      LLVM_DEBUG(dbgs() << "Dropping debug info for " << *DI << "\n");
      return true;
    }

    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::DBG_LABEL)).addMetadata(DI->getLabel());
    return true;
  }
  case Intrinsic::objectsize:
    llvm_unreachable("llvm.objectsize.* should have been lowered already");

  case Intrinsic::is_constant:
    llvm_unreachable("llvm.is.constant.* should have been lowered already");

  case Intrinsic::launder_invariant_group:
  case Intrinsic::strip_invariant_group:
  case Intrinsic::expect: {
    Register ResultReg = getRegForValue(II->getArgOperand(0));
    if (!ResultReg)
      return false;
    updateValueMap(II, ResultReg);
    return true;
  }
  case Intrinsic::experimental_stackmap:
    return selectStackmap(II);
  case Intrinsic::experimental_patchpoint_void:
  case Intrinsic::experimental_patchpoint_i64:
    return selectPatchpoint(II);

  case Intrinsic::xray_customevent:
    return selectXRayCustomEvent(II);
  case Intrinsic::xray_typedevent:
    return selectXRayTypedEvent(II);

  case Intrinsic::memcpy:
  case Intrinsic::memcpy_element_unordered_atomic:
  case Intrinsic::memcpy_inline:
  case Intrinsic::memmove:
  case Intrinsic::memmove_element_unordered_atomic:
  case Intrinsic::memset:
  case Intrinsic::memset_element_unordered_atomic:
    // Flush the local value map just like we do for regular calls,
    // to avoid excessive spills and reloads.
    // These intrinsics mostly turn into library calls at O0, and even
    // memcpy_inline should be treated like one for this purpose.
    flushLocalValueMap();
    break;
  }

  return fastLowerIntrinsicCall(II);
}

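/// Select a cast instruction (e.g. zext, sext, trunc, or the FP casts) by
/// emitting the corresponding target-independent ISD opcode. Bails out unless
/// both the source and destination types are simple and legal.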
bool FastISel::selectCast(const User *I, unsigned Opcode) {
  EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstVT = TLI.getValueType(DL, I->getType());

  if (SrcVT == MVT::Other || !SrcVT.isSimple() || DstVT == MVT::Other ||
      !DstVT.isSimple())
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  // Check if the destination type is legal.
  if (!TLI.isTypeLegal(DstVT))
    return false;

  // Check if the source operand is legal.
  if (!TLI.isTypeLegal(SrcVT))
    return false;

  Register InputReg = getRegForValue(I->getOperand(0));
  if (!InputReg)
    // Unhandled operand. Halt "fast" selection and bail.
    return false;

  bool InputRegIsKill = hasTrivialKill(I->getOperand(0));

  Register ResultReg = fastEmit_r(SrcVT.getSimpleVT(), DstVT.getSimpleVT(),
                                  Opcode, InputReg, InputRegIsKill);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

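/// Select a bitcast, preferring the cheapest lowering: reuse the operand's
/// register when the IR type is unchanged, emit a plain reg-reg COPY when
/// both types share a register class, and fall back to a target BITCAST
/// pattern otherwise.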
bool FastISel::selectBitCast(const User *I) {
  // If the bitcast doesn't change the type, just use the operand value.
  if (I->getType() == I->getOperand(0)->getType()) {
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  // Bitcasts of other values become reg-reg copies or BITCAST operators.
  EVT SrcEVT = TLI.getValueType(DL, I->getOperand(0)->getType());
  EVT DstEVT = TLI.getValueType(DL, I->getType());
  if (SrcEVT == MVT::Other || DstEVT == MVT::Other ||
      !TLI.isTypeLegal(SrcEVT) || !TLI.isTypeLegal(DstEVT))
    // Unhandled type. Halt "fast" selection and bail.
    return false;

  MVT SrcVT = SrcEVT.getSimpleVT();
  MVT DstVT = DstEVT.getSimpleVT();
  Register Op0 = getRegForValue(I->getOperand(0));
  if (!Op0) // Unhandled operand. Halt "fast" selection and bail.
    return false;
  bool Op0IsKill = hasTrivialKill(I->getOperand(0));

  // First, try to perform the bitcast by inserting a reg-reg copy.
  Register ResultReg;
  if (SrcVT == DstVT) {
    const TargetRegisterClass *SrcClass = TLI.getRegClassFor(SrcVT);
    const TargetRegisterClass *DstClass = TLI.getRegClassFor(DstVT);
    // Don't attempt a cross-class copy. It will likely fail.
    if (SrcClass == DstClass) {
      ResultReg = createResultReg(DstClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), ResultReg).addReg(Op0);
    }
  }

  // If the reg-reg copy failed, select a BITCAST opcode.
  if (!ResultReg)
    ResultReg = fastEmit_r(SrcVT, DstVT, ISD::BITCAST, Op0, Op0IsKill);

  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

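/// Select a freeze instruction. A freeze of a well-defined value is that
/// value, and for undef/poison inputs any fixed value of the type is an
/// acceptable result, so a plain register copy is a correct lowering here.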
bool FastISel::selectFreeze(const User *I) {
  Register Reg = getRegForValue(I->getOperand(0));
  if (!Reg)
    // Unhandled operand.
    return false;

  EVT ETy = TLI.getValueType(DL, I->getOperand(0)->getType());
  if (ETy == MVT::Other || !TLI.isTypeLegal(ETy))
    // Unhandled type, bail out.
    return false;

  MVT Ty = ETy.getSimpleVT();
  const TargetRegisterClass *TyRegClass = TLI.getRegClassFor(Ty);
  Register ResultReg = createResultReg(TyRegClass);
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
          TII.get(TargetOpcode::COPY), ResultReg).addReg(Reg);

  updateValueMap(I, ResultReg);
  return true;
}

// Remove local value instructions starting from the instruction after
// SavedLastLocalValue to the current function insert point.
void FastISel::removeDeadLocalValueCode(MachineInstr *SavedLastLocalValue)
{
  MachineInstr *CurLastLocalValue = getLastLocalValue();
  if (CurLastLocalValue != SavedLastLocalValue) {
    // Find the first local value instruction to be deleted.
    // This is the instruction after SavedLastLocalValue if it is non-NULL.
    // Otherwise it's the first instruction in the block.
    MachineBasicBlock::iterator FirstDeadInst(SavedLastLocalValue);
    if (SavedLastLocalValue)
      ++FirstDeadInst;
    else
      FirstDeadInst = FuncInfo.MBB->getFirstNonPHI();
    setLastLocalValue(SavedLastLocalValue);
    removeDeadCode(FirstDeadInst, FuncInfo.InsertPt);
  }
}

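/// Select the IR instruction I, first via the target-independent tables
/// (unless disabled) and then via the target hook. On failure, any machine
/// instructions emitted by the failed attempt are removed so that
/// SelectionDAG can take over cleanly.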
bool FastISel::selectInstruction(const Instruction *I) {
  MachineInstr *SavedLastLocalValue = getLastLocalValue();
  // Just before the terminator instruction, insert instructions to
  // feed PHI nodes in successor blocks.
  if (I->isTerminator()) {
    if (!handlePHINodesInSuccessorBlocks(I->getParent())) {
      // PHI node handling may have generated local value instructions,
      // even though it failed to handle all PHI nodes.
      // We remove these instructions because SelectionDAGISel will generate
      // them again.
      removeDeadLocalValueCode(SavedLastLocalValue);
      return false;
    }
  }

  // FastISel does not handle any operand bundles except OB_funclet.
  if (auto *Call = dyn_cast<CallBase>(I))
    for (unsigned i = 0, e = Call->getNumOperandBundles(); i != e; ++i)
      if (Call->getOperandBundleAt(i).getTagID() != LLVMContext::OB_funclet)
        return false;

  DbgLoc = I->getDebugLoc();

  SavedInsertPt = FuncInfo.InsertPt;

  if (const auto *Call = dyn_cast<CallInst>(I)) {
    const Function *F = Call->getCalledFunction();
    LibFunc Func;

    // As a special case, don't handle calls to builtin library functions that
    // may be translated directly to target instructions.
    if (F && !F->hasLocalLinkage() && F->hasName() &&
        LibInfo->getLibFunc(F->getName(), Func) &&
        LibInfo->hasOptimizedCodeGen(Func))
      return false;

    // Don't handle Intrinsic::trap if a trap function is specified.
    if (F && F->getIntrinsicID() == Intrinsic::trap &&
        Call->hasFnAttr("trap-func-name"))
      return false;
  }

  // First, try doing target-independent selection.
  if (!SkipTargetIndependentISel) {
    if (selectOperator(I, I->getOpcode())) {
      ++NumFastIselSuccessIndependent;
      DbgLoc = DebugLoc();
      return true;
    }
    // Remove dead code.
    recomputeInsertPt();
    if (SavedInsertPt != FuncInfo.InsertPt)
      removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);
    SavedInsertPt = FuncInfo.InsertPt;
  }
  // Next, try calling the target to attempt to handle the instruction.
  if (fastSelectInstruction(I)) {
    ++NumFastIselSuccessTarget;
    DbgLoc = DebugLoc();
    return true;
  }
  // Remove dead code.
  recomputeInsertPt();
  if (SavedInsertPt != FuncInfo.InsertPt)
    removeDeadCode(FuncInfo.InsertPt, SavedInsertPt);

  DbgLoc = DebugLoc();
  // Undo phi node updates, because they will be added again by SelectionDAG.
  if (I->isTerminator()) {
    // PHI node handling may have generated local value instructions.
    // We remove them because SelectionDAGISel will generate them again.
    removeDeadLocalValueCode(SavedLastLocalValue);
    FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
  }
  return false;
}

/// Emit an unconditional branch to the given block, unless it is the immediate
/// (fall-through) successor, and update the CFG.
void FastISel::fastEmitBranch(MachineBasicBlock *MSucc,
                              const DebugLoc &DbgLoc) {
  if (FuncInfo.MBB->getBasicBlock()->sizeWithoutDebug() > 1 &&
      FuncInfo.MBB->isLayoutSuccessor(MSucc)) {
    // The unconditional fall-through case, which needs no instructions. If
    // the branch is the only non-debug instruction in the block, we emit it
    // anyway (the else path below) for more accurate line information.
  } else {
    // The unconditional branch case.
    TII.insertBranch(*FuncInfo.MBB, MSucc, nullptr,
                     SmallVector<MachineOperand, 0>(), DbgLoc);
  }
  if (FuncInfo.BPI) {
    auto BranchProbability = FuncInfo.BPI->getEdgeProbability(
        FuncInfo.MBB->getBasicBlock(), MSucc->getBasicBlock());
    FuncInfo.MBB->addSuccessor(MSucc, BranchProbability);
  } else
    FuncInfo.MBB->addSuccessorWithoutProb(MSucc);
}

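/// Complete the lowering of a conditional branch: record TrueMBB as a
/// successor (with its branch probability when available) and emit the
/// branch or fall-through to FalseMBB.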
void FastISel::finishCondBranch(const BasicBlock *BranchBB,
                                MachineBasicBlock *TrueMBB,
                                MachineBasicBlock *FalseMBB) {
  // Add TrueMBB as a successor unless it is equal to FalseMBB: this can
  // happen in degenerate IR, and MachineIR forbids having a block appear
  // twice in the successor/predecessor lists.
  if (TrueMBB != FalseMBB) {
    if (FuncInfo.BPI) {
      auto BranchProbability =
          FuncInfo.BPI->getEdgeProbability(BranchBB, TrueMBB->getBasicBlock());
      FuncInfo.MBB->addSuccessor(TrueMBB, BranchProbability);
    } else
      FuncInfo.MBB->addSuccessorWithoutProb(TrueMBB);
  }

  fastEmitBranch(FalseMBB, DbgLoc);
}

/// Emit an FNeg operation.
bool FastISel::selectFNeg(const User *I, const Value *In) {
  Register OpReg = getRegForValue(In);
  if (!OpReg)
    return false;
  bool OpRegIsKill = hasTrivialKill(In);

  // If the target has ISD::FNEG, use it.
  EVT VT = TLI.getValueType(DL, I->getType());
  Register ResultReg = fastEmit_r(VT.getSimpleVT(), VT.getSimpleVT(), ISD::FNEG,
                                  OpReg, OpRegIsKill);
  if (ResultReg) {
    updateValueMap(I, ResultReg);
    return true;
  }

  // Bitcast the value to integer, twiddle the sign bit with xor,
  // and then bitcast it back to floating-point.
  if (VT.getSizeInBits() > 64)
    return false;
  EVT IntVT = EVT::getIntegerVT(I->getContext(), VT.getSizeInBits());
  if (!TLI.isTypeLegal(IntVT))
    return false;

  Register IntReg = fastEmit_r(VT.getSimpleVT(), IntVT.getSimpleVT(),
                               ISD::BITCAST, OpReg, OpRegIsKill);
  if (!IntReg)
    return false;

  Register IntResultReg = fastEmit_ri_(
      IntVT.getSimpleVT(), ISD::XOR, IntReg, /*Op0IsKill=*/true,
      UINT64_C(1) << (VT.getSizeInBits() - 1), IntVT.getSimpleVT());
  if (!IntResultReg)
    return false;

  ResultReg = fastEmit_r(IntVT.getSimpleVT(), VT.getSimpleVT(), ISD::BITCAST,
                         IntResultReg, /*Op0IsKill=*/true);
  if (!ResultReg)
    return false;

  updateValueMap(I, ResultReg);
  return true;
}

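/// Select an extractvalue instruction without emitting any code: the
/// aggregate's values already live in consecutive virtual registers, so the
/// result is simply the base register plus a linear offset computed from the
/// extract indices.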
bool FastISel::selectExtractValue(const User *U) {
  const ExtractValueInst *EVI = dyn_cast<ExtractValueInst>(U);
  if (!EVI)
    return false;

  // Make sure we only try to handle extracts with a legal result. But also
  // allow i1 because it's easy.
  EVT RealVT = TLI.getValueType(DL, EVI->getType(), /*AllowUnknown=*/true);
  if (!RealVT.isSimple())
    return false;
  MVT VT = RealVT.getSimpleVT();
  if (!TLI.isTypeLegal(VT) && VT != MVT::i1)
    return false;

  const Value *Op0 = EVI->getOperand(0);
  Type *AggTy = Op0->getType();

  // Get the base result register.
  unsigned ResultReg;
  DenseMap<const Value *, Register>::iterator I = FuncInfo.ValueMap.find(Op0);
  if (I != FuncInfo.ValueMap.end())
    ResultReg = I->second;
  else if (isa<Instruction>(Op0))
    ResultReg = FuncInfo.InitializeRegForValue(Op0);
  else
    return false; // fast-isel can't handle aggregate constants at the moment

  // Get the actual result register, which is an offset from the base register.
  unsigned VTIndex = ComputeLinearIndex(AggTy, EVI->getIndices());

  SmallVector<EVT, 4> AggValueVTs;
  ComputeValueVTs(TLI, DL, AggTy, AggValueVTs);

  for (unsigned i = 0; i < VTIndex; i++)
    ResultReg += TLI.getNumRegisters(FuncInfo.Fn->getContext(), AggValueVTs[i]);

  updateValueMap(EVI, ResultReg);
  return true;
}

bool FastISel::selectOperator(const User *I, unsigned Opcode) {
  switch (Opcode) {
  case Instruction::Add:
    return selectBinaryOp(I, ISD::ADD);
  case Instruction::FAdd:
    return selectBinaryOp(I, ISD::FADD);
  case Instruction::Sub:
    return selectBinaryOp(I, ISD::SUB);
  case Instruction::FSub:
    return selectBinaryOp(I, ISD::FSUB);
  case Instruction::Mul:
    return selectBinaryOp(I, ISD::MUL);
  case Instruction::FMul:
    return selectBinaryOp(I, ISD::FMUL);
  case Instruction::SDiv:
    return selectBinaryOp(I, ISD::SDIV);
  case Instruction::UDiv:
    return selectBinaryOp(I, ISD::UDIV);
  case Instruction::FDiv:
    return selectBinaryOp(I, ISD::FDIV);
  case Instruction::SRem:
    return selectBinaryOp(I, ISD::SREM);
  case Instruction::URem:
    return selectBinaryOp(I, ISD::UREM);
  case Instruction::FRem:
    return selectBinaryOp(I, ISD::FREM);
  case Instruction::Shl:
    return selectBinaryOp(I, ISD::SHL);
  case Instruction::LShr:
    return selectBinaryOp(I, ISD::SRL);
  case Instruction::AShr:
    return selectBinaryOp(I, ISD::SRA);
  case Instruction::And:
    return selectBinaryOp(I, ISD::AND);
  case Instruction::Or:
    return selectBinaryOp(I, ISD::OR);
  case Instruction::Xor:
    return selectBinaryOp(I, ISD::XOR);

  case Instruction::FNeg:
    return selectFNeg(I, I->getOperand(0));

  case Instruction::GetElementPtr:
    return selectGetElementPtr(I);

  case Instruction::Br: {
    const BranchInst *BI = cast<BranchInst>(I);

    if (BI->isUnconditional()) {
      const BasicBlock *LLVMSucc = BI->getSuccessor(0);
      MachineBasicBlock *MSucc = FuncInfo.MBBMap[LLVMSucc];
      fastEmitBranch(MSucc, BI->getDebugLoc());
      return true;
    }

    // Conditional branches are not handled yet.
    // Halt "fast" selection and bail.
    return false;
  }

  case Instruction::Unreachable:
    if (TM.Options.TrapUnreachable)
      return fastEmit_(MVT::Other, MVT::Other, ISD::TRAP) != 0;
    else
      return true;

  case Instruction::Alloca:
    // FunctionLowering has the static-sized case covered.
    if (FuncInfo.StaticAllocaMap.count(cast<AllocaInst>(I)))
      return true;

    // Dynamic-sized alloca is not handled yet.
    return false;

  case Instruction::Call:
    // On AIX, call lowering uses the DAG-ISEL path currently so that the
    // callee of the direct function call instruction will be mapped to the
    // symbol for the function's entry point, which is distinct from the
    // function descriptor symbol. The latter is the symbol whose XCOFF symbol
    // name is the C-linkage name of the source level function.
    if (TM.getTargetTriple().isOSAIX())
      return false;
    return selectCall(I);

  case Instruction::BitCast:
    return selectBitCast(I);

  case Instruction::FPToSI:
    return selectCast(I, ISD::FP_TO_SINT);
  case Instruction::ZExt:
    return selectCast(I, ISD::ZERO_EXTEND);
  case Instruction::SExt:
    return selectCast(I, ISD::SIGN_EXTEND);
  case Instruction::Trunc:
    return selectCast(I, ISD::TRUNCATE);
  case Instruction::SIToFP:
    return selectCast(I, ISD::SINT_TO_FP);

  case Instruction::IntToPtr: // Deliberate fall-through.
  case Instruction::PtrToInt: {
    EVT SrcVT = TLI.getValueType(DL, I->getOperand(0)->getType());
    EVT DstVT = TLI.getValueType(DL, I->getType());
    if (DstVT.bitsGT(SrcVT))
      return selectCast(I, ISD::ZERO_EXTEND);
    if (DstVT.bitsLT(SrcVT))
      return selectCast(I, ISD::TRUNCATE);
    Register Reg = getRegForValue(I->getOperand(0));
    if (!Reg)
      return false;
    updateValueMap(I, Reg);
    return true;
  }

  case Instruction::ExtractValue:
    return selectExtractValue(I);

  case Instruction::Freeze:
    return selectFreeze(I);

  case Instruction::PHI:
    llvm_unreachable("FastISel shouldn't visit PHI nodes!");

  default:
    // Unhandled instruction. Halt "fast" selection and bail.
    return false;
  }
}

FastISel::FastISel(FunctionLoweringInfo &FuncInfo,
                   const TargetLibraryInfo *LibInfo,
                   bool SkipTargetIndependentISel)
    : FuncInfo(FuncInfo), MF(FuncInfo.MF), MRI(FuncInfo.MF->getRegInfo()),
      MFI(FuncInfo.MF->getFrameInfo()), MCP(*FuncInfo.MF->getConstantPool()),
      TM(FuncInfo.MF->getTarget()), DL(MF->getDataLayout()),
      TII(*MF->getSubtarget().getInstrInfo()),
      TLI(*MF->getSubtarget().getTargetLowering()),
      TRI(*MF->getSubtarget().getRegisterInfo()), LibInfo(LibInfo),
      SkipTargetIndependentISel(SkipTargetIndependentISel),
      LastLocalValue(nullptr), EmitStartPt(nullptr) {}

FastISel::~FastISel() = default;

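// Default implementations of the fastLower* and fastEmit_* hooks. They all
// fail (return false or a null register), which makes FastISel fall back to
// SelectionDAG; targets override the hooks they can handle.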
bool FastISel::fastLowerArguments() { return false; }

bool FastISel::fastLowerCall(CallLoweringInfo & /*CLI*/) { return false; }

bool FastISel::fastLowerIntrinsicCall(const IntrinsicInst * /*II*/) {
  return false;
}

unsigned FastISel::fastEmit_(MVT, MVT, unsigned) { return 0; }

unsigned FastISel::fastEmit_r(MVT, MVT, unsigned, unsigned /*Op0*/,
                              bool /*Op0IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_rr(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, unsigned /*Op1*/,
                               bool /*Op1IsKill*/) {
  return 0;
}

unsigned FastISel::fastEmit_i(MVT, MVT, unsigned, uint64_t /*Imm*/) {
  return 0;
}

unsigned FastISel::fastEmit_f(MVT, MVT, unsigned,
                              const ConstantFP * /*FPImm*/) {
  return 0;
}

unsigned FastISel::fastEmit_ri(MVT, MVT, unsigned, unsigned /*Op0*/,
                               bool /*Op0IsKill*/, uint64_t /*Imm*/) {
  return 0;
}

/// This method is a wrapper of fastEmit_ri. It first tries to emit an
/// instruction with an immediate operand using fastEmit_ri.
/// If that fails, it materializes the immediate into a register and tries
/// fastEmit_rr instead.
Register FastISel::fastEmit_ri_(MVT VT, unsigned Opcode, unsigned Op0,
                                bool Op0IsKill, uint64_t Imm, MVT ImmType) {
  // If this is a multiply by a power of two, emit this as a shift left.
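  // mul x, 8 -> shl x, 3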
  if (Opcode == ISD::MUL && isPowerOf2_64(Imm)) {
    Opcode = ISD::SHL;
    Imm = Log2_64(Imm);
  } else if (Opcode == ISD::UDIV && isPowerOf2_64(Imm)) {
    // div x, 8 -> srl x, 3
    Opcode = ISD::SRL;
    Imm = Log2_64(Imm);
  }

  // Horrible hack (to be removed): check that shift amounts are in range.
  if ((Opcode == ISD::SHL || Opcode == ISD::SRA || Opcode == ISD::SRL) &&
      Imm >= VT.getSizeInBits())
    return 0;

  // First check if immediate type is legal. If not, we can't use the ri form.
  Register ResultReg = fastEmit_ri(VT, VT, Opcode, Op0, Op0IsKill, Imm);
  if (ResultReg)
    return ResultReg;
  Register MaterialReg = fastEmit_i(ImmType, ImmType, ISD::Constant, Imm);
  bool IsImmKill = true;
  if (!MaterialReg) {
    // This is a bit ugly/slow, but failing here means falling out of
    // fast-isel, which would be very slow.
    IntegerType *ITy =
        IntegerType::get(FuncInfo.Fn->getContext(), VT.getSizeInBits());
    MaterialReg = getRegForValue(ConstantInt::get(ITy, Imm));
    if (!MaterialReg)
      return 0;
    // FIXME: If the materialized register here has no uses yet then this
    // will be the first use and we should be able to mark it as killed.
    // However, the local value area for materializing constant expressions
    // grows down, not up, which means that any constant expressions we
    // generate later which also use 'Imm' could be after this instruction
    // and therefore after this kill.
    IsImmKill = false;
  }
  return fastEmit_rr(VT, VT, Opcode, Op0, Op0IsKill, MaterialReg, IsImmKill);
}

Register FastISel::createResultReg(const TargetRegisterClass *RC) {
  return MRI.createVirtualRegister(RC);
}

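/// Constrain the virtual register Op to the register class required by
/// operand OpNum of the instruction described by II. If the class cannot be
/// constrained in place, copy the value into a fresh register of the
/// required class and return that instead.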
Register FastISel::constrainOperandRegClass(const MCInstrDesc &II, Register Op,
                                            unsigned OpNum) {
  if (Op.isVirtual()) {
    const TargetRegisterClass *RegClass =
        TII.getRegClass(II, OpNum, &TRI, *FuncInfo.MF);
    if (!MRI.constrainRegClass(Op, RegClass)) {
      // If it's not legal to COPY between the register classes, something
      // has gone very wrong before we got here.
      Register NewOp = createResultReg(RegClass);
      BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
              TII.get(TargetOpcode::COPY), NewOp).addReg(Op);
      return NewOp;
    }
  }
  return Op;
}

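// The fastEmitInst_* helpers below each emit one MachineInstr with the given
// opcode and operands and return a virtual register holding the result. When
// the instruction produces its result only as an implicit physical-register
// def, the helpers copy that physical register into a fresh virtual register
// so that callers always receive a vreg.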
Register FastISel::fastEmitInst_(unsigned MachineInstOpcode,
                                 const TargetRegisterClass *RC) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg);
  return ResultReg;
}

Register FastISel::fastEmitInst_r(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, unsigned Op0,
                                  bool Op0IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }

  return ResultReg;
}

Register FastISel::fastEmitInst_rr(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, unsigned Op1,
                                   bool Op1IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rrr(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, unsigned Op2,
                                    bool Op2IsKill) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);
  Op2 = constrainOperandRegClass(II, Op2, II.getNumDefs() + 2);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addReg(Op2, getKillRegState(Op2IsKill));
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_ri(unsigned MachineInstOpcode,
                                   const TargetRegisterClass *RC, unsigned Op0,
                                   bool Op0IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rii(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, uint64_t Imm1,
                                    uint64_t Imm2) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addImm(Imm1)
        .addImm(Imm2);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_f(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC,
                                  const ConstantFP *FPImm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addFPImm(FPImm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addFPImm(FPImm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_rri(unsigned MachineInstOpcode,
                                    const TargetRegisterClass *RC, unsigned Op0,
                                    bool Op0IsKill, unsigned Op1,
                                    bool Op1IsKill, uint64_t Imm) {
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  Register ResultReg = createResultReg(RC);
  Op0 = constrainOperandRegClass(II, Op0, II.getNumDefs());
  Op1 = constrainOperandRegClass(II, Op1, II.getNumDefs() + 1);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II)
        .addReg(Op0, getKillRegState(Op0IsKill))
        .addReg(Op1, getKillRegState(Op1IsKill))
        .addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

Register FastISel::fastEmitInst_i(unsigned MachineInstOpcode,
                                  const TargetRegisterClass *RC, uint64_t Imm) {
  Register ResultReg = createResultReg(RC);
  const MCInstrDesc &II = TII.get(MachineInstOpcode);

  if (II.getNumDefs() >= 1)
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II, ResultReg)
        .addImm(Imm);
  else {
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, II).addImm(Imm);
    BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc,
            TII.get(TargetOpcode::COPY), ResultReg).addReg(II.ImplicitDefs[0]);
  }
  return ResultReg;
}

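/// Emit a COPY from subregister Idx of Op0 into a fresh register of the
/// class for RetVT, constraining Op0 to a register class that actually has
/// that subregister.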
Register FastISel::fastEmitInst_extractsubreg(MVT RetVT, unsigned Op0,
                                              bool Op0IsKill, uint32_t Idx) {
  Register ResultReg = createResultReg(TLI.getRegClassFor(RetVT));
  assert(Register::isVirtualRegister(Op0) &&
         "Cannot yet extract from physregs");
  const TargetRegisterClass *RC = MRI.getRegClass(Op0);
  MRI.constrainRegClass(Op0, TRI.getSubClassWithSubReg(RC, Idx));
  BuildMI(*FuncInfo.MBB, FuncInfo.InsertPt, DbgLoc, TII.get(TargetOpcode::COPY),
          ResultReg).addReg(Op0, getKillRegState(Op0IsKill), Idx);
  return ResultReg;
}

/// Emit MachineInstrs to compute the value of Op with all but the least
/// significant bit set to zero.
Register FastISel::fastEmitZExtFromI1(MVT VT, unsigned Op0, bool Op0IsKill) {
  return fastEmit_ri(VT, VT, ISD::AND, Op0, Op0IsKill, 1);
}

/// HandlePHINodesInSuccessorBlocks - Handle PHI nodes in successor blocks.
/// Emit code to ensure constants are copied into registers when needed.
/// Remember the virtual registers that need to be added to the Machine PHI
/// nodes as input. We cannot just directly add them, because expansion
/// might result in multiple MBB's for one BB. As such, the start of the
/// BB might correspond to a different MBB than the end.
bool FastISel::handlePHINodesInSuccessorBlocks(const BasicBlock *LLVMBB) {
  const Instruction *TI = LLVMBB->getTerminator();

  SmallPtrSet<MachineBasicBlock *, 4> SuccsHandled;
  FuncInfo.OrigNumPHINodesToUpdate = FuncInfo.PHINodesToUpdate.size();

  // Check successor nodes' PHI nodes that expect a constant to be available
  // from this block.
  for (unsigned succ = 0, e = TI->getNumSuccessors(); succ != e; ++succ) {
    const BasicBlock *SuccBB = TI->getSuccessor(succ);
    if (!isa<PHINode>(SuccBB->begin()))
      continue;
    MachineBasicBlock *SuccMBB = FuncInfo.MBBMap[SuccBB];

    // If this terminator has multiple identical successors (common for
    // switches), only handle each succ once.
    if (!SuccsHandled.insert(SuccMBB).second)
      continue;

    MachineBasicBlock::iterator MBBI = SuccMBB->begin();

    // At this point we know that there is a 1-1 correspondence between LLVM
    // PHI nodes and Machine PHI nodes, but the incoming operands have not been
    // emitted yet.
    for (const PHINode &PN : SuccBB->phis()) {
      // Ignore dead PHIs.
      if (PN.use_empty())
        continue;

      // Only handle legal types. Two interesting things to note here. First,
      // by bailing out early, we may leave behind some dead instructions,
      // since SelectionDAG's HandlePHINodesInSuccessorBlocks will insert its
      // own moves. Second, this check is necessary because FastISel doesn't
      // use CreateRegs to create registers, so it always creates
      // exactly one register for each non-void instruction.
      EVT VT = TLI.getValueType(DL, PN.getType(), /*AllowUnknown=*/true);
      if (VT == MVT::Other || !TLI.isTypeLegal(VT)) {
        // Handle integer promotions, though, because they're common and easy.
        if (!(VT == MVT::i1 || VT == MVT::i8 || VT == MVT::i16)) {
          FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
          return false;
        }
      }

      const Value *PHIOp = PN.getIncomingValueForBlock(LLVMBB);

      // Set the DebugLoc for the copy. Prefer the location of the operand
      // if there is one; use the location of the PHI otherwise.
      DbgLoc = PN.getDebugLoc();
      if (const auto *Inst = dyn_cast<Instruction>(PHIOp))
        DbgLoc = Inst->getDebugLoc();

      Register Reg = getRegForValue(PHIOp);
      if (!Reg) {
        FuncInfo.PHINodesToUpdate.resize(FuncInfo.OrigNumPHINodesToUpdate);
        return false;
      }
      FuncInfo.PHINodesToUpdate.push_back(std::make_pair(&*MBBI++, Reg));
      DbgLoc = DebugLoc();
    }
  }

  return true;
}

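/// Try to fold the single-use load LI into the machine instruction that uses
/// its value, by walking the chain of single-use instructions from LI to
/// FoldInst and then delegating to the target's tryToFoldLoadIntoMI hook.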
bool FastISel::tryToFoldLoad(const LoadInst *LI, const Instruction *FoldInst) {
  assert(LI->hasOneUse() &&
         "tryToFoldLoad expected a LoadInst with a single use");
  // We know that the load has a single use, but don't know what it is. If it
  // isn't one of the folded instructions, then we can't succeed here. Handle
  // this by scanning the single-use users of the load until we get to
  // FoldInst.
  unsigned MaxUsers = 6; // Don't scan down huge single-use chains of instrs.

  const Instruction *TheUser = LI->user_back();
  while (TheUser != FoldInst && // Scan the use chain until we find FoldInst.
         // Stay in the right block.
         TheUser->getParent() == FoldInst->getParent() &&
         --MaxUsers) { // Don't scan too far.
    // If there are multiple or no uses of this instruction, then bail out.
    if (!TheUser->hasOneUse())
      return false;

    TheUser = TheUser->user_back();
  }

  // If we didn't find the fold instruction, then we failed to collapse the
  // sequence.
  if (TheUser != FoldInst)
    return false;

  // Don't try to fold volatile loads. Target has to deal with alignment
  // constraints.
  if (LI->isVolatile())
    return false;

  // Figure out which vreg this is going into. If there is no assigned vreg yet
  // then there actually was no reference to it. Perhaps the load is referenced
  // by a dead instruction.
  Register LoadReg = getRegForValue(LI);
  if (!LoadReg)
    return false;

  // We can't fold if this vreg has no uses or more than one use. Multiple uses
  // may mean that the instruction got lowered to multiple MIs, or the use of
  // the loaded value ended up being multiple operands of the result.
  if (!MRI.hasOneUse(LoadReg))
    return false;

  MachineRegisterInfo::reg_iterator RI = MRI.reg_begin(LoadReg);
  MachineInstr *User = RI->getParent();

  // Set the insertion point properly. Folding the load can cause generation of
  // other random instructions (like sign extends) for addressing modes; make
  // sure they get inserted in a logical place before the new instruction.
  FuncInfo.InsertPt = User;
  FuncInfo.MBB = User->getParent();

  // Ask the target to try folding the load.
  return tryToFoldLoadIntoMI(User, RI.getOperandNo(), LI);
}

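/// Return true if Add can be folded into the address computation of GEP:
/// it must be an add with a constant right-hand side, produce a value of the
/// same bit width as the GEP, and live in the same basic block.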
bool FastISel::canFoldAddIntoGEP(const User *GEP, const Value *Add) {
  // Must be an add.
  if (!isa<AddOperator>(Add))
    return false;
  // Type size needs to match.
  if (DL.getTypeSizeInBits(GEP->getType()) !=
      DL.getTypeSizeInBits(Add->getType()))
    return false;
  // Must be in the same basic block.
  if (isa<Instruction>(Add) &&
      FuncInfo.MBBMap[cast<Instruction>(Add)->getParent()] != FuncInfo.MBB)
    return false;
  // Must have a constant operand.
  return isa<ConstantInt>(cast<AddOperator>(Add)->getOperand(1));
}

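/// Build a MachineMemOperand describing the memory access performed by the
/// load or store instruction I, falling back to the ABI type alignment when
/// the IR specifies none. Returns nullptr for any other instruction.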
MachineMemOperand *
FastISel::createMachineMemOperandFor(const Instruction *I) const {
  const Value *Ptr;
  Type *ValTy;
  MaybeAlign Alignment;
  MachineMemOperand::Flags Flags;
  bool IsVolatile;

  if (const auto *LI = dyn_cast<LoadInst>(I)) {
    Alignment = LI->getAlign();
    IsVolatile = LI->isVolatile();
    Flags = MachineMemOperand::MOLoad;
    Ptr = LI->getPointerOperand();
    ValTy = LI->getType();
  } else if (const auto *SI = dyn_cast<StoreInst>(I)) {
    Alignment = SI->getAlign();
    IsVolatile = SI->isVolatile();
    Flags = MachineMemOperand::MOStore;
    Ptr = SI->getPointerOperand();
    ValTy = SI->getValueOperand()->getType();
  } else
    return nullptr;

  bool IsNonTemporal = I->hasMetadata(LLVMContext::MD_nontemporal);
  bool IsInvariant = I->hasMetadata(LLVMContext::MD_invariant_load);
  bool IsDereferenceable = I->hasMetadata(LLVMContext::MD_dereferenceable);
  const MDNode *Ranges = I->getMetadata(LLVMContext::MD_range);

  AAMDNodes AAInfo;
  I->getAAMetadata(AAInfo);

  if (!Alignment) // Ensure that codegen never sees alignment 0.
    Alignment = DL.getABITypeAlign(ValTy);

  unsigned Size = DL.getTypeStoreSize(ValTy);

  if (IsVolatile)
    Flags |= MachineMemOperand::MOVolatile;
  if (IsNonTemporal)
    Flags |= MachineMemOperand::MONonTemporal;
  if (IsDereferenceable)
    Flags |= MachineMemOperand::MODereferenceable;
  if (IsInvariant)
    Flags |= MachineMemOperand::MOInvariant;

  return FuncInfo.MF->getMachineMemOperand(MachinePointerInfo(Ptr), Flags, Size,
                                           *Alignment, AAInfo, Ranges);
}

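/// If both operands of the compare are identical, fold the predicate to one
/// that no longer depends on the operand values. With x compared against x,
/// an ordered float predicate reduces to an ordered/unordered check or a
/// constant (e.g. fcmp oeq x, x is true exactly when x is not NaN, i.e.
/// ord), and every integer predicate reduces to a constant (e.g. icmp eq
/// x, x is always true).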
CmpInst::Predicate FastISel::optimizeCmpPredicate(const CmpInst *CI) const {
  // If both operands are the same, then try to optimize or fold the cmp.
  CmpInst::Predicate Predicate = CI->getPredicate();
  if (CI->getOperand(0) != CI->getOperand(1))
    return Predicate;

  switch (Predicate) {
  default: llvm_unreachable("Invalid predicate!");
  case CmpInst::FCMP_FALSE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OEQ: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_OGT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OGE: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_OLT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_OLE: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_ONE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::FCMP_ORD: Predicate = CmpInst::FCMP_ORD; break;
  case CmpInst::FCMP_UNO: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_UEQ: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::FCMP_UGT: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::FCMP_ULT: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::FCMP_UNE: Predicate = CmpInst::FCMP_UNO; break;
  case CmpInst::FCMP_TRUE: Predicate = CmpInst::FCMP_TRUE; break;

  case CmpInst::ICMP_EQ: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_NE: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_UGE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_ULT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_ULE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_SGT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SGE: Predicate = CmpInst::FCMP_TRUE; break;
  case CmpInst::ICMP_SLT: Predicate = CmpInst::FCMP_FALSE; break;
  case CmpInst::ICMP_SLE: Predicate = CmpInst::FCMP_TRUE; break;
  }

  return Predicate;
}
