//===- ARMInstructionSelector.cpp ----------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
/// \file
/// This file implements the targeting of the InstructionSelector class for ARM.
/// \todo This should be generated by TableGen.
//===----------------------------------------------------------------------===//

#include "ARMRegisterBankInfo.h"
#include "ARMSubtarget.h"
#include "ARMTargetMachine.h"
#include "llvm/CodeGen/GlobalISel/GIMatchTableExecutorImpl.h"
#include "llvm/CodeGen/GlobalISel/InstructionSelector.h"
#include "llvm/CodeGen/MachineConstantPool.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/IR/IntrinsicsARM.h"
#include "llvm/Support/Debug.h"

#define DEBUG_TYPE "arm-isel"

using namespace llvm;

namespace {

#define GET_GLOBALISEL_PREDICATE_BITSET
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATE_BITSET

class ARMInstructionSelector : public InstructionSelector {
public:
  ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                         const ARMSubtarget &STI,
                         const ARMRegisterBankInfo &RBI);

  bool select(MachineInstr &I) override;
  static const char *getName() { return DEBUG_TYPE; }

private:
  bool selectImpl(MachineInstr &I, CodeGenCoverage &CoverageInfo) const;

  struct CmpConstants;
  struct InsertInfo;

  bool selectCmp(CmpConstants Helper, MachineInstrBuilder &MIB,
                 MachineRegisterInfo &MRI) const;

  // Helper for inserting a comparison sequence that sets \p ResReg to either 1
  // if \p LHSReg and \p RHSReg are in the relationship defined by \p Cond, or
  // \p PrevRes otherwise. In essence, it computes PrevRes OR (LHS Cond RHS).
  bool insertComparison(CmpConstants Helper, InsertInfo I, unsigned ResReg,
                        ARMCC::CondCodes Cond, unsigned LHSReg, unsigned RHSReg,
                        unsigned PrevRes) const;

  // Set \p DestReg to \p Constant.
  void putConstant(InsertInfo I, unsigned DestReg, unsigned Constant) const;

  bool selectGlobal(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectSelect(MachineInstrBuilder &MIB, MachineRegisterInfo &MRI) const;
  bool selectShift(unsigned ShiftOpc, MachineInstrBuilder &MIB) const;

  // Check if the types match and both operands have the expected size and
  // register bank.
  bool validOpRegPair(MachineRegisterInfo &MRI, unsigned LHS, unsigned RHS,
                      unsigned ExpectedSize, unsigned ExpectedRegBankID) const;

  // Check if the register has the expected size and register bank.
  bool validReg(MachineRegisterInfo &MRI, unsigned Reg, unsigned ExpectedSize,
                unsigned ExpectedRegBankID) const;

  const ARMBaseInstrInfo &TII;
  const ARMBaseRegisterInfo &TRI;
  const ARMBaseTargetMachine &TM;
  const ARMRegisterBankInfo &RBI;
  const ARMSubtarget &STI;

  // FIXME: This is necessary because DAGISel uses "Subtarget->" and GlobalISel
  // uses "STI." in the code generated by TableGen. If we want to reuse some of
  // the custom C++ predicates written for DAGISel, we need to have both around.
  const ARMSubtarget *Subtarget = &STI;

  // Store the opcodes that we might need, so we don't have to check what kind
  // of subtarget (ARM vs Thumb) we have all the time.
  struct OpcodeCache {
    unsigned ZEXT16;
    unsigned SEXT16;

    unsigned ZEXT8;
    unsigned SEXT8;

    // Used for implementing ZEXT/SEXT from i1
    unsigned AND;
    unsigned RSB;

    unsigned STORE32;
    unsigned LOAD32;

    unsigned STORE16;
    unsigned LOAD16;

    unsigned STORE8;
    unsigned LOAD8;

    unsigned ADDrr;
    unsigned ADDri;

    // Used for G_ICMP
    unsigned CMPrr;
    unsigned MOVi;
    unsigned MOVCCi;

    // Used for G_SELECT
    unsigned MOVCCr;

    unsigned TSTri;
    unsigned Bcc;

    // Used for G_GLOBAL_VALUE
    unsigned MOVi32imm;
    unsigned ConstPoolLoad;
    unsigned MOV_ga_pcrel;
    unsigned LDRLIT_ga_pcrel;
    unsigned LDRLIT_ga_abs;

    OpcodeCache(const ARMSubtarget &STI);
  } const Opcodes;

  // Select the opcode for simple extensions (that translate to a single SXT/UXT
  // instruction). Extension operations more complicated than that should not
  // invoke this. Returns the original opcode if it doesn't know how to select a
  // better one.
  unsigned selectSimpleExtOpc(unsigned Opc, unsigned Size) const;

  // Select the opcode for simple loads and stores. Returns the original opcode
  // if it doesn't know how to select a better one.
  unsigned selectLoadStoreOpCode(unsigned Opc, unsigned RegBank,
                                 unsigned Size) const;

  void renderVFPF32Imm(MachineInstrBuilder &New, const MachineInstr &Old,
                       int OpIdx = -1) const;
  void renderVFPF64Imm(MachineInstrBuilder &New, const MachineInstr &Old,
                       int OpIdx = -1) const;

#define GET_GLOBALISEL_PREDICATES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_DECL

// We declare the temporaries used by selectImpl() in the class to minimize the
// cost of constructing placeholder values.
#define GET_GLOBALISEL_TEMPORARIES_DECL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_DECL
};
} // end anonymous namespace

namespace llvm {
InstructionSelector *
createARMInstructionSelector(const ARMBaseTargetMachine &TM,
                             const ARMSubtarget &STI,
                             const ARMRegisterBankInfo &RBI) {
  return new ARMInstructionSelector(TM, STI, RBI);
}
} // end namespace llvm

#define GET_GLOBALISEL_IMPL
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_IMPL

ARMInstructionSelector::ARMInstructionSelector(const ARMBaseTargetMachine &TM,
                                               const ARMSubtarget &STI,
                                               const ARMRegisterBankInfo &RBI)
    : TII(*STI.getInstrInfo()), TRI(*STI.getRegisterInfo()), TM(TM), RBI(RBI),
      STI(STI), Opcodes(STI),
#define GET_GLOBALISEL_PREDICATES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_PREDICATES_INIT
#define GET_GLOBALISEL_TEMPORARIES_INIT
#include "ARMGenGlobalISel.inc"
#undef GET_GLOBALISEL_TEMPORARIES_INIT
{
}

static const TargetRegisterClass *guessRegClass(unsigned Reg,
                                                MachineRegisterInfo &MRI,
                                                const TargetRegisterInfo &TRI,
                                                const RegisterBankInfo &RBI) {
  const RegisterBank *RegBank = RBI.getRegBank(Reg, MRI, TRI);
  assert(RegBank && "Can't get reg bank for virtual register");

  const unsigned Size = MRI.getType(Reg).getSizeInBits();
  assert((RegBank->getID() == ARM::GPRRegBankID ||
          RegBank->getID() == ARM::FPRRegBankID) &&
         "Unsupported reg bank");

  if (RegBank->getID() == ARM::FPRRegBankID) {
    if (Size == 32)
      return &ARM::SPRRegClass;
    else if (Size == 64)
      return &ARM::DPRRegClass;
    else if (Size == 128)
      return &ARM::QPRRegClass;
    else
      llvm_unreachable("Unsupported destination size");
  }

  return &ARM::GPRRegClass;
}

static bool selectCopy(MachineInstr &I, const TargetInstrInfo &TII,
                       MachineRegisterInfo &MRI, const TargetRegisterInfo &TRI,
                       const RegisterBankInfo &RBI) {
  Register DstReg = I.getOperand(0).getReg();
  if (DstReg.isPhysical())
    return true;

  const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);

  // No need to constrain SrcReg. It will get constrained when
  // we hit another of its uses or its defs.
  // Copies do not have constraints.
  if (!RBI.constrainGenericRegister(DstReg, *RC, MRI)) {
    LLVM_DEBUG(dbgs() << "Failed to constrain " << TII.getName(I.getOpcode())
                      << " operand\n");
    return false;
  }
  return true;
}

static bool selectMergeValues(MachineInstrBuilder &MIB,
                              const ARMBaseInstrInfo &TII,
                              MachineRegisterInfo &MRI,
                              const TargetRegisterInfo &TRI,
                              const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() && "Can't select merge without VFP");

  // We only support G_MERGE_VALUES as a way to stick together two scalar GPRs
  // into one DPR.
  Register VReg0 = MIB.getReg(0);
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg1 = MIB.getReg(1);
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");
  Register VReg2 = MIB.getReg(2);
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_MERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVDRR));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

static bool selectUnmergeValues(MachineInstrBuilder &MIB,
                                const ARMBaseInstrInfo &TII,
                                MachineRegisterInfo &MRI,
                                const TargetRegisterInfo &TRI,
                                const RegisterBankInfo &RBI) {
  assert(TII.getSubtarget().hasVFP2Base() &&
         "Can't select unmerge without VFP");

  // We only support G_UNMERGE_VALUES as a way to break up one DPR into two
  // GPRs.
  Register VReg0 = MIB.getReg(0);
  (void)VReg0;
  assert(MRI.getType(VReg0).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg0, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg1 = MIB.getReg(1);
  (void)VReg1;
  assert(MRI.getType(VReg1).getSizeInBits() == 32 &&
         RBI.getRegBank(VReg1, MRI, TRI)->getID() == ARM::GPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");
  Register VReg2 = MIB.getReg(2);
  (void)VReg2;
  assert(MRI.getType(VReg2).getSizeInBits() == 64 &&
         RBI.getRegBank(VReg2, MRI, TRI)->getID() == ARM::FPRRegBankID &&
         "Unsupported operand for G_UNMERGE_VALUES");

  MIB->setDesc(TII.get(ARM::VMOVRRD));
  MIB.add(predOps(ARMCC::AL));

  return true;
}

ARMInstructionSelector::OpcodeCache::OpcodeCache(const ARMSubtarget &STI) {
  bool isThumb = STI.isThumb();

  using namespace TargetOpcode;

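// Note: despite its name, STORE_OPCODE is used for all of the cached opcodes
// (loads, compares, etc.); it simply stores the ARM or Thumb-2 (t2-prefixed)
// variant of OPC into the cache member VAR.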
#define STORE_OPCODE(VAR, OPC) VAR = isThumb ? ARM::t2##OPC : ARM::OPC
  STORE_OPCODE(SEXT16, SXTH);
  STORE_OPCODE(ZEXT16, UXTH);

  STORE_OPCODE(SEXT8, SXTB);
  STORE_OPCODE(ZEXT8, UXTB);

  STORE_OPCODE(AND, ANDri);
  STORE_OPCODE(RSB, RSBri);

  STORE_OPCODE(STORE32, STRi12);
  STORE_OPCODE(LOAD32, LDRi12);

  // LDRH/STRH are special: the ARM-mode versions use addrmode3 (register or
  // 8-bit immediate offset) rather than a 12-bit immediate, so the ARM and
  // Thumb-2 names don't line up and STORE_OPCODE can't be used for them.
  STORE16 = isThumb ? ARM::t2STRHi12 : ARM::STRH;
  LOAD16 = isThumb ? ARM::t2LDRHi12 : ARM::LDRH;

  STORE_OPCODE(STORE8, STRBi12);
  STORE_OPCODE(LOAD8, LDRBi12);

  STORE_OPCODE(ADDrr, ADDrr);
  STORE_OPCODE(ADDri, ADDri);

  STORE_OPCODE(CMPrr, CMPrr);
  STORE_OPCODE(MOVi, MOVi);
  STORE_OPCODE(MOVCCi, MOVCCi);

  STORE_OPCODE(MOVCCr, MOVCCr);

  STORE_OPCODE(TSTri, TSTri);
  STORE_OPCODE(Bcc, Bcc);

  STORE_OPCODE(MOVi32imm, MOVi32imm);
  ConstPoolLoad = isThumb ? ARM::t2LDRpci : ARM::LDRi12;
  STORE_OPCODE(MOV_ga_pcrel, MOV_ga_pcrel);
  LDRLIT_ga_pcrel = isThumb ? ARM::tLDRLIT_ga_pcrel : ARM::LDRLIT_ga_pcrel;
  LDRLIT_ga_abs = isThumb ? ARM::tLDRLIT_ga_abs : ARM::LDRLIT_ga_abs;
#undef STORE_OPCODE
}

unsigned ARMInstructionSelector::selectSimpleExtOpc(unsigned Opc,
                                                    unsigned Size) const {
  using namespace TargetOpcode;

  if (Size != 8 && Size != 16)
    return Opc;

  if (Opc == G_SEXT)
    return Size == 8 ? Opcodes.SEXT8 : Opcodes.SEXT16;

  if (Opc == G_ZEXT)
    return Size == 8 ? Opcodes.ZEXT8 : Opcodes.ZEXT16;

  return Opc;
}

unsigned ARMInstructionSelector::selectLoadStoreOpCode(unsigned Opc,
                                                       unsigned RegBank,
                                                       unsigned Size) const {
  bool isStore = Opc == TargetOpcode::G_STORE;

  if (RegBank == ARM::GPRRegBankID) {
    switch (Size) {
    case 1:
    case 8:
      return isStore ? Opcodes.STORE8 : Opcodes.LOAD8;
    case 16:
      return isStore ? Opcodes.STORE16 : Opcodes.LOAD16;
    case 32:
      return isStore ? Opcodes.STORE32 : Opcodes.LOAD32;
    default:
      return Opc;
    }
  }

  if (RegBank == ARM::FPRRegBankID) {
    switch (Size) {
    case 32:
      return isStore ? ARM::VSTRS : ARM::VLDRS;
    case 64:
      return isStore ? ARM::VSTRD : ARM::VLDRD;
    default:
      return Opc;
    }
  }

  return Opc;
}

// When lowering comparisons, we sometimes need to perform two compares instead
// of just one. Get the condition codes for both comparisons. If only one is
// needed, the second member of the pair is ARMCC::AL.
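//
// For floating point compares, the flags after VCMP + FMSTAT describe exactly
// one of four outcomes: EQ for equal, MI for less than, GT for greater than
// and VS for unordered. Predicates covering two of those outcomes (ONE is
// "greater or less", UEQ is "equal or unordered") are the ones that need the
// second comparison.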
static std::pair<ARMCC::CondCodes, ARMCC::CondCodes>
getComparePreds(CmpInst::Predicate Pred) {
  std::pair<ARMCC::CondCodes, ARMCC::CondCodes> Preds = {ARMCC::AL, ARMCC::AL};
  switch (Pred) {
  case CmpInst::FCMP_ONE:
    Preds = {ARMCC::GT, ARMCC::MI};
    break;
  case CmpInst::FCMP_UEQ:
    Preds = {ARMCC::EQ, ARMCC::VS};
    break;
  case CmpInst::ICMP_EQ:
  case CmpInst::FCMP_OEQ:
    Preds.first = ARMCC::EQ;
    break;
  case CmpInst::ICMP_SGT:
  case CmpInst::FCMP_OGT:
    Preds.first = ARMCC::GT;
    break;
  case CmpInst::ICMP_SGE:
  case CmpInst::FCMP_OGE:
    Preds.first = ARMCC::GE;
    break;
  case CmpInst::ICMP_UGT:
  case CmpInst::FCMP_UGT:
    Preds.first = ARMCC::HI;
    break;
  case CmpInst::FCMP_OLT:
    Preds.first = ARMCC::MI;
    break;
  case CmpInst::ICMP_ULE:
  case CmpInst::FCMP_OLE:
    Preds.first = ARMCC::LS;
    break;
  case CmpInst::FCMP_ORD:
    Preds.first = ARMCC::VC;
    break;
  case CmpInst::FCMP_UNO:
    Preds.first = ARMCC::VS;
    break;
  case CmpInst::FCMP_UGE:
    Preds.first = ARMCC::PL;
    break;
  case CmpInst::ICMP_SLT:
  case CmpInst::FCMP_ULT:
    Preds.first = ARMCC::LT;
    break;
  case CmpInst::ICMP_SLE:
  case CmpInst::FCMP_ULE:
    Preds.first = ARMCC::LE;
    break;
  case CmpInst::FCMP_UNE:
  case CmpInst::ICMP_NE:
    Preds.first = ARMCC::NE;
    break;
  case CmpInst::ICMP_UGE:
    Preds.first = ARMCC::HS;
    break;
  case CmpInst::ICMP_ULT:
    Preds.first = ARMCC::LO;
    break;
  default:
    break;
  }
  assert(Preds.first != ARMCC::AL && "No comparisons needed?");
  return Preds;
}

struct ARMInstructionSelector::CmpConstants {
  CmpConstants(unsigned CmpOpcode, unsigned FlagsOpcode, unsigned SelectOpcode,
               unsigned OpRegBank, unsigned OpSize)
      : ComparisonOpcode(CmpOpcode), ReadFlagsOpcode(FlagsOpcode),
        SelectResultOpcode(SelectOpcode), OperandRegBankID(OpRegBank),
        OperandSize(OpSize) {}

  // The opcode used for performing the comparison.
  const unsigned ComparisonOpcode;

  // The opcode used for reading the flags set by the comparison. May be
  // ARM::INSTRUCTION_LIST_END if we don't need to read the flags.
  const unsigned ReadFlagsOpcode;

  // The opcode used for materializing the result of the comparison.
  const unsigned SelectResultOpcode;

  // The assumed register bank ID for the operands.
  const unsigned OperandRegBankID;

  // The assumed size in bits for the operands.
  const unsigned OperandSize;
};

struct ARMInstructionSelector::InsertInfo {
  InsertInfo(MachineInstrBuilder &MIB)
      : MBB(*MIB->getParent()), InsertBefore(std::next(MIB->getIterator())),
        DbgLoc(MIB->getDebugLoc()) {}

  MachineBasicBlock &MBB;
  const MachineBasicBlock::instr_iterator InsertBefore;
  const DebugLoc &DbgLoc;
};

void ARMInstructionSelector::putConstant(InsertInfo I, unsigned DestReg,
                                         unsigned Constant) const {
  (void)BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Opcodes.MOVi))
      .addDef(DestReg)
      .addImm(Constant)
      .add(predOps(ARMCC::AL))
      .add(condCodeOp());
}

bool ARMInstructionSelector::validOpRegPair(MachineRegisterInfo &MRI,
                                            unsigned LHSReg, unsigned RHSReg,
                                            unsigned ExpectedSize,
                                            unsigned ExpectedRegBankID) const {
  return MRI.getType(LHSReg) == MRI.getType(RHSReg) &&
         validReg(MRI, LHSReg, ExpectedSize, ExpectedRegBankID) &&
         validReg(MRI, RHSReg, ExpectedSize, ExpectedRegBankID);
}

bool ARMInstructionSelector::validReg(MachineRegisterInfo &MRI, unsigned Reg,
                                      unsigned ExpectedSize,
                                      unsigned ExpectedRegBankID) const {
  if (MRI.getType(Reg).getSizeInBits() != ExpectedSize) {
    LLVM_DEBUG(dbgs() << "Unexpected size for register");
    return false;
  }

  if (RBI.getRegBank(Reg, MRI, TRI)->getID() != ExpectedRegBankID) {
    LLVM_DEBUG(dbgs() << "Unexpected register bank for register");
    return false;
  }

  return true;
}

bool ARMInstructionSelector::selectCmp(CmpConstants Helper,
                                       MachineInstrBuilder &MIB,
                                       MachineRegisterInfo &MRI) const {
  const InsertInfo I(MIB);

  auto ResReg = MIB.getReg(0);
  if (!validReg(MRI, ResReg, 1, ARM::GPRRegBankID))
    return false;

  auto Cond =
      static_cast<CmpInst::Predicate>(MIB->getOperand(1).getPredicate());
  if (Cond == CmpInst::FCMP_TRUE || Cond == CmpInst::FCMP_FALSE) {
    putConstant(I, ResReg, Cond == CmpInst::FCMP_TRUE ? 1 : 0);
    MIB->eraseFromParent();
    return true;
  }

  auto LHSReg = MIB.getReg(2);
  auto RHSReg = MIB.getReg(3);
  if (!validOpRegPair(MRI, LHSReg, RHSReg, Helper.OperandSize,
                      Helper.OperandRegBankID))
    return false;

  auto ARMConds = getComparePreds(Cond);
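  // Start the result at 0; each insertComparison below conditionally
  // overwrites it with 1, so the final value is the OR of the individual
  // conditions.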
  auto ZeroReg = MRI.createVirtualRegister(&ARM::GPRRegClass);
  putConstant(I, ZeroReg, 0);

  if (ARMConds.second == ARMCC::AL) {
    // Simple case, we only need one comparison and we're done.
    if (!insertComparison(Helper, I, ResReg, ARMConds.first, LHSReg, RHSReg,
                          ZeroReg))
      return false;
  } else {
    // Not so simple, we need two successive comparisons.
    auto IntermediateRes = MRI.createVirtualRegister(&ARM::GPRRegClass);
    if (!insertComparison(Helper, I, IntermediateRes, ARMConds.first, LHSReg,
                          RHSReg, ZeroReg))
      return false;
    if (!insertComparison(Helper, I, ResReg, ARMConds.second, LHSReg, RHSReg,
                          IntermediateRes))
      return false;
  }

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::insertComparison(CmpConstants Helper, InsertInfo I,
                                              unsigned ResReg,
                                              ARMCC::CondCodes Cond,
                                              unsigned LHSReg, unsigned RHSReg,
                                              unsigned PrevRes) const {
  // Perform the comparison.
  auto CmpI =
      BuildMI(I.MBB, I.InsertBefore, I.DbgLoc, TII.get(Helper.ComparisonOpcode))
          .addUse(LHSReg)
          .addUse(RHSReg)
          .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Read the comparison flags (if necessary).
  if (Helper.ReadFlagsOpcode != ARM::INSTRUCTION_LIST_END) {
    auto ReadI = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                         TII.get(Helper.ReadFlagsOpcode))
                     .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*ReadI, TII, TRI, RBI))
      return false;
  }

  // Select either 1 or the previous result based on the value of the flags.
  auto Mov1I = BuildMI(I.MBB, I.InsertBefore, I.DbgLoc,
                       TII.get(Helper.SelectResultOpcode))
                   .addDef(ResReg)
                   .addUse(PrevRes)
                   .addImm(1)
                   .add(predOps(Cond, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  return true;
}

bool ARMInstructionSelector::selectGlobal(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  if ((STI.isROPI() || STI.isRWPI()) && !STI.isTargetELF()) {
    LLVM_DEBUG(dbgs() << "ROPI and RWPI only supported for ELF\n");
    return false;
  }

  auto GV = MIB->getOperand(1).getGlobal();
  if (GV->isThreadLocal()) {
    LLVM_DEBUG(dbgs() << "TLS variables not supported yet\n");
    return false;
  }

  auto &MBB = *MIB->getParent();
  auto &MF = *MBB.getParent();

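  // With MOVT available (and allowed), addresses can be materialized with a
  // MOVW/MOVT pair instead of a constant-pool or literal load.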
  bool UseMovt = STI.useMovt();

  LLT PtrTy = MRI.getType(MIB->getOperand(0).getReg());
  const Align Alignment(4);

  auto addOpsForConstantPoolLoad = [&MF, Alignment, PtrTy](
                                       MachineInstrBuilder &MIB,
                                       const GlobalValue *GV, bool IsSBREL) {
    assert((MIB->getOpcode() == ARM::LDRi12 ||
            MIB->getOpcode() == ARM::t2LDRpci) &&
           "Unsupported instruction");
    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        // For SB relative entries we need a target-specific constant pool.
        // Otherwise, just use a regular constant pool entry.
        IsSBREL
            ? ConstPool->getConstantPoolIndex(
                  ARMConstantPoolConstant::Create(GV, ARMCP::SBREL), Alignment)
            : ConstPool->getConstantPoolIndex(GV, Alignment);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(MF.getMachineMemOperand(
            MachinePointerInfo::getConstantPool(MF), MachineMemOperand::MOLoad,
            PtrTy, Alignment));
    if (MIB->getOpcode() == ARM::LDRi12)
      MIB.addImm(0);
    MIB.add(predOps(ARMCC::AL));
  };

  auto addGOTMemOperand = [this, &MF, Alignment](MachineInstrBuilder &MIB) {
    MIB.addMemOperand(MF.getMachineMemOperand(
        MachinePointerInfo::getGOT(MF), MachineMemOperand::MOLoad,
        TM.getProgramPointerSize(), Alignment));
  };

  if (TM.isPositionIndependent()) {
    bool Indirect = STI.isGVIndirectSymbol(GV);

    // For ARM mode, we have different pseudoinstructions for direct accesses
    // and indirect accesses, and the ones for indirect accesses include the
    // load from GOT. For Thumb mode, we use the same pseudoinstruction for both
    // direct and indirect accesses, and we need to manually generate the load
    // from GOT.
    bool UseOpcodeThatLoads = Indirect && !STI.isThumb();

    // FIXME: Taking advantage of MOVT for ELF is pretty involved, so we don't
    // support it yet. See PR28229.
    unsigned Opc =
        UseMovt && !STI.isTargetELF()
            ? (UseOpcodeThatLoads ? (unsigned)ARM::MOV_ga_pcrel_ldr
                                  : Opcodes.MOV_ga_pcrel)
            : (UseOpcodeThatLoads ? (unsigned)ARM::LDRLIT_ga_pcrel_ldr
                                  : Opcodes.LDRLIT_ga_pcrel);
    MIB->setDesc(TII.get(Opc));

    int TargetFlags = ARMII::MO_NO_FLAG;
    if (STI.isTargetDarwin())
      TargetFlags |= ARMII::MO_NONLAZY;
    if (STI.isGVInGOT(GV))
      TargetFlags |= ARMII::MO_GOT;
    MIB->getOperand(1).setTargetFlags(TargetFlags);

    if (Indirect) {
      if (!UseOpcodeThatLoads) {
        auto ResultReg = MIB.getReg(0);
        auto AddressReg = MRI.createVirtualRegister(&ARM::GPRRegClass);

        MIB->getOperand(0).setReg(AddressReg);

        auto InsertBefore = std::next(MIB->getIterator());
        auto MIBLoad = BuildMI(MBB, InsertBefore, MIB->getDebugLoc(),
                               TII.get(Opcodes.LOAD32))
                           .addDef(ResultReg)
                           .addReg(AddressReg)
                           .addImm(0)
                           .add(predOps(ARMCC::AL));
        addGOTMemOperand(MIBLoad);

        if (!constrainSelectedInstRegOperands(*MIBLoad, TII, TRI, RBI))
          return false;
      } else {
        addGOTMemOperand(MIB);
      }
    }

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  bool isReadOnly = STI.getTargetLowering()->isReadOnly(GV);
  if (STI.isROPI() && isReadOnly) {
    unsigned Opc = UseMovt ? Opcodes.MOV_ga_pcrel : Opcodes.LDRLIT_ga_pcrel;
    MIB->setDesc(TII.get(Opc));
    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }
  if (STI.isRWPI() && !isReadOnly) {
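    // RWPI addresses writable data as an offset from the static base register
    // (SB), which the ARM ABI assigns to R9.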
    auto Offset = MRI.createVirtualRegister(&ARM::GPRRegClass);
    MachineInstrBuilder OffsetMIB;
    if (UseMovt) {
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.MOVi32imm), Offset);
      OffsetMIB.addGlobalAddress(GV, /*Offset*/ 0, ARMII::MO_SBREL);
    } else {
      // Load the offset from the constant pool.
      OffsetMIB = BuildMI(MBB, *MIB, MIB->getDebugLoc(),
                          TII.get(Opcodes.ConstPoolLoad), Offset);
      addOpsForConstantPoolLoad(OffsetMIB, GV, /*IsSBREL*/ true);
    }
    if (!constrainSelectedInstRegOperands(*OffsetMIB, TII, TRI, RBI))
      return false;

    // Add the offset to the SB register.
    MIB->setDesc(TII.get(Opcodes.ADDrr));
    MIB->removeOperand(1);
    MIB.addReg(ARM::R9) // FIXME: don't hardcode R9
        .addReg(Offset)
        .add(predOps(ARMCC::AL))
        .add(condCodeOp());

    return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
  }

  if (STI.isTargetELF()) {
    if (UseMovt) {
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    } else {
      // Load the global's address from the constant pool.
      MIB->setDesc(TII.get(Opcodes.ConstPoolLoad));
      MIB->removeOperand(1);
      addOpsForConstantPoolLoad(MIB, GV, /*IsSBREL*/ false);
    }
  } else if (STI.isTargetMachO()) {
    if (UseMovt)
      MIB->setDesc(TII.get(Opcodes.MOVi32imm));
    else
      MIB->setDesc(TII.get(Opcodes.LDRLIT_ga_abs));
  } else {
    LLVM_DEBUG(dbgs() << "Object format not supported yet\n");
    return false;
  }

  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

bool ARMInstructionSelector::selectSelect(MachineInstrBuilder &MIB,
                                          MachineRegisterInfo &MRI) const {
  auto &MBB = *MIB->getParent();
  auto InsertBefore = std::next(MIB->getIterator());
  auto &DbgLoc = MIB->getDebugLoc();

  // Compare the condition to 1.
  auto CondReg = MIB.getReg(1);
  assert(validReg(MRI, CondReg, 1, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto CmpI = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.TSTri))
                  .addUse(CondReg)
                  .addImm(1)
                  .add(predOps(ARMCC::AL));
  if (!constrainSelectedInstRegOperands(*CmpI, TII, TRI, RBI))
    return false;

  // Move a value into the result register based on the result of the
  // comparison.
  auto ResReg = MIB.getReg(0);
  auto TrueReg = MIB.getReg(2);
  auto FalseReg = MIB.getReg(3);
  assert(validOpRegPair(MRI, ResReg, TrueReg, 32, ARM::GPRRegBankID) &&
         validOpRegPair(MRI, TrueReg, FalseReg, 32, ARM::GPRRegBankID) &&
         "Unsupported types for select operation");
  auto Mov1I = BuildMI(MBB, InsertBefore, DbgLoc, TII.get(Opcodes.MOVCCr))
                   .addDef(ResReg)
                   .addUse(TrueReg)
                   .addUse(FalseReg)
                   .add(predOps(ARMCC::EQ, ARM::CPSR));
  if (!constrainSelectedInstRegOperands(*Mov1I, TII, TRI, RBI))
    return false;

  MIB->eraseFromParent();
  return true;
}

bool ARMInstructionSelector::selectShift(unsigned ShiftOpc,
                                         MachineInstrBuilder &MIB) const {
  assert(!STI.isThumb() && "Unsupported subtarget");
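  // ARM-mode shifts are selected as a register-shifted MOV (MOVsr); the shift
  // kind (LSL, LSR or ASR) is passed as an immediate operand.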
  MIB->setDesc(TII.get(ARM::MOVsr));
  MIB.addImm(ShiftOpc);
  MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
  return constrainSelectedInstRegOperands(*MIB, TII, TRI, RBI);
}

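// Render an f32 G_FCONSTANT operand as the 8-bit encoded form used by the VFP
// immediate-move instructions (see ARM_AM::getFP32Imm). The asserts below
// assume this renderer is only invoked for encodable values.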
void ARMInstructionSelector::renderVFPF32Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst,
    int OpIdx) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         OpIdx == -1 && "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP32Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

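// Same as above, but for the f64 encoding (ARM_AM::getFP64Imm).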
void ARMInstructionSelector::renderVFPF64Imm(
    MachineInstrBuilder &NewInstBuilder, const MachineInstr &OldInst,
    int OpIdx) const {
  assert(OldInst.getOpcode() == TargetOpcode::G_FCONSTANT &&
         OpIdx == -1 && "Expected G_FCONSTANT");

  APFloat FPImmValue = OldInst.getOperand(1).getFPImm()->getValueAPF();
  int FPImmEncoding = ARM_AM::getFP64Imm(FPImmValue);
  assert(FPImmEncoding != -1 && "Invalid immediate value");

  NewInstBuilder.addImm(FPImmEncoding);
}

bool ARMInstructionSelector::select(MachineInstr &I) {
  assert(I.getParent() && "Instruction should be in a basic block!");
  assert(I.getParent()->getParent() && "Instruction should be in a function!");

  auto &MBB = *I.getParent();
  auto &MF = *MBB.getParent();
  auto &MRI = MF.getRegInfo();

  if (!isPreISelGenericOpcode(I.getOpcode())) {
    if (I.isCopy())
      return selectCopy(I, TII, MRI, TRI, RBI);

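    // Other non-generic instructions are already target instructions and need
    // no further selection here.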
    return true;
  }

  using namespace TargetOpcode;

  if (selectImpl(I, *CoverageInfo))
    return true;

  MachineInstrBuilder MIB{MF, I};
  bool isSExt = false;

  switch (I.getOpcode()) {
  case G_SEXT:
    isSExt = true;
    [[fallthrough]];
  case G_ZEXT: {
    assert(MRI.getType(I.getOperand(0).getReg()).getSizeInBits() <= 32 &&
           "Unsupported destination size for extension");

    LLT SrcTy = MRI.getType(I.getOperand(1).getReg());
    unsigned SrcSize = SrcTy.getSizeInBits();
    switch (SrcSize) {
    case 1: {
      // ZExt from i1 boils down to ANDing with 0x1; for SExt we additionally
      // negate that result (RSB from 0), turning 1 into -1 (all bits set).
      I.setDesc(TII.get(Opcodes.AND));
      MIB.addImm(1).add(predOps(ARMCC::AL)).add(condCodeOp());

      if (isSExt) {
        Register SExtResult = I.getOperand(0).getReg();

        // Use a new virtual register for the result of the AND
        Register AndResult = MRI.createVirtualRegister(&ARM::GPRRegClass);
        I.getOperand(0).setReg(AndResult);

        auto InsertBefore = std::next(I.getIterator());
        auto SubI =
            BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(Opcodes.RSB))
                .addDef(SExtResult)
                .addUse(AndResult)
                .addImm(0)
                .add(predOps(ARMCC::AL))
                .add(condCodeOp());
        if (!constrainSelectedInstRegOperands(*SubI, TII, TRI, RBI))
          return false;
      }
      break;
    }
    case 8:
    case 16: {
      unsigned NewOpc = selectSimpleExtOpc(I.getOpcode(), SrcSize);
      if (NewOpc == I.getOpcode())
        return false;
      I.setDesc(TII.get(NewOpc));
      MIB.addImm(0).add(predOps(ARMCC::AL));
      break;
    }
    default:
      LLVM_DEBUG(dbgs() << "Unsupported source size for extension");
      return false;
    }
    break;
  }
  case G_ANYEXT:
  case G_TRUNC: {
    // The high bits are undefined, so there's nothing special to do, just
    // treat it as a copy.
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() == ARM::FPRRegBankID) {
      // This should only happen in the obscure case where we have put a 64-bit
      // integer into a D register. Get it out of there and keep only the
      // interesting part.
      assert(I.getOpcode() == G_TRUNC && "Unsupported operand for G_ANYEXT");
      assert(DstRegBank.getID() == ARM::GPRRegBankID &&
             "Unsupported combination of register banks");
      assert(MRI.getType(SrcReg).getSizeInBits() == 64 && "Unsupported size");
      assert(MRI.getType(DstReg).getSizeInBits() <= 32 && "Unsupported size");

      Register IgnoredBits = MRI.createVirtualRegister(&ARM::GPRRegClass);
      auto InsertBefore = std::next(I.getIterator());
      auto MovI =
          BuildMI(MBB, InsertBefore, I.getDebugLoc(), TII.get(ARM::VMOVRRD))
              .addDef(DstReg)
              .addDef(IgnoredBits)
              .addUse(SrcReg)
              .add(predOps(ARMCC::AL));
      if (!constrainSelectedInstRegOperands(*MovI, TII, TRI, RBI))
        return false;

      MIB->eraseFromParent();
      return true;
    }

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs() << "G_TRUNC/G_ANYEXT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(dbgs() << "G_TRUNC/G_ANYEXT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_CONSTANT: {
    if (!MRI.getType(I.getOperand(0).getReg()).isPointer()) {
      // Non-pointer constants should be handled by TableGen.
      LLVM_DEBUG(dbgs() << "Unsupported constant type\n");
      return false;
    }

    auto &Val = I.getOperand(1);
    if (Val.isCImm()) {
      if (!Val.getCImm()->isZero()) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
      Val.ChangeToImmediate(0);
    } else {
      assert(Val.isImm() && "Unexpected operand for G_CONSTANT");
      if (Val.getImm() != 0) {
        LLVM_DEBUG(dbgs() << "Unsupported pointer constant value\n");
        return false;
      }
    }

    assert(!STI.isThumb() && "Unsupported subtarget");
    I.setDesc(TII.get(ARM::MOVi));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  }
  case G_FCONSTANT: {
    // Load from constant pool
    unsigned Size = MRI.getType(I.getOperand(0).getReg()).getSizeInBits() / 8;
    Align Alignment(Size);

    assert((Size == 4 || Size == 8) && "Unsupported FP constant type");
    auto LoadOpcode = Size == 4 ? ARM::VLDRS : ARM::VLDRD;

    auto ConstPool = MF.getConstantPool();
    auto CPIndex =
        ConstPool->getConstantPoolIndex(I.getOperand(1).getFPImm(), Alignment);
    MIB->setDesc(TII.get(LoadOpcode));
    MIB->removeOperand(1);
    MIB.addConstantPoolIndex(CPIndex, /*Offset*/ 0, /*TargetFlags*/ 0)
        .addMemOperand(
            MF.getMachineMemOperand(MachinePointerInfo::getConstantPool(MF),
                                    MachineMemOperand::MOLoad, Size, Alignment))
        .addImm(0)
        .add(predOps(ARMCC::AL));
    break;
  }
  case G_INTTOPTR:
  case G_PTRTOINT: {
    auto SrcReg = I.getOperand(1).getReg();
    auto DstReg = I.getOperand(0).getReg();

    const auto &SrcRegBank = *RBI.getRegBank(SrcReg, MRI, TRI);
    const auto &DstRegBank = *RBI.getRegBank(DstReg, MRI, TRI);

    if (SrcRegBank.getID() != DstRegBank.getID()) {
      LLVM_DEBUG(
          dbgs()
          << "G_INTTOPTR/G_PTRTOINT operands on different register banks\n");
      return false;
    }

    if (SrcRegBank.getID() != ARM::GPRRegBankID) {
      LLVM_DEBUG(
          dbgs() << "G_INTTOPTR/G_PTRTOINT on non-GPR not supported yet\n");
      return false;
    }

    I.setDesc(TII.get(COPY));
    return selectCopy(I, TII, MRI, TRI, RBI);
  }
  case G_SELECT:
    return selectSelect(MIB, MRI);
  case G_ICMP: {
    CmpConstants Helper(Opcodes.CMPrr, ARM::INSTRUCTION_LIST_END,
                        Opcodes.MOVCCi, ARM::GPRRegBankID, 32);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_FCMP: {
    assert(STI.hasVFP2Base() && "Can't select fcmp without VFP");

    Register OpReg = I.getOperand(2).getReg();
    unsigned Size = MRI.getType(OpReg).getSizeInBits();

    if (Size == 64 && !STI.hasFP64()) {
      LLVM_DEBUG(dbgs() << "Subtarget only supports single precision");
      return false;
    }
    if (Size != 32 && Size != 64) {
      LLVM_DEBUG(dbgs() << "Unsupported size for G_FCMP operand");
      return false;
    }

    CmpConstants Helper(Size == 32 ? ARM::VCMPS : ARM::VCMPD, ARM::FMSTAT,
                        Opcodes.MOVCCi, ARM::FPRRegBankID, Size);
    return selectCmp(Helper, MIB, MRI);
  }
  case G_LSHR:
    return selectShift(ARM_AM::ShiftOpc::lsr, MIB);
  case G_ASHR:
    return selectShift(ARM_AM::ShiftOpc::asr, MIB);
  case G_SHL:
    return selectShift(ARM_AM::ShiftOpc::lsl, MIB);
  case G_PTR_ADD:
    I.setDesc(TII.get(Opcodes.ADDrr));
    MIB.add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_FRAME_INDEX:
    // Add 0 to the given frame index and hope it will eventually be folded into
    // the user(s).
    I.setDesc(TII.get(Opcodes.ADDri));
    MIB.addImm(0).add(predOps(ARMCC::AL)).add(condCodeOp());
    break;
  case G_GLOBAL_VALUE:
    return selectGlobal(MIB, MRI);
  case G_STORE:
  case G_LOAD: {
    const auto &MemOp = **I.memoperands_begin();
    if (MemOp.isAtomic()) {
      LLVM_DEBUG(dbgs() << "Atomic load/store not supported yet\n");
      return false;
    }

    Register Reg = I.getOperand(0).getReg();
    unsigned RegBank = RBI.getRegBank(Reg, MRI, TRI)->getID();

    LLT ValTy = MRI.getType(Reg);
    const auto ValSize = ValTy.getSizeInBits();

    assert((ValSize != 64 || STI.hasVFP2Base()) &&
           "Don't know how to load/store 64-bit value without VFP");

    const auto NewOpc = selectLoadStoreOpCode(I.getOpcode(), RegBank, ValSize);
    if (NewOpc == G_LOAD || NewOpc == G_STORE)
      return false;

    I.setDesc(TII.get(NewOpc));

    if (NewOpc == ARM::LDRH || NewOpc == ARM::STRH)
      // ARM-mode LDRH/STRH use addrmode3, which takes an extra register-offset
      // operand; pass a null register since we only use an immediate offset
      // (there's already a FIXME for this).
      MIB.addReg(0);
    MIB.addImm(0).add(predOps(ARMCC::AL));
    break;
  }
  case G_MERGE_VALUES: {
    if (!selectMergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_UNMERGE_VALUES: {
    if (!selectUnmergeValues(MIB, TII, MRI, TRI, RBI))
      return false;
    break;
  }
  case G_BRCOND: {
    if (!validReg(MRI, I.getOperand(0).getReg(), 1, ARM::GPRRegBankID)) {
      LLVM_DEBUG(dbgs() << "Unsupported condition register for G_BRCOND");
      return false;
    }

    // Set the flags.
    auto Test =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.TSTri))
            .addReg(I.getOperand(0).getReg())
            .addImm(1)
            .add(predOps(ARMCC::AL));
    if (!constrainSelectedInstRegOperands(*Test, TII, TRI, RBI))
      return false;

    // Branch conditionally.
    auto Branch =
        BuildMI(*I.getParent(), I, I.getDebugLoc(), TII.get(Opcodes.Bcc))
            .add(I.getOperand(1))
            .add(predOps(ARMCC::NE, ARM::CPSR));
    if (!constrainSelectedInstRegOperands(*Branch, TII, TRI, RBI))
      return false;
    I.eraseFromParent();
    return true;
  }
  case G_PHI: {
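    // A generic PHI just becomes a target PHI; all we have to do is pick a
    // concrete register class for the destination.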
    I.setDesc(TII.get(PHI));

    Register DstReg = I.getOperand(0).getReg();
    const TargetRegisterClass *RC = guessRegClass(DstReg, MRI, TRI, RBI);
    if (!RBI.constrainGenericRegister(DstReg, *RC, MRI))
      break;

    return true;
  }
  default:
    return false;
  }

  return constrainSelectedInstRegOperands(I, TII, TRI, RBI);
}