1 //===-- RISCVLegalizerInfo.cpp ----------------------------------*- C++ -*-===//
2 //
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
4 // See https://llvm.org/LICENSE.txt for license information.
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
6 //
7 //===----------------------------------------------------------------------===//
8 /// \file
/// This file implements the targeting of the MachineLegalizer class for RISC-V.
10 /// \todo This should be generated by TableGen.
11 //===----------------------------------------------------------------------===//
12 
13 #include "RISCVLegalizerInfo.h"
14 #include "RISCVMachineFunctionInfo.h"
15 #include "RISCVSubtarget.h"
16 #include "llvm/CodeGen/GlobalISel/GenericMachineInstrs.h"
17 #include "llvm/CodeGen/GlobalISel/LegalizerHelper.h"
18 #include "llvm/CodeGen/GlobalISel/MachineIRBuilder.h"
19 #include "llvm/CodeGen/MachineRegisterInfo.h"
20 #include "llvm/CodeGen/TargetOpcodes.h"
21 #include "llvm/CodeGen/ValueTypes.h"
22 #include "llvm/IR/DerivedTypes.h"
23 #include "llvm/IR/Type.h"
24 
25 using namespace llvm;
26 using namespace LegalityPredicates;
27 using namespace LegalizeMutations;
28 
29 // Is this type supported by scalar FP arithmetic operations given the current
30 // subtarget.
31 static LegalityPredicate typeIsScalarFPArith(unsigned TypeIdx,
32                                              const RISCVSubtarget &ST) {
33   return [=, &ST](const LegalityQuery &Query) {
34     return Query.Types[TypeIdx].isScalar() &&
35            ((ST.hasStdExtF() && Query.Types[TypeIdx].getSizeInBits() == 32) ||
36             (ST.hasStdExtD() && Query.Types[TypeIdx].getSizeInBits() == 64));
37   };
38 }
39 
RISCVLegalizerInfo::RISCVLegalizerInfo(const RISCVSubtarget &ST)
    : STI(ST), XLen(STI.getXLen()), sXLen(LLT::scalar(XLen)) {
  // Double-width scalar; used below when lowering multiply/divide to a
  // single libcall that yields both halves of the result.
  const LLT sDoubleXLen = LLT::scalar(2 * XLen);
  const LLT p0 = LLT::pointer(0, XLen);
  const LLT s1 = LLT::scalar(1);
  const LLT s8 = LLT::scalar(8);
  const LLT s16 = LLT::scalar(16);
  const LLT s32 = LLT::scalar(32);
  const LLT s64 = LLT::scalar(64);

  // Scalable vector types for the V extension, one per element size and
  // minimum element count (nxv<N>s<EltBits>).
  const LLT nxv1s8 = LLT::scalable_vector(1, s8);
  const LLT nxv2s8 = LLT::scalable_vector(2, s8);
  const LLT nxv4s8 = LLT::scalable_vector(4, s8);
  const LLT nxv8s8 = LLT::scalable_vector(8, s8);
  const LLT nxv16s8 = LLT::scalable_vector(16, s8);
  const LLT nxv32s8 = LLT::scalable_vector(32, s8);
  const LLT nxv64s8 = LLT::scalable_vector(64, s8);

  const LLT nxv1s16 = LLT::scalable_vector(1, s16);
  const LLT nxv2s16 = LLT::scalable_vector(2, s16);
  const LLT nxv4s16 = LLT::scalable_vector(4, s16);
  const LLT nxv8s16 = LLT::scalable_vector(8, s16);
  const LLT nxv16s16 = LLT::scalable_vector(16, s16);
  const LLT nxv32s16 = LLT::scalable_vector(32, s16);

  const LLT nxv1s32 = LLT::scalable_vector(1, s32);
  const LLT nxv2s32 = LLT::scalable_vector(2, s32);
  const LLT nxv4s32 = LLT::scalable_vector(4, s32);
  const LLT nxv8s32 = LLT::scalable_vector(8, s32);
  const LLT nxv16s32 = LLT::scalable_vector(16, s32);

  const LLT nxv1s64 = LLT::scalable_vector(1, s64);
  const LLT nxv2s64 = LLT::scalable_vector(2, s64);
  const LLT nxv4s64 = LLT::scalable_vector(4, s64);
  const LLT nxv8s64 = LLT::scalable_vector(8, s64);

  using namespace TargetOpcode;

  // All scalable vector types considered for the vector-legal integer ops
  // below; per-query predicates further restrict them by subtarget.
  auto AllVecTys = {nxv1s8,   nxv2s8,  nxv4s8,  nxv8s8,  nxv16s8, nxv32s8,
                    nxv64s8,  nxv1s16, nxv2s16, nxv4s16, nxv8s16, nxv16s16,
                    nxv32s16, nxv1s32, nxv2s32, nxv4s32, nxv8s32, nxv16s32,
                    nxv1s64,  nxv2s64, nxv4s64, nxv8s64};

  getActionDefinitionsBuilder({G_ADD, G_SUB, G_AND, G_OR, G_XOR})
      .legalFor({s32, sXLen})
      // Vector forms require V instructions; s64 elements additionally need
      // hasVInstructionsI64(), and the nxv1 (min element count 1) types are
      // only valid when ELEN is 64.
      .legalIf(all(
          typeInSet(0, AllVecTys),
          LegalityPredicate([=, &ST](const LegalityQuery &Query) {
            return ST.hasVInstructions() &&
                   (Query.Types[0].getScalarSizeInBits() != 64 ||
                    ST.hasVInstructionsI64()) &&
                   (Query.Types[0].getElementCount().getKnownMinValue() != 1 ||
                    ST.getELen() == 64);
          })))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(
      {G_UADDE, G_UADDO, G_USUBE, G_USUBO}).lower();

  getActionDefinitionsBuilder({G_SADDO, G_SSUBO}).minScalar(0, sXLen).lower();

  auto &ShiftActions = getActionDefinitionsBuilder({G_ASHR, G_LSHR, G_SHL});
  // On RV64 the {s32, s32} form is marked Custom first so constant shift
  // amounts can be promoted to s64 (see legalizeShlAshrLshr).
  if (ST.is64Bit())
    ShiftActions.customFor({{s32, s32}});
  ShiftActions.legalFor({{s32, s32}, {s32, sXLen}, {sXLen, sXLen}})
      .widenScalarToNextPow2(0)
      .clampScalar(1, s32, sXLen)
      .clampScalar(0, s32, sXLen)
      .minScalarSameAs(1, 0);

  if (ST.is64Bit()) {
    getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT})
        .legalFor({{sXLen, s32}})
        .maxScalar(0, sXLen);

    // sXLen G_SEXT_INREG is Custom: a 32-bit source becomes sext.w, anything
    // else is lowered (see legalizeCustom).
    getActionDefinitionsBuilder(G_SEXT_INREG)
        .customFor({sXLen})
        .maxScalar(0, sXLen)
        .lower();
  } else {
    getActionDefinitionsBuilder({G_ZEXT, G_SEXT, G_ANYEXT}).maxScalar(0, sXLen);

    getActionDefinitionsBuilder(G_SEXT_INREG).maxScalar(0, sXLen).lower();
  }

  // Merge/Unmerge
  for (unsigned Op : {G_MERGE_VALUES, G_UNMERGE_VALUES}) {
    auto &MergeUnmergeActions = getActionDefinitionsBuilder(Op);
    unsigned BigTyIdx = Op == G_MERGE_VALUES ? 0 : 1;
    unsigned LitTyIdx = Op == G_MERGE_VALUES ? 1 : 0;
    // With D on RV32, an s64 can live in an FPR pair, so merging/unmerging
    // two s32s to/from an s64 is legal.
    if (XLen == 32 && ST.hasStdExtD()) {
      MergeUnmergeActions.legalIf(
          all(typeIs(BigTyIdx, s64), typeIs(LitTyIdx, s32)));
    }
    MergeUnmergeActions.widenScalarToNextPow2(LitTyIdx, XLen)
        .widenScalarToNextPow2(BigTyIdx, XLen)
        .clampScalar(LitTyIdx, sXLen, sXLen)
        .clampScalar(BigTyIdx, sXLen, sXLen);
  }

  getActionDefinitionsBuilder({G_FSHL, G_FSHR}).lower();

  auto &RotateActions = getActionDefinitionsBuilder({G_ROTL, G_ROTR});
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb()) {
    RotateActions.legalFor({{s32, sXLen}, {sXLen, sXLen}});
    // Widen s32 rotate amount to s64 so SDAG patterns will match.
    if (ST.is64Bit())
      RotateActions.widenScalarIf(all(typeIs(0, s32), typeIs(1, s32)),
                                  changeTo(1, sXLen));
  }
  RotateActions.lower();

  getActionDefinitionsBuilder(G_BITREVERSE).maxScalar(0, sXLen).lower();

  // Zbb/Zbkb provide rev8 for byte swap; otherwise expand.
  auto &BSWAPActions = getActionDefinitionsBuilder(G_BSWAP);
  if (ST.hasStdExtZbb() || ST.hasStdExtZbkb())
    BSWAPActions.legalFor({sXLen}).clampScalar(0, sXLen, sXLen);
  else
    BSWAPActions.maxScalar(0, sXLen).lower();

  auto &CountZerosActions = getActionDefinitionsBuilder({G_CTLZ, G_CTTZ});
  auto &CountZerosUndefActions =
      getActionDefinitionsBuilder({G_CTLZ_ZERO_UNDEF, G_CTTZ_ZERO_UNDEF});
  if (ST.hasStdExtZbb()) {
    CountZerosActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CountZerosActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
    CountZerosUndefActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0);
  }
  // The *_ZERO_UNDEF forms always lower (to the checked forms when legal).
  CountZerosUndefActions.lower();

  auto &CTPOPActions = getActionDefinitionsBuilder(G_CTPOP);
  if (ST.hasStdExtZbb()) {
    CTPOPActions.legalFor({{s32, s32}, {sXLen, sXLen}})
        .clampScalar(0, s32, sXLen)
        .widenScalarToNextPow2(0)
        .scalarSameSizeAs(1, 0);
  } else {
    CTPOPActions.maxScalar(0, sXLen).scalarSameSizeAs(1, 0).lower();
  }

  getActionDefinitionsBuilder({G_CONSTANT, G_IMPLICIT_DEF})
      .legalFor({s32, sXLen, p0})
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder(G_ICMP)
      .legalFor({{sXLen, sXLen}, {sXLen, p0}})
      .widenScalarToNextPow2(1)
      .clampScalar(1, sXLen, sXLen)
      .clampScalar(0, sXLen, sXLen);

  auto &SelectActions = getActionDefinitionsBuilder(G_SELECT).legalFor(
      {{s32, sXLen}, {p0, sXLen}});
  // s64 select is legal when s64 fits a GPR (RV64) or an FPR (RV32+D).
  if (XLen == 64 || ST.hasStdExtD())
    SelectActions.legalFor({{s64, sXLen}});
  SelectActions.widenScalarToNextPow2(0)
      .clampScalar(0, s32, (XLen == 64 || ST.hasStdExtD()) ? s64 : s32)
      .clampScalar(1, sXLen, sXLen);

  auto &LoadStoreActions =
      getActionDefinitionsBuilder({G_LOAD, G_STORE})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8},
                                     {s32, p0, s16, 16},
                                     {s32, p0, s32, 32},
                                     {p0, p0, sXLen, XLen}});
  auto &ExtLoadActions =
      getActionDefinitionsBuilder({G_SEXTLOAD, G_ZEXTLOAD})
          .legalForTypesWithMemDesc({{s32, p0, s8, 8}, {s32, p0, s16, 16}});
  if (XLen == 64) {
    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s8, 8},
                                               {s64, p0, s16, 16},
                                               {s64, p0, s32, 32},
                                               {s64, p0, s64, 64}});
    ExtLoadActions.legalForTypesWithMemDesc(
        {{s64, p0, s8, 8}, {s64, p0, s16, 16}, {s64, p0, s32, 32}});
  } else if (ST.hasStdExtD()) {
    // RV32 with D: 64-bit FP loads/stores are legal.
    LoadStoreActions.legalForTypesWithMemDesc({{s64, p0, s64, 64}});
  }
  LoadStoreActions.clampScalar(0, s32, sXLen).lower();
  ExtLoadActions.widenScalarToNextPow2(0).clampScalar(0, s32, sXLen).lower();

  getActionDefinitionsBuilder({G_PTR_ADD, G_PTRMASK}).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_PTRTOINT)
      .legalFor({{sXLen, p0}})
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder(G_INTTOPTR)
      .legalFor({{p0, sXLen}})
      .clampScalar(1, sXLen, sXLen);

  getActionDefinitionsBuilder(G_BRCOND).legalFor({sXLen}).minScalar(0, sXLen);

  getActionDefinitionsBuilder(G_BRJT).legalFor({{p0, sXLen}});

  getActionDefinitionsBuilder(G_BRINDIRECT).legalFor({p0});

  getActionDefinitionsBuilder(G_PHI)
      .legalFor({p0, sXLen})
      .widenScalarToNextPow2(0)
      .clampScalar(0, sXLen, sXLen);

  getActionDefinitionsBuilder({G_GLOBAL_VALUE, G_JUMP_TABLE, G_CONSTANT_POOL})
      .legalFor({p0});

  if (ST.hasStdExtM() || ST.hasStdExtZmmul()) {
    getActionDefinitionsBuilder(G_MUL)
        .legalFor({s32, sXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, s32, sXLen);

    // clang-format off
    getActionDefinitionsBuilder({G_SMULH, G_UMULH})
        .legalFor({sXLen})
        .lower();
    // clang-format on

    getActionDefinitionsBuilder({G_SMULO, G_UMULO}).minScalar(0, sXLen).lower();
  } else {
    // No multiply instructions: go through libcalls.
    getActionDefinitionsBuilder(G_MUL)
        .libcallFor({sXLen, sDoubleXLen})
        .widenScalarToNextPow2(0)
        .clampScalar(0, sXLen, sDoubleXLen);

    getActionDefinitionsBuilder({G_SMULH, G_UMULH}).lowerFor({sXLen});

    getActionDefinitionsBuilder({G_SMULO, G_UMULO})
        .minScalar(0, sXLen)
        // Widen sXLen to sDoubleXLen so we can use a single libcall to get
        // the low bits for the mul result and high bits to do the overflow
        // check.
        .widenScalarIf(typeIs(0, sXLen),
                       LegalizeMutations::changeTo(0, sDoubleXLen))
        .lower();
  }

  if (ST.hasStdExtM()) {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .legalFor({s32, sXLen})
        .libcallFor({sDoubleXLen})
        .clampScalar(0, s32, sDoubleXLen)
        .widenScalarToNextPow2(0);
  } else {
    getActionDefinitionsBuilder({G_UDIV, G_SDIV, G_UREM, G_SREM})
        .libcallFor({sXLen, sDoubleXLen})
        .clampScalar(0, sXLen, sDoubleXLen)
        .widenScalarToNextPow2(0);
  }

  // With Zbb, G_ABS is Custom and lowered to max(x, neg(x)) in
  // legalizeCustom; otherwise use the generic lowering.
  auto &AbsActions = getActionDefinitionsBuilder(G_ABS);
  if (ST.hasStdExtZbb())
    AbsActions.customFor({s32, sXLen}).minScalar(0, sXLen);
  AbsActions.lower();

  auto &MinMaxActions =
      getActionDefinitionsBuilder({G_UMAX, G_UMIN, G_SMAX, G_SMIN});
  if (ST.hasStdExtZbb())
    MinMaxActions.legalFor({sXLen}).minScalar(0, sXLen);
  MinMaxActions.lower();

  getActionDefinitionsBuilder(G_FRAME_INDEX).legalFor({p0});

  getActionDefinitionsBuilder({G_MEMCPY, G_MEMMOVE, G_MEMSET}).libcall();

  getActionDefinitionsBuilder(G_DYN_STACKALLOC).lower();

  // FP Operations

  getActionDefinitionsBuilder({G_FADD, G_FSUB, G_FMUL, G_FDIV, G_FMA, G_FNEG,
                               G_FABS, G_FSQRT, G_FMAXNUM, G_FMINNUM})
      .legalIf(typeIsScalarFPArith(0, ST));

  getActionDefinitionsBuilder(G_FCOPYSIGN)
      .legalIf(all(typeIsScalarFPArith(0, ST), typeIsScalarFPArith(1, ST)));

  // s64 -> s32 truncation and s32 -> s64 extension need the D extension.
  getActionDefinitionsBuilder(G_FPTRUNC).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s32)(Query) &&
                typeIs(1, s64)(Query));
      });
  getActionDefinitionsBuilder(G_FPEXT).legalIf(
      [=, &ST](const LegalityQuery &Query) -> bool {
        return (ST.hasStdExtD() && typeIs(0, s64)(Query) &&
                typeIs(1, s32)(Query));
      });

  getActionDefinitionsBuilder(G_FCMP)
      .legalIf(all(typeIs(0, sXLen), typeIsScalarFPArith(1, ST)))
      .clampScalar(0, sXLen, sXLen);

  // TODO: Support vector version of G_IS_FPCLASS.
  getActionDefinitionsBuilder(G_IS_FPCLASS)
      .customIf(all(typeIs(0, s1), typeIsScalarFPArith(1, ST)));

  getActionDefinitionsBuilder(G_FCONSTANT)
      .legalIf(typeIsScalarFPArith(0, ST))
      .lowerFor({s32, s64});

  getActionDefinitionsBuilder({G_FPTOSI, G_FPTOUI})
      .legalIf(all(typeInSet(0, {s32, sXLen}), typeIsScalarFPArith(1, ST)))
      .widenScalarToNextPow2(0)
      .clampScalar(0, s32, sXLen);

  getActionDefinitionsBuilder({G_SITOFP, G_UITOFP})
      .legalIf(all(typeIsScalarFPArith(0, ST), typeInSet(1, {s32, sXLen})))
      .widenScalarToNextPow2(1)
      .clampScalar(1, s32, sXLen);

  // FIXME: We can do custom inline expansion like SelectionDAG.
  // FIXME: Legal with Zfa.
  getActionDefinitionsBuilder({G_FCEIL, G_FFLOOR})
      .libcallFor({s32, s64});

  getActionDefinitionsBuilder(G_VASTART).customFor({p0});

  // va_list must be a pointer, but most sized types are pretty easy to handle
  // as the destination.
  getActionDefinitionsBuilder(G_VAARG)
      // TODO: Implement narrowScalar and widenScalar for G_VAARG for types
      // outside the [s32, sXLen] range.
      .clampScalar(0, s32, sXLen)
      .lowerForCartesianProduct({s32, sXLen, p0}, {p0});

  getLegacyLegalizerInfo().computeTables();
}
370 
371 static Type *getTypeForLLT(LLT Ty, LLVMContext &C) {
372   if (Ty.isVector())
373     return FixedVectorType::get(IntegerType::get(C, Ty.getScalarSizeInBits()),
374                                 Ty.getNumElements());
375   return IntegerType::get(C, Ty.getSizeInBits());
376 }
377 
378 bool RISCVLegalizerInfo::legalizeIntrinsic(LegalizerHelper &Helper,
379                                            MachineInstr &MI) const {
380   Intrinsic::ID IntrinsicID = cast<GIntrinsic>(MI).getIntrinsicID();
381   switch (IntrinsicID) {
382   default:
383     return false;
384   case Intrinsic::vacopy: {
385     // vacopy arguments must be legal because of the intrinsic signature.
386     // No need to check here.
387 
388     MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
389     MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
390     MachineFunction &MF = *MI.getMF();
391     const DataLayout &DL = MIRBuilder.getDataLayout();
392     LLVMContext &Ctx = MF.getFunction().getContext();
393 
394     Register DstLst = MI.getOperand(1).getReg();
395     LLT PtrTy = MRI.getType(DstLst);
396 
397     // Load the source va_list
398     Align Alignment = DL.getABITypeAlign(getTypeForLLT(PtrTy, Ctx));
399     MachineMemOperand *LoadMMO = MF.getMachineMemOperand(
400         MachinePointerInfo(), MachineMemOperand::MOLoad, PtrTy, Alignment);
401     auto Tmp = MIRBuilder.buildLoad(PtrTy, MI.getOperand(2), *LoadMMO);
402 
403     // Store the result in the destination va_list
404     MachineMemOperand *StoreMMO = MF.getMachineMemOperand(
405         MachinePointerInfo(), MachineMemOperand::MOStore, PtrTy, Alignment);
406     MIRBuilder.buildStore(DstLst, Tmp, *StoreMMO);
407 
408     MI.eraseFromParent();
409     return true;
410   }
411   }
412 }
413 
414 bool RISCVLegalizerInfo::legalizeShlAshrLshr(
415     MachineInstr &MI, MachineIRBuilder &MIRBuilder,
416     GISelChangeObserver &Observer) const {
417   assert(MI.getOpcode() == TargetOpcode::G_ASHR ||
418          MI.getOpcode() == TargetOpcode::G_LSHR ||
419          MI.getOpcode() == TargetOpcode::G_SHL);
420   MachineRegisterInfo &MRI = *MIRBuilder.getMRI();
421   // If the shift amount is a G_CONSTANT, promote it to a 64 bit type so the
422   // imported patterns can select it later. Either way, it will be legal.
423   Register AmtReg = MI.getOperand(2).getReg();
424   auto VRegAndVal = getIConstantVRegValWithLookThrough(AmtReg, MRI);
425   if (!VRegAndVal)
426     return true;
427   // Check the shift amount is in range for an immediate form.
428   uint64_t Amount = VRegAndVal->Value.getZExtValue();
429   if (Amount > 31)
430     return true; // This will have to remain a register variant.
431   auto ExtCst = MIRBuilder.buildConstant(LLT::scalar(64), Amount);
432   Observer.changingInstr(MI);
433   MI.getOperand(2).setReg(ExtCst.getReg(0));
434   Observer.changedInstr(MI);
435   return true;
436 }
437 
438 bool RISCVLegalizerInfo::legalizeVAStart(MachineInstr &MI,
439                                          MachineIRBuilder &MIRBuilder) const {
440   // Stores the address of the VarArgsFrameIndex slot into the memory location
441   assert(MI.getOpcode() == TargetOpcode::G_VASTART);
442   MachineFunction *MF = MI.getParent()->getParent();
443   RISCVMachineFunctionInfo *FuncInfo = MF->getInfo<RISCVMachineFunctionInfo>();
444   int FI = FuncInfo->getVarArgsFrameIndex();
445   LLT AddrTy = MIRBuilder.getMRI()->getType(MI.getOperand(0).getReg());
446   auto FINAddr = MIRBuilder.buildFrameIndex(AddrTy, FI);
447   assert(MI.hasOneMemOperand());
448   MIRBuilder.buildStore(FINAddr, MI.getOperand(0).getReg(),
449                         *MI.memoperands()[0]);
450   MI.eraseFromParent();
451   return true;
452 }
453 
// Dispatch for opcodes marked Custom in the constructor above. Returns
// false for anything we don't know how to legalize.
bool RISCVLegalizerInfo::legalizeCustom(
    LegalizerHelper &Helper, MachineInstr &MI,
    LostDebugLocObserver &LocObserver) const {
  MachineIRBuilder &MIRBuilder = Helper.MIRBuilder;
  GISelChangeObserver &Observer = Helper.Observer;
  switch (MI.getOpcode()) {
  default:
    // No idea what to do.
    return false;
  case TargetOpcode::G_ABS:
    // Custom only with Zbb (see constructor): abs(x) = max(x, -x).
    return Helper.lowerAbsToMaxNeg(MI);
  case TargetOpcode::G_SHL:
  case TargetOpcode::G_ASHR:
  case TargetOpcode::G_LSHR:
    return legalizeShlAshrLshr(MI, MIRBuilder, Observer);
  case TargetOpcode::G_SEXT_INREG: {
    // Source size of 32 is sext.w.
    int64_t SizeInBits = MI.getOperand(2).getImm();
    if (SizeInBits == 32)
      return true;

    // Other widths: fall back to the generic shl+ashr lowering.
    return Helper.lower(MI, 0, /* Unused hint type */ LLT()) ==
           LegalizerHelper::Legalized;
  }
  case TargetOpcode::G_IS_FPCLASS: {
    Register GISFPCLASS = MI.getOperand(0).getReg();
    Register Src = MI.getOperand(1).getReg();
    const MachineOperand &ImmOp = MI.getOperand(2);
    MachineIRBuilder MIB(MI);

    // Turn LLVM IR's floating point classes to that in RISC-V,
    // by simply rotating the 10-bit immediate right by two bits.
    // Zero-extend to XLen so it can be used as an AND mask below.
    APInt GFpClassImm(10, static_cast<uint64_t>(ImmOp.getImm()));
    auto FClassMask = MIB.buildConstant(sXLen, GFpClassImm.rotr(2).zext(XLen));
    auto ConstZero = MIB.buildConstant(sXLen, 0);

    // fclass sets exactly one class bit; the i1 result is "any requested
    // class bit set", i.e. (fclass(Src) & Mask) != 0.
    auto GFClass = MIB.buildInstr(RISCV::G_FCLASS, {sXLen}, {Src});
    auto And = MIB.buildAnd(sXLen, GFClass, FClassMask);
    MIB.buildICmp(CmpInst::ICMP_NE, GISFPCLASS, And, ConstZero);

    MI.eraseFromParent();
    return true;
  }
  case TargetOpcode::G_VASTART:
    return legalizeVAStart(MI, MIRBuilder);
  }

  llvm_unreachable("expected switch to return");
}
503