//===-- RISCVISelLowering.h - RISCV DAG Lowering Interface ------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H
#define LLVM_LIB_TARGET_RISCV_RISCVISELLOWERING_H

#include "RISCV.h"
#include "llvm/CodeGen/SelectionDAG.h"
#include "llvm/CodeGen/TargetLowering.h"

namespace llvm {
class RISCVSubtarget;
namespace RISCVISD {
enum NodeType : unsigned {
  FIRST_NUMBER = ISD::BUILTIN_OP_END,
  RET_FLAG,
  URET_FLAG,
  SRET_FLAG,
  MRET_FLAG,
  CALL,
  /// Select with condition operator - This selects between a true value and
  /// a false value (ops #3 and #4) based on the boolean result of comparing
  /// the lhs and rhs (ops #0 and #1) of a conditional expression with the
  /// condition code in op #2, an XLenVT constant from the ISD::CondCode enum.
  /// The lhs and rhs are XLenVT integers. The true and false values can be
  /// integer or floating point.
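  ///
  /// For example, (SELECT_CC x, y, SETLT, a, b) evaluates to a when the
  /// signed comparison x < y holds and to b otherwise.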
  SELECT_CC,
  BuildPairF64,
  SplitF64,
  TAIL,
  // RV64I shifts, directly matching the semantics of the named RISC-V
  // instructions.
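  // Operands and results are i64: the low 32 bits of the first operand are
  // shifted by the low 5 bits of the second, and the 32-bit result is
  // sign-extended to 64 bits.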
  SLLW,
  SRAW,
  SRLW,
  // 32-bit operations from RV64M that can't be simply matched with a pattern
  // at instruction selection time. These have undefined behavior for division
  // by 0 or overflow (divw) like their target-independent counterparts.
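  // For example, (DIVW a, b) computes the signed 32-bit quotient of the low
  // 32 bits of a and b, sign-extended to 64 bits.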
  DIVW,
  DIVUW,
  REMUW,
  // RV64IB rotates, directly matching the semantics of the named RISC-V
  // instructions.
  ROLW,
  RORW,
  // RV64IB funnel shifts, with the semantics of the named RISC-V instructions,
  // but the same operand order as fshl/fshr intrinsics.
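  // For shift amounts below 32, (FSLW a, b, c) matches the 32-bit
  // fshl(a, b, c) result sign-extended to 64 bits; this lets i32 fshl/fshr
  // be custom-legalized to these nodes on RV64.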
  FSRW,
  FSLW,
  // FPR<->GPR transfer operations when the FPR is smaller than XLEN, needed as
  // XLEN is the only legal integer width.
  //
  // FMV_H_X matches the semantics of the FMV.H.X instruction.
  // FMV_X_ANYEXTH is similar to FMV.X.H but has an any-extended result.
  // FMV_W_X_RV64 matches the semantics of the FMV.W.X instruction.
  // FMV_X_ANYEXTW_RV64 is similar to FMV.X.W but has an any-extended result.
  //
  // These semantics make it easier to write DAG combines that remove
  // unnecessary GPR->FPR->GPR moves.
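  // For example, (FMV_X_ANYEXTW_RV64 (FMV_W_X_RV64 x)) can be folded back to
  // x when only the low 32 bits of the result are demanded.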
  FMV_H_X,
  FMV_X_ANYEXTH,
  FMV_W_X_RV64,
  FMV_X_ANYEXTW_RV64,
  // READ_CYCLE_WIDE - A read of the 64-bit cycle CSR on a 32-bit target
  // (returns (Lo, Hi)). It takes a chain operand.
  READ_CYCLE_WIDE,
  // Generalized Reverse and Generalized Or-Combine - directly matching the
  // semantics of the named RISC-V instructions. Lowered as custom nodes as
  // TableGen chokes when faced with commutative permutations in deeply-nested
  // DAGs. Each node takes an input operand and a TargetConstant immediate
  // shift amount, and outputs a bit-manipulated version of the input. All
  // operands are of type XLenVT.
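  // For example, on RV64, (GREVI x, 56) byte-swaps x (the rev8
  // pseudo-instruction), and (GORCI x, 7) sets each byte of x to all-ones if
  // any bit of that byte was set (the orc.b pseudo-instruction).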
  GREVI,
  GREVIW,
  GORCI,
  GORCIW,
  // Vector Extension
  // VMV_X_S matches the semantics of vmv.x.s. The result is always XLenVT,
  // sign-extended from the vector element size. NOTE: The result size will
  // never be less than the vector element size.
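  // For example, on RV64, reading element 0 of a nxv4i16 vector yields an
  // i64 value sign-extended from the 16-bit element.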
  VMV_X_S,
  // Splats an i64 scalar to a vector type (with element type i64) where the
  // scalar is a sign-extended i32.
  SPLAT_VECTOR_I64,
  // Read VLENB CSR
  READ_VLENB,
  // Truncates an RVV integer vector by one power-of-two.
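  // For example, a truncate from nxv2i64 to nxv2i16 is lowered as two
  // TRUNCATE_VECTOR steps: nxv2i64 -> nxv2i32 -> nxv2i16.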
  TRUNCATE_VECTOR,
  // Unit-stride fault-only-first load
  VLEFF,
  VLEFF_MASK,
  // Unit-stride fault-only-first segment load
  VLSEGFF,
  VLSEGFF_MASK,
  // Read VL CSR
  READ_VL,
  // Matches the semantics of vslideup/vslidedown. The first operand is the
  // pass-thru operand, the second is the source vector, and the third is the
  // XLenVT index (either constant or non-constant).
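  // For example, (VSLIDEUP passthru, src, 2) yields a vector whose elements
  // 0 and 1 come from passthru and whose element i (i >= 2) is element i - 2
  // of src.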
  VSLIDEUP,
  VSLIDEDOWN,
  // Matches the semantics of the unmasked vid.v instruction.
  VID,
};
} // namespace RISCVISD

class RISCVTargetLowering : public TargetLowering {
  const RISCVSubtarget &Subtarget;

public:
  explicit RISCVTargetLowering(const TargetMachine &TM,
                               const RISCVSubtarget &STI);

  const RISCVSubtarget &getSubtarget() const { return Subtarget; }

  bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                          MachineFunction &MF,
                          unsigned Intrinsic) const override;
  bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM, Type *Ty,
                             unsigned AS,
                             Instruction *I = nullptr) const override;
  bool isLegalICmpImmediate(int64_t Imm) const override;
  bool isLegalAddImmediate(int64_t Imm) const override;
  bool isTruncateFree(Type *SrcTy, Type *DstTy) const override;
  bool isTruncateFree(EVT SrcVT, EVT DstVT) const override;
  bool isZExtFree(SDValue Val, EVT VT2) const override;
  bool isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const override;
  bool isCheapToSpeculateCttz() const override;
  bool isCheapToSpeculateCtlz() const override;
  bool isFPImmLegal(const APFloat &Imm, EVT VT,
                    bool ForCodeSize) const override;

  bool hasBitPreservingFPLogic(EVT VT) const override;

  // Provide custom lowering hooks for some operations.
  SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
  void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                          SelectionDAG &DAG) const override;

  SDValue PerformDAGCombine(SDNode *N, DAGCombinerInfo &DCI) const override;

  bool targetShrinkDemandedConstant(SDValue Op, const APInt &DemandedBits,
                                    const APInt &DemandedElts,
                                    TargetLoweringOpt &TLO) const override;

  void computeKnownBitsForTargetNode(const SDValue Op,
                                     KnownBits &Known,
                                     const APInt &DemandedElts,
                                     const SelectionDAG &DAG,
                                     unsigned Depth) const override;
  unsigned ComputeNumSignBitsForTargetNode(SDValue Op,
                                           const APInt &DemandedElts,
                                           const SelectionDAG &DAG,
                                           unsigned Depth) const override;

  // This method returns the name of a target-specific DAG node.
  const char *getTargetNodeName(unsigned Opcode) const override;

  ConstraintType getConstraintType(StringRef Constraint) const override;

  unsigned getInlineAsmMemConstraint(StringRef ConstraintCode) const override;

  std::pair<unsigned, const TargetRegisterClass *>
  getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                               StringRef Constraint, MVT VT) const override;

  void LowerAsmOperandForConstraint(SDValue Op, std::string &Constraint,
                                    std::vector<SDValue> &Ops,
                                    SelectionDAG &DAG) const override;

  MachineBasicBlock *
  EmitInstrWithCustomInserter(MachineInstr &MI,
                              MachineBasicBlock *BB) const override;

  EVT getSetCCResultType(const DataLayout &DL, LLVMContext &Context,
                         EVT VT) const override;

  bool convertSetCCLogicToBitwiseLogic(EVT VT) const override {
    return VT.isScalarInteger();
  }
  bool convertSelectOfConstantsToMath(EVT VT) const override { return true; }

  bool shouldInsertFencesForAtomic(const Instruction *I) const override {
    return isa<LoadInst>(I) || isa<StoreInst>(I);
  }
  Instruction *emitLeadingFence(IRBuilder<> &Builder, Instruction *Inst,
                                AtomicOrdering Ord) const override;
  Instruction *emitTrailingFence(IRBuilder<> &Builder, Instruction *Inst,
                                 AtomicOrdering Ord) const override;

  bool isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                  EVT VT) const override;

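  // RV64 atomic operations on i32 (amo*.w and lr.w/sc.w) produce results
  // sign-extended to 64 bits, so sign-extending is the natural way to widen
  // atomic results.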
  ISD::NodeType getExtendForAtomicOps() const override {
    return ISD::SIGN_EXTEND;
  }

  ISD::NodeType getExtendForAtomicCmpSwapArg() const override {
    return ISD::SIGN_EXTEND;
  }

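  // Prefer a libcall over expanding a wide shift into a shift-by-parts
  // sequence when optimizing for minimum size.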
  bool shouldExpandShift(SelectionDAG &DAG, SDNode *N) const override {
    if (DAG.getMachineFunction().getFunction().hasMinSize())
      return false;
    return true;
  }
  bool isDesirableToCommuteWithShift(const SDNode *N,
                                     CombineLevel Level) const override;

  /// If a physical register, this returns the register that receives the
  /// exception address on entry to an EH pad.
  Register
  getExceptionPointerRegister(const Constant *PersonalityFn) const override;

  /// If a physical register, this returns the register that receives the
  /// exception typeid on entry to a landing pad.
  Register
  getExceptionSelectorRegister(const Constant *PersonalityFn) const override;

  bool shouldExtendTypeInLibCall(EVT Type) const override;
  bool shouldSignExtendTypeInLibCall(EVT Type, bool IsSigned) const override;

  /// Returns the register with the specified architectural or ABI name. This
  /// method is necessary to lower the llvm.read_register.* and
  /// llvm.write_register.* intrinsics. Allocatable registers must be reserved
  /// with the clang -ffixed-xX flag for access to be allowed.
  Register getRegisterByName(const char *RegName, LLT VT,
                             const MachineFunction &MF) const override;

  // Lower incoming arguments, copy physregs into vregs.
  SDValue LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv,
                               bool IsVarArg,
                               const SmallVectorImpl<ISD::InputArg> &Ins,
                               const SDLoc &DL, SelectionDAG &DAG,
                               SmallVectorImpl<SDValue> &InVals) const override;
  bool CanLowerReturn(CallingConv::ID CallConv, MachineFunction &MF,
                      bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      LLVMContext &Context) const override;
  SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
                      const SmallVectorImpl<ISD::OutputArg> &Outs,
                      const SmallVectorImpl<SDValue> &OutVals, const SDLoc &DL,
                      SelectionDAG &DAG) const override;
  SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
                    SmallVectorImpl<SDValue> &InVals) const override;

  bool shouldConvertConstantLoadToIntImm(const APInt &Imm,
                                         Type *Ty) const override {
    return true;
  }
  bool mayBeEmittedAsTailCall(const CallInst *CI) const override;
  bool shouldConsiderGEPOffsetSplit() const override { return true; }

  bool decomposeMulByConstant(LLVMContext &Context, EVT VT,
                              SDValue C) const override;

  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override;
  Value *emitMaskedAtomicRMWIntrinsic(IRBuilder<> &Builder, AtomicRMWInst *AI,
                                      Value *AlignedAddr, Value *Incr,
                                      Value *Mask, Value *ShiftAmt,
                                      AtomicOrdering Ord) const override;
  TargetLowering::AtomicExpansionKind
  shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *CI) const override;
  Value *emitMaskedAtomicCmpXchgIntrinsic(IRBuilder<> &Builder,
                                          AtomicCmpXchgInst *CI,
                                          Value *AlignedAddr, Value *CmpVal,
                                          Value *NewVal, Value *Mask,
                                          AtomicOrdering Ord) const override;

private:
  void analyzeInputArgs(MachineFunction &MF, CCState &CCInfo,
                        const SmallVectorImpl<ISD::InputArg> &Ins,
                        bool IsRet) const;
  void analyzeOutputArgs(MachineFunction &MF, CCState &CCInfo,
                         const SmallVectorImpl<ISD::OutputArg> &Outs,
                         bool IsRet, CallLoweringInfo *CLI) const;

  template <class NodeTy>
  SDValue getAddr(NodeTy *N, SelectionDAG &DAG, bool IsLocal = true) const;

  SDValue getStaticTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG,
                           bool UseGOT) const;
  SDValue getDynamicTLSAddr(GlobalAddressSDNode *N, SelectionDAG &DAG) const;

  SDValue lowerGlobalAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerJumpTable(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerSELECT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVASTART(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftLeftParts(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerShiftRightParts(SDValue Op, SelectionDAG &DAG, bool IsSRA) const;
  SDValue lowerSPLATVECTOR(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
                             int64_t ExtTrueVal) const;
  SDValue lowerVectorMaskTrunc(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue lowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
  SDValue LowerINTRINSIC_W_CHAIN(SDValue Op, SelectionDAG &DAG) const;

  bool isEligibleForTailCallOptimization(
      CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
      const SmallVector<CCValAssign, 16> &ArgLocs) const;

  /// Generate error diagnostics if any register used by CC has been marked
  /// reserved.
  void validateCCReservedRegs(
      const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
      MachineFunction &MF) const;
};

namespace RISCVVIntrinsicsTable {

struct RISCVVIntrinsicInfo {
  unsigned int IntrinsicID;
  unsigned int ExtendedOperand;
};

using namespace RISCV;

#define GET_RISCVVIntrinsicsTable_DECL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

namespace RISCVZvlssegTable {

struct RISCVZvlsseg {
  unsigned int IntrinsicID;
  unsigned int SEW;
  unsigned int LMUL;
  unsigned int IndexLMUL;
  unsigned int Pseudo;
};

using namespace RISCV;

#define GET_RISCVZvlssegTable_DECL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVZvlssegTable
} // namespace llvm

#endif