//===-- HexagonISelLowering.h - Hexagon DAG Lowering Interface --*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that Hexagon uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H
#define LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H

#include "Hexagon.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/CodeGen/ISDOpcodes.h"
#include "llvm/CodeGen/SelectionDAGNodes.h"
#include "llvm/CodeGen/TargetLowering.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/Support/MachineValueType.h"
#include <cstdint>
#include <utility>

namespace llvm {

namespace HexagonISD {

    enum NodeType : unsigned {
      OP_BEGIN = ISD::BUILTIN_OP_END,

      CONST32 = OP_BEGIN,
      CONST32_GP,  // For marking data present in GP.
      ADDC,        // Add with carry: (X, Y, Cin) -> (X+Y, Cout).
      SUBC,        // Sub with carry: (X, Y, Cin) -> (X+~Y+Cin, Cout).
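      // Illustrative note: given the semantics above, a double-width
      // addition can be assembled from two ADDC nodes, e.g. for a 64-bit
      // value split into 32-bit halves:
      //   (LoSum, Cout) = ADDC(XLo, YLo, 0)
      //   (HiSum, _)    = ADDC(XHi, YHi, Cout)
      // The related lowering hooks (LowerUAddSubO, LowerAddSubCarry) are
      // declared further below.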
      ALLOCA,      // Dynamic allocation on the stack (alloca).

      AT_GOT,      // Index in GOT.
      AT_PCREL,    // Offset relative to PC.

      CALL,        // Function call.
      CALLnr,      // Function call that does not return.
      CALLR,       // Indirect function call (through a register).

      RET_FLAG,    // Return with a flag operand.
      BARRIER,     // Memory barrier.
      JT,          // Jump table.
      CP,          // Constant pool.

      COMBINE,     // Combine two values into a double-width register pair.
      VSPLAT,      // Generic splat, selection depends on argument/return
                   // types.
      VASL,        // Vector arithmetic shift left.
      VASR,        // Vector arithmetic shift right.
      VLSR,        // Vector logical shift right.

      TSTBIT,      // Test a single bit.
      INSERT,      // Bit-field insert.
      EXTRACTU,    // Unsigned bit-field extract.
      VEXTRACTW,   // Extract a 32-bit word from an HVX vector.
      VINSERTW0,   // Insert a 32-bit word into word 0 of an HVX vector.
      VROR,        // Rotate the bytes of an HVX vector.
      TC_RETURN,   // Tail-call return.
      EH_RETURN,   // Exception-handling return.
      DCFETCH,     // Data cache prefetch.
      READCYCLE,   // Read the cycle counter.
      D2P,         // Convert 8-byte value to 8-bit predicate register. [*]
      P2D,         // Convert 8-bit predicate register to 8-byte value. [*]
      V2Q,         // Convert HVX vector to a vector predicate reg. [*]
      Q2V,         // Convert vector predicate to an HVX vector. [*]
                   // [*] The equivalence is defined as "Q <=> (V != 0)",
                   //     where the != operation compares bytes.
                   // Note: V != 0 is implemented as V >u 0.
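      // For example, applying the equivalence above to D2P: an i64 whose
      // bytes are {0, 3, 0, 0, 0, 7, 0, 1} yields a predicate whose bits
      // are {0, 1, 0, 0, 0, 1, 0, 1}, i.e. each bit is set iff the
      // corresponding byte is nonzero.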
      QCAT,        // Concatenate two vector predicates.
      QTRUE,       // All-true vector predicate.
      QFALSE,      // All-false vector predicate.
      VZERO,       // HVX vector of all zeros.
      VSPLATW,     // HVX splat of a 32-bit word with an arbitrary result type.
      TYPECAST,    // No-op that's used to convert between different legal
                   // types in a register.
      VALIGN,      // Align two vectors (in Op0, Op1) to one that would have
                   // been loaded from address in Op2.
      VALIGNADDR,  // Align vector address: Op0 & -Op1, except when it is
                   // an address in a vector load, then it's a no-op.
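      // A sketch of how these combine (assuming the usual unaligned-load
      // expansion): to load a vector from a misaligned address A, load the
      // two aligned vectors that straddle A, using VALIGNADDR to form the
      // aligned address, and merge them with VALIGN, passing A as Op2.
      // See LowerUnalignedLoad further below.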
      OP_END
    };

} // end namespace HexagonISD

  class HexagonSubtarget;

  class HexagonTargetLowering : public TargetLowering {
    int VarArgsFrameOffset;   // Frame offset to start of varargs area.
    const HexagonTargetMachine &HTM;
    const HexagonSubtarget &Subtarget;

    bool CanReturnSmallStruct(const Function* CalleeFn, unsigned& RetSize)
        const;

  public:
    explicit HexagonTargetLowering(const TargetMachine &TM,
                                   const HexagonSubtarget &ST);

    bool isHVXVectorType(MVT Ty) const;

    /// IsEligibleForTailCallOptimization - Check whether the call is eligible
    /// for tail call optimization. Targets which want to do tail call
    /// optimization should implement this function.
    bool IsEligibleForTailCallOptimization(SDValue Callee,
        CallingConv::ID CalleeCC, bool isVarArg, bool isCalleeStructRet,
        bool isCallerStructRet, const SmallVectorImpl<ISD::OutputArg> &Outs,
        const SmallVectorImpl<SDValue> &OutVals,
        const SmallVectorImpl<ISD::InputArg> &Ins, SelectionDAG& DAG) const;

    bool getTgtMemIntrinsic(IntrinsicInfo &Info, const CallInst &I,
                            MachineFunction &MF,
                            unsigned Intrinsic) const override;

    bool isTruncateFree(Type *Ty1, Type *Ty2) const override;
    bool isTruncateFree(EVT VT1, EVT VT2) const override;

    bool isCheapToSpeculateCttz() const override { return true; }
    bool isCheapToSpeculateCtlz() const override { return true; }
    bool isCtlzFast() const override { return true; }

    bool allowTruncateForTailCall(Type *Ty1, Type *Ty2) const override;

    /// Return true if an FMA operation is faster than a pair of mul and add
    /// instructions. fmuladd intrinsics will be expanded to FMAs when this
    /// method returns true (and FMAs are legal), otherwise fmuladd is
    /// expanded to mul + add.
    bool isFMAFasterThanFMulAndFAdd(EVT) const override;

    // Should we expand the build vector with shuffles?
    bool shouldExpandBuildVectorWithShuffles(EVT VT,
        unsigned DefinedValues) const override;

    bool isShuffleMaskLegal(ArrayRef<int> Mask, EVT VT) const override;
    TargetLoweringBase::LegalizeTypeAction getPreferredVectorAction(MVT VT)
        const override;

    SDValue LowerOperation(SDValue Op, SelectionDAG &DAG) const override;
    void LowerOperationWrapper(SDNode *N, SmallVectorImpl<SDValue> &Results,
                               SelectionDAG &DAG) const override;
    void ReplaceNodeResults(SDNode *N, SmallVectorImpl<SDValue> &Results,
                            SelectionDAG &DAG) const override;

    const char *getTargetNodeName(unsigned Opcode) const override;

    SDValue LowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerCONCAT_VECTORS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEXTRACT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_VECTOR_ELT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINSERT_SUBVECTOR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVECTOR_SHIFT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerROTL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBITCAST(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerANY_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerSIGN_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerZERO_EXTEND(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerStore(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUnalignedLoad(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerUAddSubO(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerAddSubCarry(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerDYNAMIC_STACKALLOC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINLINEASM(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerPREFETCH(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerREADCYCLECOUNTER(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_LABEL(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerEH_RETURN(SDValue Op, SelectionDAG &DAG) const;
    SDValue
    LowerFormalArguments(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                         const SmallVectorImpl<ISD::InputArg> &Ins,
                         const SDLoc &dl, SelectionDAG &DAG,
                         SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerGLOBALADDRESS(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerBlockAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerGlobalTLSAddress(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerToTLSGeneralDynamicModel(GlobalAddressSDNode *GA,
        SelectionDAG &DAG) const;
    SDValue LowerToTLSInitialExecModel(GlobalAddressSDNode *GA,
        SelectionDAG &DAG) const;
    SDValue LowerToTLSLocalExecModel(GlobalAddressSDNode *GA,
        SelectionDAG &DAG) const;
    SDValue GetDynamicTLSAddr(SelectionDAG &DAG, SDValue Chain,
        GlobalAddressSDNode *GA, SDValue InFlag, EVT PtrVT,
        unsigned ReturnReg, unsigned char OperandFlags) const;
    SDValue LowerGLOBAL_OFFSET_TABLE(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerCall(TargetLowering::CallLoweringInfo &CLI,
        SmallVectorImpl<SDValue> &InVals) const override;
    SDValue LowerCallResult(SDValue Chain, SDValue InFlag,
                            CallingConv::ID CallConv, bool isVarArg,
                            const SmallVectorImpl<ISD::InputArg> &Ins,
                            const SDLoc &dl, SelectionDAG &DAG,
                            SmallVectorImpl<SDValue> &InVals,
                            const SmallVectorImpl<SDValue> &OutVals,
                            SDValue Callee) const;

    SDValue LowerSETCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerVSELECT(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerFRAMEADDR(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerATOMIC_FENCE(SDValue Op, SelectionDAG& DAG) const;
    SDValue LowerRETURNADDR(SDValue Op, SelectionDAG &DAG) const;

    bool CanLowerReturn(CallingConv::ID CallConv,
                        MachineFunction &MF, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        LLVMContext &Context) const override;

    SDValue LowerReturn(SDValue Chain, CallingConv::ID CallConv, bool isVarArg,
                        const SmallVectorImpl<ISD::OutputArg> &Outs,
                        const SmallVectorImpl<SDValue> &OutVals,
                        const SDLoc &dl, SelectionDAG &DAG) const override;

    bool mayBeEmittedAsTailCall(const CallInst *CI) const override;

    unsigned getRegisterByName(const char* RegName, EVT VT,
                               SelectionDAG &DAG) const override;

    /// If a physical register, this returns the register that receives the
    /// exception address on entry to an EH pad.
    unsigned
    getExceptionPointerRegister(const Constant *PersonalityFn) const override {
      return Hexagon::R0;
    }

    /// If a physical register, this returns the register that receives the
    /// exception typeid on entry to a landing pad.
    unsigned
    getExceptionSelectorRegister(const Constant *PersonalityFn) const override {
      return Hexagon::R1;
    }

    SDValue LowerVASTART(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerConstantPool(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerJumpTable(SDValue Op, SelectionDAG &DAG) const;

    EVT getSetCCResultType(const DataLayout &, LLVMContext &C,
                           EVT VT) const override {
      if (!VT.isVector())
        return MVT::i1;
      else
        return EVT::getVectorVT(C, MVT::i1, VT.getVectorNumElements());
    }
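    // For example, a compare of scalars yields an i1 result type, while a
    // compare of v64i8 vectors yields v64i1 (one boolean per element).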

    bool getPostIndexedAddressParts(SDNode *N, SDNode *Op,
                                    SDValue &Base, SDValue &Offset,
                                    ISD::MemIndexedMode &AM,
                                    SelectionDAG &DAG) const override;

    ConstraintType getConstraintType(StringRef Constraint) const override;

    std::pair<unsigned, const TargetRegisterClass *>
    getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                 StringRef Constraint, MVT VT) const override;

    unsigned
    getInlineAsmMemConstraint(StringRef ConstraintCode) const override {
      if (ConstraintCode == "o")
        return InlineAsm::Constraint_o;
      return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
    }

    // Intrinsics
    SDValue LowerINTRINSIC_WO_CHAIN(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerINTRINSIC_VOID(SDValue Op, SelectionDAG &DAG) const;
    /// isLegalAddressingMode - Return true if the addressing mode represented
    /// by AM is legal for this target, for a load/store of the specified type.
    /// The type may be VoidTy, in which case only return true if the addressing
    /// mode is legal for a load/store of any legal type.
    /// TODO: Handle pre/postinc as well.
    bool isLegalAddressingMode(const DataLayout &DL, const AddrMode &AM,
                               Type *Ty, unsigned AS,
                               Instruction *I = nullptr) const override;
    /// Return true if folding a constant offset with the given GlobalAddress
    /// is legal.  It is frequently not legal in PIC relocation models.
    bool isOffsetFoldingLegal(const GlobalAddressSDNode *GA) const override;

    bool isFPImmLegal(const APFloat &Imm, EVT VT,
                      bool ForCodeSize) const override;

    /// isLegalICmpImmediate - Return true if the specified immediate is a
    /// legal icmp immediate, that is, the target has icmp instructions which
    /// can compare a register against the immediate without having to
    /// materialize the immediate into a register.
    bool isLegalICmpImmediate(int64_t Imm) const override;

    EVT getOptimalMemOpType(uint64_t Size, unsigned DstAlign,
        unsigned SrcAlign, bool IsMemset, bool ZeroMemset, bool MemcpyStrSrc,
        const AttributeList &FuncAttributes) const override;

    bool allowsMisalignedMemoryAccesses(EVT VT, unsigned AddrSpace,
        unsigned Align, MachineMemOperand::Flags Flags, bool *Fast) const override;

    /// Returns relocation base for the given PIC jumptable.
    SDValue getPICJumpTableRelocBase(SDValue Table, SelectionDAG &DAG)
                                     const override;

    bool shouldReduceLoadWidth(SDNode *Load, ISD::LoadExtType ExtTy,
                               EVT NewVT) const override;

    // Handling of atomic RMW instructions.
    Value *emitLoadLinked(IRBuilder<> &Builder, Value *Addr,
        AtomicOrdering Ord) const override;
    Value *emitStoreConditional(IRBuilder<> &Builder, Value *Val,
        Value *Addr, AtomicOrdering Ord) const override;
    AtomicExpansionKind shouldExpandAtomicLoadInIR(LoadInst *LI) const override;
    bool shouldExpandAtomicStoreInIR(StoreInst *SI) const override;
    AtomicExpansionKind
    shouldExpandAtomicCmpXchgInIR(AtomicCmpXchgInst *AI) const override;

    AtomicExpansionKind
    shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const override {
      return AtomicExpansionKind::LLSC;
    }
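    // Note: returning LLSC above directs AtomicExpandPass to expand atomic
    // RMW operations into a load-locked/store-conditional loop built from
    // the emitLoadLinked and emitStoreConditional hooks declared earlier.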

  private:
    void initializeHVXLowering();
    void validateConstPtrAlignment(SDValue Ptr, const SDLoc &dl,
                                   unsigned NeedAlign) const;

    std::pair<SDValue,int> getBaseAndOffset(SDValue Addr) const;

    bool getBuildVectorConstInts(ArrayRef<SDValue> Values, MVT VecTy,
                                 SelectionDAG &DAG,
                                 MutableArrayRef<ConstantInt*> Consts) const;
    SDValue buildVector32(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                          SelectionDAG &DAG) const;
    SDValue buildVector64(ArrayRef<SDValue> Elem, const SDLoc &dl, MVT VecTy,
                          SelectionDAG &DAG) const;
    SDValue extractVector(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                          MVT ValTy, MVT ResTy, SelectionDAG &DAG) const;
    SDValue insertVector(SDValue VecV, SDValue ValV, SDValue IdxV,
                         const SDLoc &dl, MVT ValTy, SelectionDAG &DAG) const;
    SDValue expandPredicate(SDValue Vec32, const SDLoc &dl,
                            SelectionDAG &DAG) const;
    SDValue contractPredicate(SDValue Vec64, const SDLoc &dl,
                              SelectionDAG &DAG) const;
    SDValue getVectorShiftByInt(SDValue Op, SelectionDAG &DAG) const;

    bool isUndef(SDValue Op) const {
      if (Op.isMachineOpcode())
        return Op.getMachineOpcode() == TargetOpcode::IMPLICIT_DEF;
      return Op.getOpcode() == ISD::UNDEF;
    }
    SDValue getInstr(unsigned MachineOpc, const SDLoc &dl, MVT Ty,
                     ArrayRef<SDValue> Ops, SelectionDAG &DAG) const {
      SDNode *N = DAG.getMachineNode(MachineOpc, dl, Ty, Ops);
      return SDValue(N, 0);
    }
    SDValue getZero(const SDLoc &dl, MVT Ty, SelectionDAG &DAG) const;

    using VectorPair = std::pair<SDValue, SDValue>;
    using TypePair = std::pair<MVT, MVT>;

    SDValue getInt(unsigned IntId, MVT ResTy, ArrayRef<SDValue> Ops,
                   const SDLoc &dl, SelectionDAG &DAG) const;

    MVT ty(SDValue Op) const {
      return Op.getValueType().getSimpleVT();
    }
    TypePair ty(const VectorPair &Ops) const {
      return { Ops.first.getValueType().getSimpleVT(),
               Ops.second.getValueType().getSimpleVT() };
    }
    MVT tyScalar(MVT Ty) const {
      if (!Ty.isVector())
        return Ty;
      return MVT::getIntegerVT(Ty.getSizeInBits());
    }
    MVT tyVector(MVT Ty, MVT ElemTy) const {
      if (Ty.isVector() && Ty.getVectorElementType() == ElemTy)
        return Ty;
      unsigned TyWidth = Ty.getSizeInBits();
      unsigned ElemWidth = ElemTy.getSizeInBits();
      assert((TyWidth % ElemWidth) == 0);
      return MVT::getVectorVT(ElemTy, TyWidth/ElemWidth);
    }
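    // For example, tyVector(MVT::v2i16, MVT::i8) reinterprets the 32-bit
    // vector type as MVT::v4i8 (same total width, i8 elements).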

    MVT typeJoin(const TypePair &Tys) const;
    TypePair typeSplit(MVT Ty) const;
    MVT typeExtElem(MVT VecTy, unsigned Factor) const;
    MVT typeTruncElem(MVT VecTy, unsigned Factor) const;

    SDValue opJoin(const VectorPair &Ops, const SDLoc &dl,
                   SelectionDAG &DAG) const;
    VectorPair opSplit(SDValue Vec, const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue opCastElem(SDValue Vec, MVT ElemTy, SelectionDAG &DAG) const;

    bool isHvxSingleTy(MVT Ty) const;
    bool isHvxPairTy(MVT Ty) const;
    SDValue convertToByteIndex(SDValue ElemIdx, MVT ElemTy,
                               SelectionDAG &DAG) const;
    SDValue getIndexInWord32(SDValue Idx, MVT ElemTy, SelectionDAG &DAG) const;
    SDValue getByteShuffle(const SDLoc &dl, SDValue Op0, SDValue Op1,
                           ArrayRef<int> Mask, SelectionDAG &DAG) const;

    SDValue buildHvxVectorReg(ArrayRef<SDValue> Values, const SDLoc &dl,
                              MVT VecTy, SelectionDAG &DAG) const;
    SDValue buildHvxVectorPred(ArrayRef<SDValue> Values, const SDLoc &dl,
                               MVT VecTy, SelectionDAG &DAG) const;
    SDValue createHvxPrefixPred(SDValue PredV, const SDLoc &dl,
                                unsigned BitBytes, bool ZeroFill,
                                SelectionDAG &DAG) const;
    SDValue extractHvxElementReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                 MVT ResTy, SelectionDAG &DAG) const;
    SDValue extractHvxElementPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                  MVT ResTy, SelectionDAG &DAG) const;
    SDValue insertHvxElementReg(SDValue VecV, SDValue IdxV, SDValue ValV,
                                const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue insertHvxElementPred(SDValue VecV, SDValue IdxV, SDValue ValV,
                                 const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue extractHvxSubvectorReg(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                   MVT ResTy, SelectionDAG &DAG) const;
    SDValue extractHvxSubvectorPred(SDValue VecV, SDValue IdxV, const SDLoc &dl,
                                    MVT ResTy, SelectionDAG &DAG) const;
    SDValue insertHvxSubvectorReg(SDValue VecV, SDValue SubV, SDValue IdxV,
                                  const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue insertHvxSubvectorPred(SDValue VecV, SDValue SubV, SDValue IdxV,
                                   const SDLoc &dl, SelectionDAG &DAG) const;
    SDValue extendHvxVectorPred(SDValue VecV, const SDLoc &dl, MVT ResTy,
                                bool ZeroExt, SelectionDAG &DAG) const;

    SDValue LowerHvxBuildVector(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxConcatVectors(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxExtractElement(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxInsertElement(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxExtractSubvector(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxInsertSubvector(SDValue Op, SelectionDAG &DAG) const;

    SDValue LowerHvxAnyExt(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxSignExt(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxZeroExt(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxCttz(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxMul(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxMulh(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxSetCC(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxExtend(SDValue Op, SelectionDAG &DAG) const;
    SDValue LowerHvxShift(SDValue Op, SelectionDAG &DAG) const;

    SDValue SplitHvxPairOp(SDValue Op, SelectionDAG &DAG) const;
    SDValue SplitHvxMemOp(SDValue Op, SelectionDAG &DAG) const;

    std::pair<const TargetRegisterClass*, uint8_t>
    findRepresentativeClass(const TargetRegisterInfo *TRI, MVT VT)
        const override;

    bool isHvxOperation(SDValue Op) const;
    SDValue LowerHvxOperation(SDValue Op, SelectionDAG &DAG) const;
  };

} // end namespace llvm

#endif // LLVM_LIB_TARGET_HEXAGON_HEXAGONISELLOWERING_H