//===-- RISCVInstrInfo.td - Target Description for RISC-V --*- tablegen -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file describes the RISC-V instructions in TableGen format.
//
//===----------------------------------------------------------------------===//

//===----------------------------------------------------------------------===//
// RISC-V specific DAG Nodes.
//===----------------------------------------------------------------------===//

// Target-independent type requirements, but with target-specific formats.
def SDT_CallSeqStart : SDCallSeqStart<[SDTCisVT<0, i32>,
                                       SDTCisVT<1, i32>]>;
def SDT_CallSeqEnd   : SDCallSeqEnd<[SDTCisVT<0, i32>,
                                     SDTCisVT<1, i32>]>;

// Target-dependent type requirements.
// Calls are variadic (-1 operand count); operand 0 is the XLenVT callee.
def SDT_RISCVCall     : SDTypeProfile<0, -1, [SDTCisVT<0, XLenVT>]>;
def SDT_RISCVSelectCC : SDTypeProfile<1, 5, [SDTCisSameAs<1, 2>,
                                             SDTCisVT<3, OtherVT>,
                                             SDTCisSameAs<0, 4>,
                                             SDTCisSameAs<4, 5>]>;
def SDT_RISCVBrCC : SDTypeProfile<0, 4, [SDTCisSameAs<0, 1>,
                                         SDTCisVT<2, OtherVT>,
                                         SDTCisVT<3, OtherVT>]>;
def SDT_RISCVReadCSR  : SDTypeProfile<1, 1, [SDTCisInt<0>, SDTCisInt<1>]>;
def SDT_RISCVWriteCSR : SDTypeProfile<0, 2, [SDTCisInt<0>, SDTCisInt<1>]>;
def SDT_RISCVSwapCSR  : SDTypeProfile<1, 2, [SDTCisInt<0>, SDTCisInt<1>,
                                             SDTCisInt<2>]>;
// Two i32 results (lo, hi) so the wide cycle counter can be read on RV32.
def SDT_RISCVReadCycleWide : SDTypeProfile<2, 0, [SDTCisVT<0, i32>,
                                                  SDTCisVT<1, i32>]>;
// *W profiles are RV64-only: all value operands/results are i64.
def SDT_RISCVIntUnaryOpW : SDTypeProfile<1, 1, [
  SDTCisSameAs<0, 1>, SDTCisVT<0, i64>
]>;
def SDT_RISCVIntBinOpW : SDTypeProfile<1, 2, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>
]>;
def SDT_RISCVIntShiftDOpW : SDTypeProfile<1, 3, [
  SDTCisSameAs<0, 1>, SDTCisSameAs<0, 2>, SDTCisVT<0, i64>, SDTCisVT<3, i64>
]>;

// Target-independent nodes, but with target-specific formats.
def callseq_start : SDNode<"ISD::CALLSEQ_START", SDT_CallSeqStart,
                           [SDNPHasChain, SDNPOutGlue]>;
def callseq_end   : SDNode<"ISD::CALLSEQ_END", SDT_CallSeqEnd,
                           [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue]>;

// Target-dependent nodes.
def riscv_call      : SDNode<"RISCVISD::CALL", SDT_RISCVCall,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;
def riscv_ret_glue  : SDNode<"RISCVISD::RET_GLUE", SDTNone,
                             [SDNPHasChain, SDNPOptInGlue, SDNPVariadic]>;
def riscv_sret_glue : SDNode<"RISCVISD::SRET_GLUE", SDTNone,
                             [SDNPHasChain, SDNPOptInGlue]>;
def riscv_mret_glue : SDNode<"RISCVISD::MRET_GLUE", SDTNone,
                             [SDNPHasChain, SDNPOptInGlue]>;
def riscv_selectcc  : SDNode<"RISCVISD::SELECT_CC", SDT_RISCVSelectCC>;
def riscv_brcc      : SDNode<"RISCVISD::BR_CC", SDT_RISCVBrCC,
                             [SDNPHasChain]>;
// Tail call: same profile as a call, but control does not return.
def riscv_tail      : SDNode<"RISCVISD::TAIL", SDT_RISCVCall,
                             [SDNPHasChain, SDNPOptInGlue, SDNPOutGlue,
                              SDNPVariadic]>;
def riscv_sllw      : SDNode<"RISCVISD::SLLW", SDT_RISCVIntBinOpW>;
def riscv_sraw      : SDNode<"RISCVISD::SRAW", SDT_RISCVIntBinOpW>;
def riscv_srlw      : SDNode<"RISCVISD::SRLW", SDT_RISCVIntBinOpW>;
def riscv_read_csr  : SDNode<"RISCVISD::READ_CSR", SDT_RISCVReadCSR,
                             [SDNPHasChain]>;
def riscv_write_csr : SDNode<"RISCVISD::WRITE_CSR", SDT_RISCVWriteCSR,
                             [SDNPHasChain]>;
def riscv_swap_csr  : SDNode<"RISCVISD::SWAP_CSR", SDT_RISCVSwapCSR,
                             [SDNPHasChain]>;

def riscv_read_cycle_wide : SDNode<"RISCVISD::READ_CYCLE_WIDE",
                                   SDT_RISCVReadCycleWide,
                                   [SDNPHasChain, SDNPSideEffect]>;

// Nodes used for %hi/%lo style address composition and TLS.
def riscv_add_lo : SDNode<"RISCVISD::ADD_LO", SDTIntBinOp>;
def riscv_hi : SDNode<"RISCVISD::HI", SDTIntUnaryOp>;
def riscv_lla : SDNode<"RISCVISD::LLA", SDTIntUnaryOp>;
def riscv_add_tprel : SDNode<"RISCVISD::ADD_TPREL",
                             SDTypeProfile<1, 3, [SDTCisSameAs<0, 1>,
                                                  SDTCisSameAs<0, 2>,
                                                  SDTCisSameAs<0, 3>,
                                                  SDTCisInt<0>]>>;

//===----------------------------------------------------------------------===//
// Operand and SDNode transformation definitions.
//===----------------------------------------------------------------------===//

class ImmXLenAsmOperand<string prefix, string suffix = ""> : AsmOperandClass {
  let Name = prefix # "ImmXLen" # suffix;
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", Name);
}

class ImmAsmOperand<string prefix, int width, string suffix> : AsmOperandClass {
  let Name = prefix # "Imm" # width # suffix;
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", Name);
}

def ImmZeroAsmOperand : AsmOperandClass {
  let Name = "ImmZero";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = !strconcat("Invalid", Name);
}

// A parse method for (${gpr}) or 0(${gpr}), where the 0 is silently ignored.
def ZeroOffsetMemOpOperand : AsmOperandClass {
  let Name = "ZeroOffsetMemOpOperand";
  let RenderMethod = "addRegOperands";
  let PredicateMethod = "isGPR";
  let ParserMethod = "parseZeroOffsetMemOp";
}

class MemOperand<RegisterClass regClass> : RegisterOperand<regClass>{
  let OperandType = "OPERAND_MEMORY";
}

def GPRMemZeroOffset : MemOperand<GPR> {
  let ParserMatchClass = ZeroOffsetMemOpOperand;
  let PrintMethod = "printZeroOffsetMemOp";
}

def GPRMem : MemOperand<GPR>;

def SPMem : MemOperand<SP>;

def GPRCMem : MemOperand<GPRC>;

class SImmAsmOperand<int width, string suffix = "">
    : ImmAsmOperand<"S", width, suffix> {
}

class UImmAsmOperand<int width, string suffix = "">
    : ImmAsmOperand<"U", width, suffix> {
}

class RISCVOp<ValueType vt = XLenVT> : Operand<vt> {
  let OperandNamespace = "RISCVOp";
}

class RISCVUImmOp<int bitsNum> : RISCVOp {
  let ParserMatchClass = UImmAsmOperand<bitsNum>;
  let DecoderMethod = "decodeUImmOperand<" # bitsNum # ">";
  let OperandType = "OPERAND_UIMM" # bitsNum;
}

// Same as RISCVUImmOp, but usable as a codegen pattern leaf.
class RISCVUImmLeafOp<int bitsNum> :
  RISCVUImmOp<bitsNum>, ImmLeaf<XLenVT, "return isUInt<" # bitsNum # ">(Imm);">;

class RISCVSImmOp<int bitsNum> : RISCVOp {
  let ParserMatchClass = SImmAsmOperand<bitsNum>;
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeSImmOperand<" # bitsNum # ">";
  let OperandType = "OPERAND_SIMM" # bitsNum;
}

// Same as RISCVSImmOp, but usable as a codegen pattern leaf.
class RISCVSImmLeafOp<int bitsNum> :
  RISCVSImmOp<bitsNum>, ImmLeaf<XLenVT, "return isInt<" # bitsNum # ">(Imm);">;

def FenceArg : AsmOperandClass {
  let Name = "FenceArg";
  let RenderMethod = "addFenceArgOperands";
  let ParserMethod = "parseFenceArg";
}

def fencearg : RISCVOp {
  let ParserMatchClass = FenceArg;
  let PrintMethod = "printFenceArg";
  let DecoderMethod = "decodeUImmOperand<4>";
  let OperandType = "OPERAND_UIMM4";
}

def UImmLog2XLenAsmOperand : AsmOperandClass {
  let Name = "UImmLog2XLen";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidUImmLog2XLen";
}

// Shift amount: 0..63 on RV64, 0..31 on RV32.
def uimmlog2xlen : RISCVOp, ImmLeaf<XLenVT, [{
  if (Subtarget->is64Bit())
    return isUInt<6>(Imm);
  return isUInt<5>(Imm);
}]> {
  let ParserMatchClass = UImmLog2XLenAsmOperand;
  // TODO: should ensure invalid shamt is rejected when decoding.
  let DecoderMethod = "decodeUImmOperand<6>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    if (STI.getTargetTriple().isArch64Bit())
      return isUInt<6>(Imm);
    return isUInt<5>(Imm);
  }];
  let OperandType = "OPERAND_UIMMLOG2XLEN";
}

def InsnDirectiveOpcode : AsmOperandClass {
  let Name = "InsnDirectiveOpcode";
  let ParserMethod = "parseInsnDirectiveOpcode";
  let RenderMethod = "addImmOperands";
  let PredicateMethod = "isImm";
}

def uimm1 : RISCVUImmLeafOp<1>;
def uimm2 : RISCVUImmLeafOp<2> {
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    return isUInt<2>(Imm);
  }];
}
def uimm3 : RISCVUImmOp<3>;
def uimm4 : RISCVUImmOp<4>;
def uimm5 : RISCVUImmLeafOp<5>;
def uimm6 : RISCVUImmLeafOp<6>;
def uimm7_opcode : RISCVUImmOp<7> {
  let ParserMatchClass = InsnDirectiveOpcode;
}
def uimm7 : RISCVUImmOp<7>;
def uimm8 : RISCVUImmOp<8>;
def simm12 : RISCVSImmLeafOp<12> {
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isInt<12>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

// A 12-bit signed immediate which does not fit in a 6-bit signed immediate
// and whose negation also fits in 12 bits.
def simm12_no6 : ImmLeaf<XLenVT, [{
  return isInt<12>(Imm) && !isInt<6>(Imm) && isInt<12>(-Imm);}]>;

// A 13-bit signed immediate where the least significant bit is zero.
def simm13_lsb0 : Operand<OtherVT> {
  let ParserMatchClass = SImmAsmOperand<13, "Lsb0">;
  let PrintMethod = "printBranchOperand";
  let EncoderMethod = "getImmOpValueAsr1";
  let DecoderMethod = "decodeSImmOperandAndLsl1<13>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isShiftedInt<12, 1>(Imm);
    return MCOp.isBareSymbolRef();
  }];
  let OperandType = "OPERAND_PCREL";
}

class UImm20Operand : RISCVOp {
  let EncoderMethod = "getImmOpValue";
  let DecoderMethod = "decodeUImmOperand<20>";
  let OperandType = "OPERAND_UIMM20";
}

// A 20-bit unsigned immediate that may alternatively be a bare symbol ref.
class UImm20OperandMaybeSym : UImm20Operand {
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isUInt<20>(Imm);
    return MCOp.isBareSymbolRef();
  }];
}

def uimm20_lui : UImm20OperandMaybeSym {
  let ParserMatchClass = UImmAsmOperand<20, "LUI">;
}
def uimm20_auipc : UImm20OperandMaybeSym {
  let ParserMatchClass = UImmAsmOperand<20, "AUIPC">;
}

def uimm20 : UImm20Operand {
  let ParserMatchClass = UImmAsmOperand<20>;
  let MCOperandPredicate = [{
    int64_t Imm;
    if (!MCOp.evaluateAsConstantImm(Imm))
      return false;
    return isUInt<20>(Imm);
  }];
}

def Simm21Lsb0JALAsmOperand : SImmAsmOperand<21, "Lsb0JAL"> {
  let ParserMethod = "parseJALOffset";
}

// A 21-bit signed immediate where the least significant bit is zero.
def simm21_lsb0_jal : Operand<OtherVT> {
  let ParserMatchClass = Simm21Lsb0JALAsmOperand;
  let PrintMethod = "printBranchOperand";
  let EncoderMethod = "getImmOpValueAsr1";
  let DecoderMethod = "decodeSImmOperandAndLsl1<21>";
  let MCOperandPredicate = [{
    int64_t Imm;
    if (MCOp.evaluateAsConstantImm(Imm))
      return isShiftedInt<20, 1>(Imm);
    return MCOp.isBareSymbolRef();
  }];
  let OperandType = "OPERAND_PCREL";
}

def BareSymbol : AsmOperandClass {
  let Name = "BareSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidBareSymbol";
  let ParserMethod = "parseBareSymbol";
}

// A bare symbol.
def bare_symbol : Operand<XLenVT> {
  let ParserMatchClass = BareSymbol;
}

def CallSymbol : AsmOperandClass {
  let Name = "CallSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidCallSymbol";
  let ParserMethod = "parseCallSymbol";
}

// A bare symbol used in call/tail only.
def call_symbol : Operand<XLenVT> {
  let ParserMatchClass = CallSymbol;
}

def PseudoJumpSymbol : AsmOperandClass {
  let Name = "PseudoJumpSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidPseudoJumpSymbol";
  let ParserMethod = "parsePseudoJumpSymbol";
}

// A bare symbol used for pseudo jumps only.
def pseudo_jump_symbol : Operand<XLenVT> {
  let ParserMatchClass = PseudoJumpSymbol;
}

def TPRelAddSymbol : AsmOperandClass {
  let Name = "TPRelAddSymbol";
  let RenderMethod = "addImmOperands";
  let DiagnosticType = "InvalidTPRelAddSymbol";
  let ParserMethod = "parseOperandWithModifier";
}

// A bare symbol with the %tprel_add variant.
def tprel_add_symbol : Operand<XLenVT> {
  let ParserMatchClass = TPRelAddSymbol;
}

def CSRSystemRegister : AsmOperandClass {
  let Name = "CSRSystemRegister";
  let ParserMethod = "parseCSRSystemRegister";
  let DiagnosticType = "InvalidCSRSystemRegister";
}

// 12-bit CSR address, printed by name when known.
def csr_sysreg : RISCVOp {
  let ParserMatchClass = CSRSystemRegister;
  let PrintMethod = "printCSRSystemRegister";
  let DecoderMethod = "decodeUImmOperand<12>";
  let OperandType = "OPERAND_UIMM12";
}

// A parameterized register class alternative to i32imm/i64imm from Target.td.
def ixlenimm : Operand<XLenVT>;

def ixlenimm_li : Operand<XLenVT> {
  let ParserMatchClass = ImmXLenAsmOperand<"", "LI">;
}

// Accepts subset of LI operands, used by LAImm and LLAImm
def ixlenimm_li_restricted : Operand<XLenVT> {
  let ParserMatchClass = ImmXLenAsmOperand<"", "LI_Restricted">;
}

// Standalone (codegen-only) immleaf patterns.

// A 6-bit constant greater than 32.
def uimm6gt32 : ImmLeaf<XLenVT, [{
  return isUInt<6>(Imm) && Imm > 32;
}]>;

// Addressing modes.
// Necessary because a frameindex can't be matched directly in a pattern.
def FrameAddrRegImm : ComplexPattern<iPTR, 2, "SelectFrameAddrRegImm",
                                     [frameindex, or, add]>;
def AddrRegImm : ComplexPattern<iPTR, 2, "SelectAddrRegImm">;

// Return the negation of an immediate value.
def NegImm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(-N->getSExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Return an immediate value minus 32.
def ImmSub32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue() - 32, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Return an immediate subtracted from XLen.
def ImmSubFromXLen : SDNodeXForm<imm, [{
  uint64_t XLen = Subtarget->getXLen();
  return CurDAG->getTargetConstant(XLen - N->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Return an immediate subtracted from 32.
def ImmSubFrom32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(32 - N->getZExtValue(), SDLoc(N),
                                   N->getValueType(0));
}]>;

// Check if (add r, imm) can be optimized to (ADDI (ADDI r, imm0), imm1),
// in which imm = imm0 + imm1 and both imm0 and imm1 are simm12. We make imm0
// as large as possible and imm1 as small as possible so that we might be able
// to use c.addi for the small immediate.
def AddiPair : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // The immediate operand must be in range [-4096,-2049] or [2048,4094].
  int64_t Imm = N->getSExtValue();
  return (-4096 <= Imm && Imm <= -2049) || (2048 <= Imm && Imm <= 4094);
}]>;

// Return imm - (imm < 0 ? -2048 : 2047).
def AddiPairImmSmall : SDNodeXForm<imm, [{
  int64_t Imm = N->getSExtValue();
  int64_t Adj = N->getSExtValue() < 0 ? -2048 : 2047;
  return CurDAG->getTargetConstant(Imm - Adj, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Return -2048 if immediate is negative or 2047 if positive. These are the
// largest simm12 values.
def AddiPairImmLarge : SDNodeXForm<imm, [{
  int64_t Imm = N->getSExtValue() < 0 ? -2048 : 2047;
  return CurDAG->getTargetConstant(Imm, SDLoc(N),
                                   N->getValueType(0));
}]>;

def TrailingZeros : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(llvm::countr_zero(N->getZExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;

def XLenSubTrailingOnes : SDNodeXForm<imm, [{
  uint64_t XLen = Subtarget->getXLen();
  uint64_t TrailingOnes = llvm::countr_one(N->getZExtValue());
  return CurDAG->getTargetConstant(XLen - TrailingOnes, SDLoc(N),
                                   N->getValueType(0));
}]>;

// Checks if this mask is a non-empty sequence of ones starting at the
// most/least significant bit with the remainder zero and exceeds simm32/simm12.
def LeadingOnesMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  return !isInt<32>(N->getSExtValue()) && isMask_64(~N->getSExtValue());
}], TrailingZeros>;

def TrailingOnesMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  return !isInt<12>(N->getSExtValue()) && isMask_64(N->getZExtValue());
}], XLenSubTrailingOnes>;

// Similar to LeadingOnesMask, but only consider leading ones in the lower 32
// bits.
def LeadingOnesWMask : PatLeaf<(imm), [{
  if (!N->hasOneUse())
    return false;
  // If the value is a uint32 but not an int32, it must have bit 31 set and
  // bits 63:32 cleared. After that we're looking for a shifted mask but not
  // an all ones mask.
  int64_t Imm = N->getSExtValue();
  return !isInt<32>(Imm) && isUInt<32>(Imm) && isShiftedMask_64(Imm) &&
         Imm != UINT64_C(0xffffffff);
}], TrailingZeros>;

//===----------------------------------------------------------------------===//
// Instruction Formats
//===----------------------------------------------------------------------===//

include "RISCVInstrFormats.td"

//===----------------------------------------------------------------------===//
// Instruction Class Templates
//===----------------------------------------------------------------------===//

// Conditional branch comparing rs1 and rs2, with a PC-relative target.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class BranchCC_rri<bits<3> funct3, string opcodestr>
    : RVInstB<funct3, OPC_BRANCH, (outs),
              (ins GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12),
              opcodestr, "$rs1, $rs2, $imm12">,
      Sched<[WriteJmp, ReadJmp, ReadJmp]> {
  let isBranch = 1;
  let isTerminator = 1;
}

let hasSideEffects = 0, mayLoad = 1, mayStore = 0 in {
class Load_ri<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_LOAD, (outs GPR:$rd), (ins GPRMem:$rs1, simm12:$imm12),
              opcodestr, "$rd, ${imm12}(${rs1})">;

// Hypervisor virtual-machine load; funct5 is encoded in the rs2 field.
class HLoad_r<bits<7> funct7, bits<5> funct5, string opcodestr>
    : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs GPR:$rd),
              (ins GPRMemZeroOffset:$rs1), opcodestr, "$rd, $rs1"> {
  let rs2 = funct5;
}
}

// Operands for stores are in the order srcreg, base, offset rather than
// reflecting the order these fields are specified in the instruction
// encoding.
let hasSideEffects = 0, mayLoad = 0, mayStore = 1 in {
class Store_rri<bits<3> funct3, string opcodestr>
    : RVInstS<funct3, OPC_STORE, (outs),
              (ins GPR:$rs2, GPRMem:$rs1, simm12:$imm12),
              opcodestr, "$rs2, ${imm12}(${rs1})">;

// Hypervisor virtual-machine store; rd is hard-wired to zero.
class HStore_rr<bits<7> funct7, string opcodestr>
    : RVInstR<funct7, 0b100, OPC_SYSTEM, (outs),
              (ins GPR:$rs2, GPRMemZeroOffset:$rs1),
              opcodestr, "$rs2, $rs1"> {
  let rd = 0;
}
}

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ALU_ri<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_OP_IMM, (outs GPR:$rd), (ins GPR:$rs1, simm12:$imm12),
              opcodestr, "$rd, $rs1, $imm12">,
      Sched<[WriteIALU, ReadIALU]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class Shift_ri<bits<5> imm11_7, bits<3> funct3, string opcodestr>
    : RVInstIShift<imm11_7, funct3, OPC_OP_IMM, (outs GPR:$rd),
                   (ins GPR:$rs1, uimmlog2xlen:$shamt), opcodestr,
                   "$rd, $rs1, $shamt">,
      Sched<[WriteShiftImm, ReadShiftImm]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ALU_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
             bit Commutable = 0>
    : RVInstR<funct7, funct3, OPC_OP, (outs GPR:$rd), (ins GPR:$rs1, GPR:$rs2),
              opcodestr, "$rd, $rs1, $rs2"> {
  let isCommutable = Commutable;
}

let hasNoSchedulingInfo = 1,
    hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class CSR_ir<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd), (ins csr_sysreg:$imm12, GPR:$rs1),
              opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR, ReadCSR]>;

let hasNoSchedulingInfo = 1,
    hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class CSR_ii<bits<3> funct3, string opcodestr>
    : RVInstI<funct3, OPC_SYSTEM, (outs GPR:$rd),
              (ins csr_sysreg:$imm12, uimm5:$rs1),
              opcodestr, "$rd, $imm12, $rs1">, Sched<[WriteCSR]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ShiftW_ri<bits<7> imm11_5, bits<3> funct3, string opcodestr>
    : RVInstIShiftW<imm11_5, funct3, OPC_OP_IMM_32, (outs GPR:$rd),
                    (ins GPR:$rs1, uimm5:$shamt), opcodestr,
                    "$rd, $rs1, $shamt">,
      Sched<[WriteShiftImm32, ReadShiftImm32]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
class ALUW_rr<bits<7> funct7, bits<3> funct3, string opcodestr,
              bit Commutable = 0>
    : RVInstR<funct7, funct3, OPC_OP_32, (outs GPR:$rd),
              (ins GPR:$rs1, GPR:$rs2), opcodestr, "$rd, $rs1, $rs2"> {
  let isCommutable = Commutable;
}

let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class Priv<string opcodestr, bits<7> funct7>
    : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
              opcodestr, "">;

let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in
class Priv_rr<string opcodestr, bits<7> funct7>
    : RVInstR<funct7, 0b000, OPC_SYSTEM, (outs), (ins GPR:$rs1, GPR:$rs2),
              opcodestr, "$rs1, $rs2"> {
  let rd = 0;
}

//===----------------------------------------------------------------------===//
// Instructions
//===----------------------------------------------------------------------===//

let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in {
let isReMaterializable = 1, isAsCheapAsAMove = 1,
    IsSignExtendingOpW = 1 in
def LUI : RVInstU<OPC_LUI, (outs GPR:$rd), (ins uimm20_lui:$imm20),
                  "lui", "$rd, $imm20">, Sched<[WriteIALU]>;

def AUIPC : RVInstU<OPC_AUIPC, (outs GPR:$rd), (ins uimm20_auipc:$imm20),
                    "auipc", "$rd, $imm20">, Sched<[WriteIALU]>;

def JAL : RVInstJ<OPC_JAL, (outs GPR:$rd), (ins simm21_lsb0_jal:$imm20),
                  "jal", "$rd, $imm20">, Sched<[WriteJal]>;

def JALR : RVInstI<0b000, OPC_JALR, (outs GPR:$rd),
                   (ins GPR:$rs1, simm12:$imm12),
                   "jalr", "$rd, ${imm12}(${rs1})">,
           Sched<[WriteJalr, ReadJalr]>;
} // hasSideEffects = 0, mayLoad = 0, mayStore = 0

def BEQ  : BranchCC_rri<0b000, "beq">;
def BNE  : BranchCC_rri<0b001, "bne">;
def BLT  : BranchCC_rri<0b100, "blt">;
def BGE  : BranchCC_rri<0b101, "bge">;
def BLTU : BranchCC_rri<0b110, "bltu">;
def BGEU : BranchCC_rri<0b111, "bgeu">;

let IsSignExtendingOpW = 1 in {
def LB  : Load_ri<0b000, "lb">, Sched<[WriteLDB, ReadMemBase]>;
def LH  : Load_ri<0b001, "lh">, Sched<[WriteLDH, ReadMemBase]>;
def LW  : Load_ri<0b010, "lw">, Sched<[WriteLDW, ReadMemBase]>;
def LBU : Load_ri<0b100, "lbu">, Sched<[WriteLDB, ReadMemBase]>;
def LHU : Load_ri<0b101, "lhu">, Sched<[WriteLDH, ReadMemBase]>;
}

def SB : Store_rri<0b000, "sb">, Sched<[WriteSTB, ReadStoreData, ReadMemBase]>;
def SH : Store_rri<0b001, "sh">, Sched<[WriteSTH, ReadStoreData, ReadMemBase]>;
def SW : Store_rri<0b010, "sw">, Sched<[WriteSTW, ReadStoreData, ReadMemBase]>;

// ADDI isn't always rematerializable, but isReMaterializable will be used as
// a hint which is verified in isReallyTriviallyReMaterializable.
let isReMaterializable = 1, isAsCheapAsAMove = 1 in
def ADDI  : ALU_ri<0b000, "addi">;

let IsSignExtendingOpW = 1 in {
def SLTI  : ALU_ri<0b010, "slti">;
def SLTIU : ALU_ri<0b011, "sltiu">;
}

let isReMaterializable = 1, isAsCheapAsAMove = 1 in {
def XORI  : ALU_ri<0b100, "xori">;
def ORI   : ALU_ri<0b110, "ori">;
}

def ANDI  : ALU_ri<0b111, "andi">;

def SLLI : Shift_ri<0b00000, 0b001, "slli">;
def SRLI : Shift_ri<0b00000, 0b101, "srli">;
def SRAI : Shift_ri<0b01000, 0b101, "srai">;

def ADD  : ALU_rr<0b0000000, 0b000, "add", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SUB  : ALU_rr<0b0100000, 0b000, "sub">,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SLL  : ALU_rr<0b0000000, 0b001, "sll">,
           Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
let IsSignExtendingOpW = 1 in {
def SLT  : ALU_rr<0b0000000, 0b010, "slt">,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SLTU : ALU_rr<0b0000000, 0b011, "sltu">,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
}
def XOR  : ALU_rr<0b0000000, 0b100, "xor", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def SRL  : ALU_rr<0b0000000, 0b101, "srl">,
           Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
def SRA  : ALU_rr<0b0100000, 0b101, "sra">,
           Sched<[WriteShiftReg, ReadShiftReg, ReadShiftReg]>;
def OR   : ALU_rr<0b0000000, 0b110, "or", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;
def AND  : ALU_rr<0b0000000, 0b111, "and", Commutable=1>,
           Sched<[WriteIALU, ReadIALU, ReadIALU]>;

let hasSideEffects = 1, mayLoad = 0, mayStore = 0 in {
// pred/succ are 4-bit iorw sets packed into imm12 alongside a zero fm field.
def FENCE : RVInstI<0b000, OPC_MISC_MEM, (outs),
                    (ins fencearg:$pred, fencearg:$succ),
                    "fence", "$pred, $succ">, Sched<[]> {
  bits<4> pred;
  bits<4> succ;

  let rs1 = 0;
  let rd = 0;
  let imm12 = {0b0000,pred,succ};
}

def FENCE_TSO : RVInstI<0b000, OPC_MISC_MEM, (outs), (ins), "fence.tso", "">, Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = {0b1000,0b0011,0b0011};
}

def FENCE_I : RVInstI<0b001, OPC_MISC_MEM, (outs), (ins), "fence.i", "">, Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0;
}

def ECALL : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ecall", "">, Sched<[WriteJmp]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0;
}

def EBREAK : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "ebreak", "">,
             Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 1;
}

// This is a de facto standard (as set by GNU binutils) 32-bit unimplemented
// instruction (i.e., it should always trap, if your implementation has invalid
// instruction traps).
def UNIMP : RVInstI<0b001, OPC_SYSTEM, (outs), (ins), "unimp", "">,
            Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0b110000000000;
}

let Predicates = [HasStdExtZawrs] in {
def WRS_NTO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.nto", "">,
              Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0b000000001101;
}

def WRS_STO : RVInstI<0b000, OPC_SYSTEM, (outs), (ins), "wrs.sto", "">,
              Sched<[]> {
  let rs1 = 0;
  let rd = 0;
  let imm12 = 0b000000011101;
}
} // Predicates = [HasStdExtZawrs]

} // hasSideEffects = 1, mayLoad = 0, mayStore = 0

def CSRRW : CSR_ir<0b001, "csrrw">;
def CSRRS : CSR_ir<0b010, "csrrs">;
def CSRRC : CSR_ir<0b011, "csrrc">;

def CSRRWI : CSR_ii<0b101, "csrrwi">;
def CSRRSI : CSR_ii<0b110, "csrrsi">;
def CSRRCI : CSR_ii<0b111, "csrrci">;

/// RV64I instructions

let Predicates = [IsRV64] in {
def LWU : Load_ri<0b110, "lwu">, Sched<[WriteLDW, ReadMemBase]>;
def LD  : Load_ri<0b011, "ld">, Sched<[WriteLDD, ReadMemBase]>;
def SD  : Store_rri<0b011, "sd">, Sched<[WriteSTD, ReadStoreData, ReadMemBase]>;

let IsSignExtendingOpW = 1 in {
let hasSideEffects = 0, mayLoad = 0, mayStore = 0 in
def ADDIW : RVInstI<0b000, OPC_OP_IMM_32, (outs GPR:$rd),
                    (ins GPR:$rs1, simm12:$imm12),
                    "addiw", "$rd, $rs1, $imm12">,
            Sched<[WriteIALU32, ReadIALU32]>;

def SLLIW : ShiftW_ri<0b0000000, 0b001, "slliw">;
def SRLIW : ShiftW_ri<0b0000000, 0b101, "srliw">;
def SRAIW : ShiftW_ri<0b0100000, 0b101, "sraiw">;

def ADDW : ALUW_rr<0b0000000, 0b000, "addw", Commutable=1>,
           Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def SUBW : ALUW_rr<0b0100000, 0b000, "subw">,
           Sched<[WriteIALU32, ReadIALU32, ReadIALU32]>;
def SLLW : ALUW_rr<0b0000000, 0b001, "sllw">,
           Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
def SRLW : ALUW_rr<0b0000000, 0b101, "srlw">,
           Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
def SRAW : ALUW_rr<0b0100000, 0b101, "sraw">,
           Sched<[WriteShiftReg32, ReadShiftReg32, ReadShiftReg32]>;
} // IsSignExtendingOpW = 1
} // Predicates = [IsRV64]

//===----------------------------------------------------------------------===//
// Privileged instructions
//===----------------------------------------------------------------------===//

let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
def SRET : Priv<"sret", 0b0001000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00010;
}

def MRET : Priv<"mret", 0b0011000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00010;
}
} // isBarrier = 1, isReturn = 1, isTerminator = 1

def WFI : Priv<"wfi", 0b0001000>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00101;
}

let Predicates = [HasStdExtSvinval] in {
def SFENCE_W_INVAL : Priv<"sfence.w.inval", 0b0001100>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0;
}

def SFENCE_INVAL_IR : Priv<"sfence.inval.ir", 0b0001100>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b00001;
}
def SINVAL_VMA  : Priv_rr<"sinval.vma", 0b0001011>, Sched<[]>;
def HINVAL_VVMA : Priv_rr<"hinval.vvma", 0b0010011>, Sched<[]>;
def HINVAL_GVMA : Priv_rr<"hinval.gvma", 0b0110011>, Sched<[]>;
} // Predicates = [HasStdExtSvinval]

def SFENCE_VMA : Priv_rr<"sfence.vma", 0b0001001>, Sched<[]>;

let Predicates = [HasStdExtH] in {
def HFENCE_VVMA : Priv_rr<"hfence.vvma", 0b0010001>, Sched<[]>;
def HFENCE_GVMA : Priv_rr<"hfence.gvma", 0b0110001>, Sched<[]>;

def HLV_B   : HLoad_r<0b0110000, 0b00000, "hlv.b">, Sched<[]>;
def HLV_BU  : HLoad_r<0b0110000, 0b00001, "hlv.bu">, Sched<[]>;
def HLV_H   : HLoad_r<0b0110010, 0b00000, "hlv.h">, Sched<[]>;
def HLV_HU  : HLoad_r<0b0110010, 0b00001, "hlv.hu">, Sched<[]>;
def HLVX_HU : HLoad_r<0b0110010, 0b00011, "hlvx.hu">, Sched<[]>;
def HLV_W   : HLoad_r<0b0110100, 0b00000, "hlv.w">, Sched<[]>;
def HLVX_WU : HLoad_r<0b0110100, 0b00011, "hlvx.wu">, Sched<[]>;
def HSV_B   : HStore_rr<0b0110001, "hsv.b">, Sched<[]>;
def HSV_H   : HStore_rr<0b0110011, "hsv.h">, Sched<[]>;
def HSV_W   : HStore_rr<0b0110101, "hsv.w">, Sched<[]>;
}
let Predicates = [IsRV64, HasStdExtH] in {
def HLV_WU : HLoad_r<0b0110100, 0b00001, "hlv.wu">, Sched<[]>;
def HLV_D  : HLoad_r<0b0110110, 0b00000, "hlv.d">, Sched<[]>;
def HSV_D  : HStore_rr<0b0110111, "hsv.d">, Sched<[]>;
}

//===----------------------------------------------------------------------===//
// Debug instructions
//===----------------------------------------------------------------------===//

let isBarrier = 1, isReturn = 1, isTerminator = 1 in {
def DRET : Priv<"dret", 0b0111101>, Sched<[]> {
  let rd = 0;
  let rs1 = 0;
  let rs2 = 0b10010;
}
} // isBarrier = 1, isReturn = 1, isTerminator = 1

//===----------------------------------------------------------------------===//
// Assembler Pseudo Instructions (User-Level ISA, Version 2.2, Chapter 20)
//===----------------------------------------------------------------------===//

def : InstAlias<"nop", (ADDI X0, X0, 0)>;

// Note that the size is 32 because up to 8 32-bit instructions are needed to
// generate an arbitrary 64-bit immediate. However, the size does not really
// matter since PseudoLI is currently only used in the AsmParser where it gets
// expanded to real instructions immediately.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in
def PseudoLI : Pseudo<(outs GPR:$rd), (ins ixlenimm_li:$imm), [],
                      "li", "$rd, $imm">;

def PseudoLB  : PseudoLoad<"lb">;
def PseudoLBU : PseudoLoad<"lbu">;
def PseudoLH  : PseudoLoad<"lh">;
def PseudoLHU : PseudoLoad<"lhu">;
def PseudoLW  : PseudoLoad<"lw">;

def PseudoSB : PseudoStore<"sb">;
def PseudoSH : PseudoStore<"sh">;
def PseudoSW : PseudoStore<"sw">;

let Predicates = [IsRV64] in {
def PseudoLWU : PseudoLoad<"lwu">;
def PseudoLD  : PseudoLoad<"ld">;
def PseudoSD  : PseudoStore<"sd">;
} // Predicates = [IsRV64]

def : InstAlias<"li $rd, $imm", (ADDI GPR:$rd, X0, simm12:$imm)>;
def : InstAlias<"mv $rd, $rs", (ADDI GPR:$rd, GPR:$rs, 0)>;
def : InstAlias<"not $rd, $rs", (XORI GPR:$rd, GPR:$rs, -1)>;
def : InstAlias<"neg $rd, $rs", (SUB GPR:$rd, X0, GPR:$rs)>;

let Predicates = [IsRV64] in {
def : InstAlias<"negw $rd, $rs", (SUBW GPR:$rd, X0, GPR:$rs)>;
def : InstAlias<"sext.w $rd, $rs", (ADDIW GPR:$rd, GPR:$rs, 0)>;
} // Predicates = [IsRV64]

def : InstAlias<"seqz $rd, $rs", (SLTIU GPR:$rd, GPR:$rs, 1)>;
def : InstAlias<"snez $rd, $rs", (SLTU GPR:$rd, X0, GPR:$rs)>;
def : InstAlias<"sltz $rd, $rs", (SLT GPR:$rd, GPR:$rs, X0)>;
def : InstAlias<"sgtz $rd, $rs", (SLT GPR:$rd, X0, GPR:$rs)>;

// sgt/sgtu are recognised by the GNU assembler but the canonical slt/sltu
// form will always be printed. Therefore, set a zero weight.
913def : InstAlias<"sgt $rd, $rs, $rt", (SLT GPR:$rd, GPR:$rt, GPR:$rs), 0>; 914def : InstAlias<"sgtu $rd, $rs, $rt", (SLTU GPR:$rd, GPR:$rt, GPR:$rs), 0>; 915 916def : InstAlias<"beqz $rs, $offset", 917 (BEQ GPR:$rs, X0, simm13_lsb0:$offset)>; 918def : InstAlias<"bnez $rs, $offset", 919 (BNE GPR:$rs, X0, simm13_lsb0:$offset)>; 920def : InstAlias<"blez $rs, $offset", 921 (BGE X0, GPR:$rs, simm13_lsb0:$offset)>; 922def : InstAlias<"bgez $rs, $offset", 923 (BGE GPR:$rs, X0, simm13_lsb0:$offset)>; 924def : InstAlias<"bltz $rs, $offset", 925 (BLT GPR:$rs, X0, simm13_lsb0:$offset)>; 926def : InstAlias<"bgtz $rs, $offset", 927 (BLT X0, GPR:$rs, simm13_lsb0:$offset)>; 928 929// Always output the canonical mnemonic for the pseudo branch instructions. 930// The GNU tools emit the canonical mnemonic for the branch pseudo instructions 931// as well (e.g. "bgt" will be recognised by the assembler but never printed by 932// objdump). Match this behaviour by setting a zero weight. 933def : InstAlias<"bgt $rs, $rt, $offset", 934 (BLT GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 935def : InstAlias<"ble $rs, $rt, $offset", 936 (BGE GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 937def : InstAlias<"bgtu $rs, $rt, $offset", 938 (BLTU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 939def : InstAlias<"bleu $rs, $rt, $offset", 940 (BGEU GPR:$rt, GPR:$rs, simm13_lsb0:$offset), 0>; 941 942def : InstAlias<"j $offset", (JAL X0, simm21_lsb0_jal:$offset)>; 943def : InstAlias<"jal $offset", (JAL X1, simm21_lsb0_jal:$offset)>; 944 945// Non-zero offset aliases of "jalr" are the lowest weight, followed by the 946// two-register form, then the one-register forms and finally "ret". 
947def : InstAlias<"jr $rs", (JALR X0, GPR:$rs, 0), 3>; 948def : InstAlias<"jr ${offset}(${rs})", (JALR X0, GPR:$rs, simm12:$offset)>; 949def : InstAlias<"jalr $rs", (JALR X1, GPR:$rs, 0), 3>; 950def : InstAlias<"jalr ${offset}(${rs})", (JALR X1, GPR:$rs, simm12:$offset)>; 951def : InstAlias<"jalr $rd, $rs", (JALR GPR:$rd, GPR:$rs, 0), 2>; 952def : InstAlias<"ret", (JALR X0, X1, 0), 4>; 953 954// Non-canonical forms for jump targets also accepted by the assembler. 955def : InstAlias<"jr $rs, $offset", (JALR X0, GPR:$rs, simm12:$offset), 0>; 956def : InstAlias<"jalr $rs, $offset", (JALR X1, GPR:$rs, simm12:$offset), 0>; 957def : InstAlias<"jalr $rd, $rs, $offset", (JALR GPR:$rd, GPR:$rs, simm12:$offset), 0>; 958 959def : InstAlias<"fence", (FENCE 0xF, 0xF)>; // 0xF == iorw 960 961let Predicates = [HasStdExtZihintpause] in 962def : InstAlias<"pause", (FENCE 0x1, 0x0)>; // 0x1 == w 963 964def : InstAlias<"rdinstret $rd", (CSRRS GPR:$rd, INSTRET.Encoding, X0)>; 965def : InstAlias<"rdcycle $rd", (CSRRS GPR:$rd, CYCLE.Encoding, X0)>; 966def : InstAlias<"rdtime $rd", (CSRRS GPR:$rd, TIME.Encoding, X0)>; 967 968let Predicates = [IsRV32] in { 969def : InstAlias<"rdinstreth $rd", (CSRRS GPR:$rd, INSTRETH.Encoding, X0)>; 970def : InstAlias<"rdcycleh $rd", (CSRRS GPR:$rd, CYCLEH.Encoding, X0)>; 971def : InstAlias<"rdtimeh $rd", (CSRRS GPR:$rd, TIMEH.Encoding, X0)>; 972} // Predicates = [IsRV32] 973 974def : InstAlias<"csrr $rd, $csr", (CSRRS GPR:$rd, csr_sysreg:$csr, X0)>; 975def : InstAlias<"csrw $csr, $rs", (CSRRW X0, csr_sysreg:$csr, GPR:$rs)>; 976def : InstAlias<"csrs $csr, $rs", (CSRRS X0, csr_sysreg:$csr, GPR:$rs)>; 977def : InstAlias<"csrc $csr, $rs", (CSRRC X0, csr_sysreg:$csr, GPR:$rs)>; 978 979def : InstAlias<"csrwi $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>; 980def : InstAlias<"csrsi $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>; 981def : InstAlias<"csrci $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>; 982 983let EmitPriority = 0 in { 
984def : InstAlias<"csrw $csr, $imm", (CSRRWI X0, csr_sysreg:$csr, uimm5:$imm)>; 985def : InstAlias<"csrs $csr, $imm", (CSRRSI X0, csr_sysreg:$csr, uimm5:$imm)>; 986def : InstAlias<"csrc $csr, $imm", (CSRRCI X0, csr_sysreg:$csr, uimm5:$imm)>; 987 988def : InstAlias<"csrrw $rd, $csr, $imm", (CSRRWI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>; 989def : InstAlias<"csrrs $rd, $csr, $imm", (CSRRSI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>; 990def : InstAlias<"csrrc $rd, $csr, $imm", (CSRRCI GPR:$rd, csr_sysreg:$csr, uimm5:$imm)>; 991} 992 993def : InstAlias<"sfence.vma", (SFENCE_VMA X0, X0)>; 994def : InstAlias<"sfence.vma $rs", (SFENCE_VMA GPR:$rs, X0)>; 995 996def : InstAlias<"hfence.gvma", (HFENCE_GVMA X0, X0)>; 997def : InstAlias<"hfence.gvma $rs", (HFENCE_GVMA GPR:$rs, X0)>; 998 999def : InstAlias<"hfence.vvma", (HFENCE_VVMA X0, X0)>; 1000def : InstAlias<"hfence.vvma $rs", (HFENCE_VVMA GPR:$rs, X0)>; 1001 1002let Predicates = [HasStdExtZihintntl] in { 1003 def : InstAlias<"ntl.p1", (ADD X0, X0, X2)>; 1004 def : InstAlias<"ntl.pall", (ADD X0, X0, X3)>; 1005 def : InstAlias<"ntl.s1", (ADD X0, X0, X4)>; 1006 def : InstAlias<"ntl.all", (ADD X0, X0, X5)>; 1007} // Predicates = [HasStdExtZihintntl] 1008 1009let EmitPriority = 0 in { 1010def : InstAlias<"lb $rd, (${rs1})", 1011 (LB GPR:$rd, GPR:$rs1, 0)>; 1012def : InstAlias<"lh $rd, (${rs1})", 1013 (LH GPR:$rd, GPR:$rs1, 0)>; 1014def : InstAlias<"lw $rd, (${rs1})", 1015 (LW GPR:$rd, GPR:$rs1, 0)>; 1016def : InstAlias<"lbu $rd, (${rs1})", 1017 (LBU GPR:$rd, GPR:$rs1, 0)>; 1018def : InstAlias<"lhu $rd, (${rs1})", 1019 (LHU GPR:$rd, GPR:$rs1, 0)>; 1020 1021def : InstAlias<"sb $rs2, (${rs1})", 1022 (SB GPR:$rs2, GPR:$rs1, 0)>; 1023def : InstAlias<"sh $rs2, (${rs1})", 1024 (SH GPR:$rs2, GPR:$rs1, 0)>; 1025def : InstAlias<"sw $rs2, (${rs1})", 1026 (SW GPR:$rs2, GPR:$rs1, 0)>; 1027 1028def : InstAlias<"add $rd, $rs1, $imm12", 1029 (ADDI GPR:$rd, GPR:$rs1, simm12:$imm12)>; 1030def : InstAlias<"and $rd, $rs1, $imm12", 1031 (ANDI GPR:$rd, 
GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"xor $rd, $rs1, $imm12",
                (XORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"or $rd, $rs1, $imm12",
                (ORI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"sll $rd, $rs1, $shamt",
                (SLLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
def : InstAlias<"srl $rd, $rs1, $shamt",
                (SRLI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
def : InstAlias<"sra $rd, $rs1, $shamt",
                (SRAI GPR:$rd, GPR:$rs1, uimmlog2xlen:$shamt)>;
let Predicates = [IsRV64] in {
def : InstAlias<"lwu $rd, (${rs1})",
                (LWU GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"ld $rd, (${rs1})",
                (LD GPR:$rd, GPR:$rs1, 0)>;
def : InstAlias<"sd $rs2, (${rs1})",
                (SD GPR:$rs2, GPR:$rs1, 0)>;

def : InstAlias<"addw $rd, $rs1, $imm12",
                (ADDIW GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"sllw $rd, $rs1, $shamt",
                (SLLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
def : InstAlias<"srlw $rd, $rs1, $shamt",
                (SRLIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
def : InstAlias<"sraw $rd, $rs1, $shamt",
                (SRAIW GPR:$rd, GPR:$rs1, uimm5:$shamt)>;
} // Predicates = [IsRV64]
def : InstAlias<"slt $rd, $rs1, $imm12",
                (SLTI GPR:$rd, GPR:$rs1, simm12:$imm12)>;
def : InstAlias<"sltu $rd, $rs1, $imm12",
                (SLTIU GPR:$rd, GPR:$rs1, simm12:$imm12)>;
}

def : MnemonicAlias<"move", "mv">;

// The SCALL and SBREAK instructions were renamed to ECALL and EBREAK in
// version 2.1 of the user-level ISA. Like the GNU toolchain, we still accept
// the old name for backwards compatibility.
def : MnemonicAlias<"scall", "ecall">;
def : MnemonicAlias<"sbreak", "ebreak">;

// This alias was added to the spec in December 2020. Don't print it by default
// to allow assembly we print to be compatible with versions of GNU assembler
// that don't support this alias.
1076def : InstAlias<"zext.b $rd, $rs", (ANDI GPR:$rd, GPR:$rs, 0xFF), 0>; 1077 1078let Predicates = [HasStdExtZicfilp] in { 1079def : InstAlias<"lpad $imm20", (AUIPC X0, uimm20:$imm20)>; 1080} 1081 1082//===----------------------------------------------------------------------===// 1083// .insn directive instructions 1084//===----------------------------------------------------------------------===// 1085 1086def AnyRegOperand : AsmOperandClass { 1087 let Name = "AnyRegOperand"; 1088 let RenderMethod = "addRegOperands"; 1089 let PredicateMethod = "isAnyReg"; 1090} 1091 1092def AnyReg : Operand<XLenVT> { 1093 let OperandType = "OPERAND_REGISTER"; 1094 let ParserMatchClass = AnyRegOperand; 1095} 1096 1097// isCodeGenOnly = 1 to hide them from the tablegened assembly parser. 1098let isCodeGenOnly = 1, hasSideEffects = 1, mayLoad = 1, mayStore = 1, 1099 hasNoSchedulingInfo = 1 in { 1100def InsnR : DirectiveInsnR<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1101 uimm7:$funct7, AnyReg:$rs1, 1102 AnyReg:$rs2), 1103 "$opcode, $funct3, $funct7, $rd, $rs1, $rs2">; 1104def InsnR4 : DirectiveInsnR4<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1105 uimm3:$funct3, 1106 uimm2:$funct2, 1107 AnyReg:$rs1, AnyReg:$rs2, 1108 AnyReg:$rs3), 1109 "$opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3">; 1110def InsnI : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1111 AnyReg:$rs1, simm12:$imm12), 1112 "$opcode, $funct3, $rd, $rs1, $imm12">; 1113def InsnI_Mem : DirectiveInsnI<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1114 uimm3:$funct3, 1115 AnyReg:$rs1, 1116 simm12:$imm12), 1117 "$opcode, $funct3, $rd, ${imm12}(${rs1})">; 1118def InsnB : DirectiveInsnB<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1119 AnyReg:$rs1, AnyReg:$rs2, 1120 simm13_lsb0:$imm12), 1121 "$opcode, $funct3, $rs1, $rs2, $imm12">; 1122def InsnU : DirectiveInsnU<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1123 uimm20_lui:$imm20), 1124 "$opcode, $rd, $imm20">; 1125def 
InsnJ : DirectiveInsnJ<(outs AnyReg:$rd), (ins uimm7_opcode:$opcode, 1126 simm21_lsb0_jal:$imm20), 1127 "$opcode, $rd, $imm20">; 1128def InsnS : DirectiveInsnS<(outs), (ins uimm7_opcode:$opcode, uimm3:$funct3, 1129 AnyReg:$rs2, AnyReg:$rs1, 1130 simm12:$imm12), 1131 "$opcode, $funct3, $rs2, ${imm12}(${rs1})">; 1132} 1133 1134// Use InstAliases to match these so that we can combine the insn and format 1135// into a mnemonic to use as the key for the tablegened asm matcher table. The 1136// parser will take care of creating these fake mnemonics and will only do it 1137// for known formats. 1138let EmitPriority = 0 in { 1139def : InstAlias<".insn_r $opcode, $funct3, $funct7, $rd, $rs1, $rs2", 1140 (InsnR AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm7:$funct7, 1141 AnyReg:$rs1, AnyReg:$rs2)>; 1142// Accept 4 register form of ".insn r" as alias for ".insn r4". 1143def : InstAlias<".insn_r $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3", 1144 (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2, 1145 AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>; 1146def : InstAlias<".insn_r4 $opcode, $funct3, $funct2, $rd, $rs1, $rs2, $rs3", 1147 (InsnR4 AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, uimm2:$funct2, 1148 AnyReg:$rs1, AnyReg:$rs2, AnyReg:$rs3)>; 1149def : InstAlias<".insn_i $opcode, $funct3, $rd, $rs1, $imm12", 1150 (InsnI AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 1151 simm12:$imm12)>; 1152def : InstAlias<".insn_i $opcode, $funct3, $rd, ${imm12}(${rs1})", 1153 (InsnI_Mem AnyReg:$rd, uimm7_opcode:$opcode, uimm3:$funct3, 1154 AnyReg:$rs1, simm12:$imm12)>; 1155def : InstAlias<".insn_b $opcode, $funct3, $rs1, $rs2, $imm12", 1156 (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 1157 AnyReg:$rs2, simm13_lsb0:$imm12)>; 1158// Accept sb as an alias for b. 
1159def : InstAlias<".insn_sb $opcode, $funct3, $rs1, $rs2, $imm12", 1160 (InsnB uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs1, 1161 AnyReg:$rs2, simm13_lsb0:$imm12)>; 1162def : InstAlias<".insn_u $opcode, $rd, $imm20", 1163 (InsnU AnyReg:$rd, uimm7_opcode:$opcode, uimm20_lui:$imm20)>; 1164def : InstAlias<".insn_j $opcode, $rd, $imm20", 1165 (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>; 1166// Accept uj as an alias for j. 1167def : InstAlias<".insn_uj $opcode, $rd, $imm20", 1168 (InsnJ AnyReg:$rd, uimm7_opcode:$opcode, simm21_lsb0_jal:$imm20)>; 1169def : InstAlias<".insn_s $opcode, $funct3, $rs2, ${imm12}(${rs1})", 1170 (InsnS uimm7_opcode:$opcode, uimm3:$funct3, AnyReg:$rs2, 1171 AnyReg:$rs1, simm12:$imm12)>; 1172} 1173 1174//===----------------------------------------------------------------------===// 1175// Pseudo-instructions and codegen patterns 1176// 1177// Naming convention: For 'generic' pattern classes, we use the naming 1178// convention PatTy1Ty2. For pattern classes which offer a more complex 1179// expansion, prefix the class name, e.g. BccPat. 
1180//===----------------------------------------------------------------------===// 1181 1182/// Generic pattern classes 1183 1184class PatGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt = XLenVT> 1185 : Pat<(vt (OpNode (vt GPR:$rs1))), (Inst GPR:$rs1)>; 1186class PatGprGpr<SDPatternOperator OpNode, RVInst Inst, ValueType vt1 = XLenVT, 1187 ValueType vt2 = XLenVT> 1188 : Pat<(vt1 (OpNode (vt1 GPR:$rs1), (vt2 GPR:$rs2))), (Inst GPR:$rs1, GPR:$rs2)>; 1189 1190class PatGprImm<SDPatternOperator OpNode, RVInst Inst, ImmLeaf ImmType, 1191 ValueType vt = XLenVT> 1192 : Pat<(vt (OpNode (vt GPR:$rs1), ImmType:$imm)), 1193 (Inst GPR:$rs1, ImmType:$imm)>; 1194class PatGprSimm12<SDPatternOperator OpNode, RVInstI Inst> 1195 : PatGprImm<OpNode, Inst, simm12>; 1196class PatGprUimmLog2XLen<SDPatternOperator OpNode, RVInstIShift Inst> 1197 : PatGprImm<OpNode, Inst, uimmlog2xlen>; 1198 1199/// Predicates 1200 1201def assertsexti32 : PatFrag<(ops node:$src), (assertsext node:$src), [{ 1202 return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32); 1203}]>; 1204def sexti16 : ComplexPattern<XLenVT, 1, "selectSExtBits<16>">; 1205def sexti32 : ComplexPattern<i64, 1, "selectSExtBits<32>">; 1206def assertzexti32 : PatFrag<(ops node:$src), (assertzext node:$src), [{ 1207 return cast<VTSDNode>(N->getOperand(1))->getVT().bitsLE(MVT::i32); 1208}]>; 1209def zexti32 : ComplexPattern<i64, 1, "selectZExtBits<32>">; 1210def zexti16 : ComplexPattern<XLenVT, 1, "selectZExtBits<16>">; 1211def zexti16i32 : ComplexPattern<i32, 1, "selectZExtBits<16>">; 1212def zexti8 : ComplexPattern<XLenVT, 1, "selectZExtBits<8>">; 1213def zexti8i32 : ComplexPattern<i32, 1, "selectZExtBits<8>">; 1214 1215def ext : PatFrags<(ops node:$A), [(sext node:$A), (zext node:$A)]>; 1216 1217class binop_oneuse<SDPatternOperator operator> 1218 : PatFrag<(ops node:$A, node:$B), 1219 (operator node:$A, node:$B), [{ 1220 return N->hasOneUse(); 1221}]>; 1222 1223def and_oneuse : binop_oneuse<and>; 1224def mul_oneuse : 
binop_oneuse<mul>; 1225 1226def mul_const_oneuse : PatFrag<(ops node:$A, node:$B), 1227 (mul node:$A, node:$B), [{ 1228 if (auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1))) 1229 return N1C->hasOneUse(); 1230 return false; 1231}]>; 1232 1233class unop_oneuse<SDPatternOperator operator> 1234 : PatFrag<(ops node:$A), 1235 (operator node:$A), [{ 1236 return N->hasOneUse(); 1237}]>; 1238 1239def sext_oneuse : unop_oneuse<sext>; 1240def zext_oneuse : unop_oneuse<zext>; 1241def anyext_oneuse : unop_oneuse<anyext>; 1242def ext_oneuse : unop_oneuse<ext>; 1243def fpext_oneuse : unop_oneuse<any_fpextend>; 1244 1245def 33signbits_node : PatLeaf<(i64 GPR:$src), [{ 1246 return CurDAG->ComputeNumSignBits(SDValue(N, 0)) > 32; 1247}]>; 1248 1249/// Simple arithmetic operations 1250 1251def : PatGprGpr<add, ADD>; 1252def : PatGprSimm12<add, ADDI>; 1253def : PatGprGpr<sub, SUB>; 1254def : PatGprGpr<or, OR>; 1255def : PatGprSimm12<or, ORI>; 1256def : PatGprGpr<and, AND>; 1257def : PatGprSimm12<and, ANDI>; 1258def : PatGprGpr<xor, XOR>; 1259def : PatGprSimm12<xor, XORI>; 1260def : PatGprUimmLog2XLen<shl, SLLI>; 1261def : PatGprUimmLog2XLen<srl, SRLI>; 1262def : PatGprUimmLog2XLen<sra, SRAI>; 1263 1264// Select 'or' as ADDI if the immediate bits are known to be 0 in $rs1. This 1265// can improve compressibility. 1266def or_is_add : PatFrag<(ops node:$lhs, node:$rhs), (or node:$lhs, node:$rhs),[{ 1267 KnownBits Known0 = CurDAG->computeKnownBits(N->getOperand(0), 0); 1268 KnownBits Known1 = CurDAG->computeKnownBits(N->getOperand(1), 0); 1269 return KnownBits::haveNoCommonBitsSet(Known0, Known1); 1270}]>; 1271def : PatGprSimm12<or_is_add, ADDI>; 1272 1273// negate of low bit can be done via two (compressible) shifts. The negate 1274// is never compressible since rs1 and rd can't be the same register. 
1275def : Pat<(XLenVT (sub 0, (and_oneuse GPR:$rs, 1))), 1276 (SRAI (SLLI $rs, (ImmSubFromXLen (XLenVT 1))), 1277 (ImmSubFromXLen (XLenVT 1)))>; 1278 1279// AND with leading/trailing ones mask exceeding simm32/simm12. 1280def : Pat<(i64 (and GPR:$rs, LeadingOnesMask:$mask)), 1281 (SLLI (SRLI $rs, LeadingOnesMask:$mask), LeadingOnesMask:$mask)>; 1282def : Pat<(XLenVT (and GPR:$rs, TrailingOnesMask:$mask)), 1283 (SRLI (SLLI $rs, TrailingOnesMask:$mask), TrailingOnesMask:$mask)>; 1284 1285// Match both a plain shift and one where the shift amount is masked (this is 1286// typically introduced when the legalizer promotes the shift amount and 1287// zero-extends it). For RISC-V, the mask is unnecessary as shifts in the base 1288// ISA only read the least significant 5 bits (RV32I) or 6 bits (RV64I). 1289def shiftMaskXLen : ComplexPattern<XLenVT, 1, "selectShiftMaskXLen", [], [], 0>; 1290def shiftMask32 : ComplexPattern<i64, 1, "selectShiftMask32", [], [], 0>; 1291 1292class shiftop<SDPatternOperator operator> 1293 : PatFrag<(ops node:$val, node:$count), 1294 (operator node:$val, (XLenVT (shiftMaskXLen node:$count)))>; 1295class shiftopw<SDPatternOperator operator> 1296 : PatFrag<(ops node:$val, node:$count), 1297 (operator node:$val, (i64 (shiftMask32 node:$count)))>; 1298 1299def : PatGprGpr<shiftop<shl>, SLL>; 1300def : PatGprGpr<shiftop<srl>, SRL>; 1301def : PatGprGpr<shiftop<sra>, SRA>; 1302 1303// This is a special case of the ADD instruction used to facilitate the use of a 1304// fourth operand to emit a relocation on a symbol relating to this instruction. 1305// The relocation does not affect any bits of the instruction itself but is used 1306// as a hint to the linker. 
1307let hasSideEffects = 0, mayLoad = 0, mayStore = 0, isCodeGenOnly = 0 in 1308def PseudoAddTPRel : Pseudo<(outs GPR:$rd), 1309 (ins GPR:$rs1, GPR:$rs2, tprel_add_symbol:$src), [], 1310 "add", "$rd, $rs1, $rs2, $src">; 1311 1312/// FrameIndex calculations 1313 1314def : Pat<(FrameAddrRegImm (iPTR GPR:$rs1), simm12:$imm12), 1315 (ADDI GPR:$rs1, simm12:$imm12)>; 1316 1317/// HI and ADD_LO address nodes. 1318 1319def : Pat<(riscv_hi tglobaladdr:$in), (LUI tglobaladdr:$in)>; 1320def : Pat<(riscv_hi tblockaddress:$in), (LUI tblockaddress:$in)>; 1321def : Pat<(riscv_hi tjumptable:$in), (LUI tjumptable:$in)>; 1322def : Pat<(riscv_hi tconstpool:$in), (LUI tconstpool:$in)>; 1323 1324def : Pat<(riscv_add_lo GPR:$hi, tglobaladdr:$lo), 1325 (ADDI GPR:$hi, tglobaladdr:$lo)>; 1326def : Pat<(riscv_add_lo GPR:$hi, tblockaddress:$lo), 1327 (ADDI GPR:$hi, tblockaddress:$lo)>; 1328def : Pat<(riscv_add_lo GPR:$hi, tjumptable:$lo), 1329 (ADDI GPR:$hi, tjumptable:$lo)>; 1330def : Pat<(riscv_add_lo GPR:$hi, tconstpool:$lo), 1331 (ADDI GPR:$hi, tconstpool:$lo)>; 1332 1333/// TLS address nodes. 1334 1335def : Pat<(riscv_hi tglobaltlsaddr:$in), (LUI tglobaltlsaddr:$in)>; 1336def : Pat<(riscv_add_tprel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src), 1337 (PseudoAddTPRel GPR:$rs1, GPR:$rs2, tglobaltlsaddr:$src)>; 1338def : Pat<(riscv_add_lo GPR:$src, tglobaltlsaddr:$lo), 1339 (ADDI GPR:$src, tglobaltlsaddr:$lo)>; 1340 1341/// Setcc 1342 1343def : PatGprGpr<setlt, SLT>; 1344def : PatGprSimm12<setlt, SLTI>; 1345def : PatGprGpr<setult, SLTU>; 1346def : PatGprSimm12<setult, SLTIU>; 1347 1348// RISC-V doesn't have general instructions for integer setne/seteq, but we can 1349// check for equality with 0. These ComplexPatterns rewrite the setne/seteq into 1350// something that can be compared with 0. 1351// These ComplexPatterns must be used in pairs. 
1352def riscv_setne : ComplexPattern<XLenVT, 1, "selectSETNE", [setcc]>; 1353def riscv_seteq : ComplexPattern<XLenVT, 1, "selectSETEQ", [setcc]>; 1354 1355// Define pattern expansions for setcc operations that aren't directly 1356// handled by a RISC-V instruction. 1357def : Pat<(riscv_seteq (XLenVT GPR:$rs1)), (SLTIU GPR:$rs1, 1)>; 1358def : Pat<(riscv_setne (XLenVT GPR:$rs1)), (SLTU (XLenVT X0), GPR:$rs1)>; 1359def : Pat<(XLenVT (setne (XLenVT GPR:$rs1), -1)), (SLTIU GPR:$rs1, -1)>; 1360 1361def IntCCtoRISCVCC : SDNodeXForm<riscv_selectcc, [{ 1362 ISD::CondCode CC = cast<CondCodeSDNode>(N->getOperand(2))->get(); 1363 RISCVCC::CondCode BrCC = getRISCVCCForIntCC(CC); 1364 return CurDAG->getTargetConstant(BrCC, SDLoc(N), Subtarget->getXLenVT()); 1365}]>; 1366 1367def riscv_selectcc_frag : PatFrag<(ops node:$lhs, node:$rhs, node:$cc, 1368 node:$truev, node:$falsev), 1369 (riscv_selectcc node:$lhs, node:$rhs, 1370 node:$cc, node:$truev, 1371 node:$falsev), [{}], 1372 IntCCtoRISCVCC>; 1373 1374let Predicates = [HasShortForwardBranchOpt], isSelect = 1, 1375 Constraints = "$dst = $falsev", isCommutable = 1, Size = 8 in { 1376// This instruction moves $truev to $dst when the condition is true. It will 1377// be expanded to control flow in RISCVExpandPseudoInsts. 1378def PseudoCCMOVGPR : Pseudo<(outs GPR:$dst), 1379 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1380 GPR:$falsev, GPR:$truev), 1381 [(set GPR:$dst, 1382 (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 1383 GPR:$rhs, cond, 1384 (XLenVT GPR:$truev), 1385 GPR:$falsev))]>, 1386 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1387 ReadSFBALU, ReadSFBALU]>; 1388} 1389 1390// Conditional binops, that updates update $dst to (op rs1, rs2) when condition 1391// is true. Returns $falsev otherwise. Selected by optimizeSelect. 1392// TODO: Can we use DefaultOperands on the regular binop to accomplish this more 1393// like how ARM does predication? 
1394let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, 1395 Constraints = "$dst = $falsev" in { 1396def PseudoCCADD : Pseudo<(outs GPR:$dst), 1397 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1398 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1399 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1400 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1401def PseudoCCSUB : Pseudo<(outs GPR:$dst), 1402 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1403 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1404 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1405 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1406def PseudoCCSLL : Pseudo<(outs GPR:$dst), 1407 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1408 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1409 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1410 ReadSFBALU, ReadSFBALU]>; 1411def PseudoCCSRL : Pseudo<(outs GPR:$dst), 1412 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1413 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1414 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1415 ReadSFBALU, ReadSFBALU]>; 1416def PseudoCCSRA : Pseudo<(outs GPR:$dst), 1417 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1418 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1419 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1420 ReadSFBALU, ReadSFBALU]>; 1421def PseudoCCAND : Pseudo<(outs GPR:$dst), 1422 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1423 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1424 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1425 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1426def PseudoCCOR : Pseudo<(outs GPR:$dst), 1427 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1428 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1429 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1430 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1431def PseudoCCXOR : Pseudo<(outs GPR:$dst), 1432 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1433 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1434 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1435 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1436 1437def PseudoCCADDI : Pseudo<(outs GPR:$dst), 1438 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1439 
GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1440 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1441 ReadSFBALU]>; 1442def PseudoCCSLLI : Pseudo<(outs GPR:$dst), 1443 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1444 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1445 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1446 ReadSFBALU]>; 1447def PseudoCCSRLI : Pseudo<(outs GPR:$dst), 1448 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1449 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1450 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1451 ReadSFBALU]>; 1452def PseudoCCSRAI : Pseudo<(outs GPR:$dst), 1453 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1454 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1455 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1456 ReadSFBALU]>; 1457def PseudoCCANDI : Pseudo<(outs GPR:$dst), 1458 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1459 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1460 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1461 ReadSFBALU]>; 1462def PseudoCCORI : Pseudo<(outs GPR:$dst), 1463 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1464 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1465 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1466 ReadSFBALU]>; 1467def PseudoCCXORI : Pseudo<(outs GPR:$dst), 1468 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1469 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1470 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1471 ReadSFBALU]>; 1472 1473// RV64I instructions 1474def PseudoCCADDW : Pseudo<(outs GPR:$dst), 1475 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1476 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1477 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1478 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1479def PseudoCCSUBW : Pseudo<(outs GPR:$dst), 1480 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1481 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1482 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, 1483 ReadSFBALU, ReadSFBALU, ReadSFBALU]>; 1484def PseudoCCSLLW : Pseudo<(outs GPR:$dst), 1485 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1486 GPR:$falsev, 
GPR:$rs1, GPR:$rs2), []>, 1487 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1488 ReadSFBALU, ReadSFBALU]>; 1489def PseudoCCSRLW : Pseudo<(outs GPR:$dst), 1490 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1491 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1492 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1493 ReadSFBALU, ReadSFBALU]>; 1494def PseudoCCSRAW : Pseudo<(outs GPR:$dst), 1495 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1496 GPR:$falsev, GPR:$rs1, GPR:$rs2), []>, 1497 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1498 ReadSFBALU, ReadSFBALU]>; 1499 1500def PseudoCCADDIW : Pseudo<(outs GPR:$dst), 1501 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1502 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1503 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1504 ReadSFBALU]>; 1505def PseudoCCSLLIW : Pseudo<(outs GPR:$dst), 1506 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1507 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1508 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1509 ReadSFBALU]>; 1510def PseudoCCSRLIW : Pseudo<(outs GPR:$dst), 1511 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1512 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1513 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1514 ReadSFBALU]>; 1515def PseudoCCSRAIW : Pseudo<(outs GPR:$dst), 1516 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1517 GPR:$falsev, GPR:$rs1, simm12:$rs2), []>, 1518 Sched<[WriteSFB, ReadSFBJmp, ReadSFBJmp, ReadSFBALU, 1519 ReadSFBALU]>; 1520} 1521 1522multiclass SelectCC_GPR_rrirr<DAGOperand valty, ValueType vt> { 1523 let usesCustomInserter = 1 in 1524 def _Using_CC_GPR : Pseudo<(outs valty:$dst), 1525 (ins GPR:$lhs, GPR:$rhs, ixlenimm:$cc, 1526 valty:$truev, valty:$falsev), 1527 [(set valty:$dst, 1528 (riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), GPR:$rhs, cond, 1529 (vt valty:$truev), valty:$falsev))]>; 1530 // Explicitly select 0 in the condition to X0. The register coalescer doesn't 1531 // always do it. 
1532 def : Pat<(riscv_selectcc_frag:$cc (XLenVT GPR:$lhs), 0, cond, (vt valty:$truev), 1533 valty:$falsev), 1534 (!cast<Instruction>(NAME#"_Using_CC_GPR") GPR:$lhs, (XLenVT X0), 1535 (IntCCtoRISCVCC $cc), valty:$truev, valty:$falsev)>; 1536} 1537 1538let Predicates = [NoShortForwardBranchOpt] in 1539defm Select_GPR : SelectCC_GPR_rrirr<GPR, XLenVT>; 1540 1541class SelectCompressOpt<CondCode Cond> 1542 : Pat<(riscv_selectcc_frag:$select (XLenVT GPR:$lhs), simm12_no6:$Constant, Cond, 1543 (XLenVT GPR:$truev), GPR:$falsev), 1544 (Select_GPR_Using_CC_GPR (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0), 1545 (IntCCtoRISCVCC $select), GPR:$truev, GPR:$falsev)>; 1546 1547def OptForMinSize : Predicate<"MF ? MF->getFunction().hasMinSize() : false">; 1548 1549let Predicates = [HasStdExtC, OptForMinSize] in { 1550 def : SelectCompressOpt<SETEQ>; 1551 def : SelectCompressOpt<SETNE>; 1552} 1553 1554/// Branches and jumps 1555 1556// Match `riscv_brcc` and lower to the appropriate RISC-V branch instruction. 1557multiclass BccPat<CondCode Cond, RVInstB Inst> { 1558 def : Pat<(riscv_brcc (XLenVT GPR:$rs1), GPR:$rs2, Cond, bb:$imm12), 1559 (Inst GPR:$rs1, GPR:$rs2, simm13_lsb0:$imm12)>; 1560 // Explicitly select 0 to X0. The register coalescer doesn't always do it. 
1561 def : Pat<(riscv_brcc (XLenVT GPR:$rs1), 0, Cond, bb:$imm12), 1562 (Inst GPR:$rs1, (XLenVT X0), simm13_lsb0:$imm12)>; 1563} 1564 1565class BrccCompressOpt<CondCode Cond, RVInstB Inst> 1566 : Pat<(riscv_brcc GPR:$lhs, simm12_no6:$Constant, Cond, bb:$place), 1567 (Inst (ADDI GPR:$lhs, (NegImm simm12:$Constant)), (XLenVT X0), bb:$place)>; 1568 1569defm : BccPat<SETEQ, BEQ>; 1570defm : BccPat<SETNE, BNE>; 1571defm : BccPat<SETLT, BLT>; 1572defm : BccPat<SETGE, BGE>; 1573defm : BccPat<SETULT, BLTU>; 1574defm : BccPat<SETUGE, BGEU>; 1575 1576let Predicates = [HasStdExtC, OptForMinSize] in { 1577 def : BrccCompressOpt<SETEQ, BEQ>; 1578 def : BrccCompressOpt<SETNE, BNE>; 1579} 1580 1581class LongBccPseudo : Pseudo<(outs), 1582 (ins GPR:$rs1, GPR:$rs2, simm21_lsb0_jal:$imm20), 1583 []> { 1584 let Size = 8; 1585 let isBarrier = 1; 1586 let isBranch = 1; 1587 let hasSideEffects = 0; 1588 let mayStore = 0; 1589 let mayLoad = 0; 1590 let isAsmParserOnly = 1; 1591 let hasNoSchedulingInfo = 1; 1592} 1593 1594def PseudoLongBEQ : LongBccPseudo; 1595def PseudoLongBNE : LongBccPseudo; 1596def PseudoLongBLT : LongBccPseudo; 1597def PseudoLongBGE : LongBccPseudo; 1598def PseudoLongBLTU : LongBccPseudo; 1599def PseudoLongBGEU : LongBccPseudo; 1600 1601let isBarrier = 1, isBranch = 1, isTerminator = 1 in 1602def PseudoBR : Pseudo<(outs), (ins simm21_lsb0_jal:$imm20), [(br bb:$imm20)]>, 1603 PseudoInstExpansion<(JAL X0, simm21_lsb0_jal:$imm20)>; 1604 1605let isBarrier = 1, isBranch = 1, isIndirectBranch = 1, isTerminator = 1 in 1606def PseudoBRIND : Pseudo<(outs), (ins GPRJALR:$rs1, simm12:$imm12), []>, 1607 PseudoInstExpansion<(JALR X0, GPR:$rs1, simm12:$imm12)>; 1608 1609def : Pat<(brind GPRJALR:$rs1), (PseudoBRIND GPRJALR:$rs1, 0)>; 1610def : Pat<(brind (add GPRJALR:$rs1, simm12:$imm12)), 1611 (PseudoBRIND GPRJALR:$rs1, simm12:$imm12)>; 1612 1613// PseudoCALLReg is a generic pseudo instruction for calls which will eventually 1614// expand to auipc and jalr while encoding, with any 
given register used as the
// destination.
// Define AsmString to print "call" when compile with -S flag.
// Define isCodeGenOnly = 0 to support parsing assembly "call" instruction.
let isCall = 1, isBarrier = 1, isCodeGenOnly = 0, Size = 8, hasSideEffects = 0,
    mayStore = 0, mayLoad = 0 in
def PseudoCALLReg : Pseudo<(outs GPR:$rd), (ins call_symbol:$func), [],
                           "call", "$rd, $func">,
                    Sched<[WriteIALU, WriteJalr, ReadJalr]>;

// PseudoCALL is a pseudo instruction which will eventually expand to auipc
// and jalr while encoding. This is desirable, as an auipc+jalr pair with
// R_RISCV_CALL and R_RISCV_RELAX relocations can be relaxed by the linker
// if the offset fits in a signed 21-bit immediate.
// Define AsmString to print "call" when compile with -S flag.
// Define isCodeGenOnly = 0 to support parsing assembly "call" instruction.
let isCall = 1, Defs = [X1], isCodeGenOnly = 0, Size = 8 in
def PseudoCALL : Pseudo<(outs), (ins call_symbol:$func), [],
                        "call", "$func">,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]>;

def : Pat<(riscv_call tglobaladdr:$func), (PseudoCALL tglobaladdr:$func)>;
def : Pat<(riscv_call texternalsym:$func), (PseudoCALL texternalsym:$func)>;

def : Pat<(riscv_sret_glue), (SRET (XLenVT X0), (XLenVT X0))>;
def : Pat<(riscv_mret_glue), (MRET (XLenVT X0), (XLenVT X0))>;

let isCall = 1, Defs = [X1] in
def PseudoCALLIndirect : Pseudo<(outs), (ins GPRJALR:$rs1),
                                [(riscv_call GPRJALR:$rs1)]>,
                         PseudoInstExpansion<(JALR X1, GPR:$rs1, 0)>;

let isBarrier = 1, isReturn = 1, isTerminator = 1 in
def PseudoRET : Pseudo<(outs), (ins), [(riscv_ret_glue)]>,
                PseudoInstExpansion<(JALR X0, X1, 0)>;

// PseudoTAIL is a pseudo instruction similar to PseudoCALL and will eventually
// expand to auipc and jalr while encoding.
// Define AsmString to print "tail" when compile with -S flag.
let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2],
    Size = 8, isCodeGenOnly = 0 in
def PseudoTAIL : Pseudo<(outs), (ins call_symbol:$dst), [],
                        "tail", "$dst">,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]>;

let isCall = 1, isTerminator = 1, isReturn = 1, isBarrier = 1, Uses = [X2] in
def PseudoTAILIndirect : Pseudo<(outs), (ins GPRTC:$rs1),
                                [(riscv_tail GPRTC:$rs1)]>,
                         PseudoInstExpansion<(JALR X0, GPR:$rs1, 0)>;

def : Pat<(riscv_tail (iPTR tglobaladdr:$dst)),
          (PseudoTAIL tglobaladdr:$dst)>;
def : Pat<(riscv_tail (iPTR texternalsym:$dst)),
          (PseudoTAIL texternalsym:$dst)>;

// Define AsmString to print "jump" when compiling with -S flag.
let isCall = 0, isBarrier = 1, isBranch = 1, isTerminator = 1, Size = 8,
    isCodeGenOnly = 0, hasSideEffects = 0, mayStore = 0, mayLoad = 0 in
def PseudoJump : Pseudo<(outs GPR:$rd), (ins pseudo_jump_symbol:$target), [],
                        "jump", "$target, $rd">,
                 Sched<[WriteIALU, WriteJalr, ReadJalr]>;

// Pseudo for a rematerializable constant materialization sequence.
// This is an experimental feature enabled by
// -riscv-use-rematerializable-movimm in RISCVISelDAGToDAG.cpp
// It will be expanded after register allocation.
// FIXME: The scheduling information does not reflect the multiple instructions.
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 1,
    isPseudo = 1, isReMaterializable = 1, IsSignExtendingOpW = 1 in
def PseudoMovImm : Pseudo<(outs GPR:$dst), (ins i32imm:$imm), []>,
                   Sched<[WriteIALU]>;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lla", "$dst, $src">;

// Refer to comment on PseudoLI for explanation of Size=32
// NOTE(review): Size was 8 here, contradicting both the comment above and the
// parallel PseudoLAImm below (Size = 32 for the same immediate operand kind).
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in
def PseudoLLAImm : Pseudo<(outs GPR:$dst), (ins ixlenimm_li_restricted:$imm), [],
                          "lla", "$dst, $imm">;
def : Pat<(riscv_lla tglobaladdr:$in), (PseudoLLA tglobaladdr:$in)>;
def : Pat<(riscv_lla tblockaddress:$in), (PseudoLLA tblockaddress:$in)>;
def : Pat<(riscv_lla tjumptable:$in), (PseudoLLA tjumptable:$in)>;
def : Pat<(riscv_lla tconstpool:$in), (PseudoLLA tconstpool:$in)>;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLGA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                       "lga", "$dst, $src">;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                      "la", "$dst, $src">;

// Refer to comment on PseudoLI for explanation of Size=32
let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 32,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in
def PseudoLAImm : Pseudo<(outs GPR:$rd), (ins ixlenimm_li_restricted:$imm), [],
                         "la", "$rd, $imm">;

let hasSideEffects = 0, mayLoad = 1, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_IE : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.ie", "$dst, $src">;

let hasSideEffects = 0, mayLoad = 0, mayStore = 0, Size = 8, isCodeGenOnly = 0,
    isAsmParserOnly = 1 in
def PseudoLA_TLS_GD : Pseudo<(outs GPR:$dst), (ins bare_symbol:$src), [],
                             "la.tls.gd", "$dst, $src">;


/// Sign/Zero Extends

// There are single-instruction versions of these in Zbb, so disable these
// Pseudos if that extension is present.
let hasSideEffects = 0, mayLoad = 0,
    mayStore = 0, isCodeGenOnly = 0, isAsmParserOnly = 1 in {
def PseudoSEXT_B : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.b", "$rd, $rs">;
def PseudoSEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "sext.h", "$rd, $rs">;
// rv64's sext.w is defined above, using InstAlias<"sext.w ...
// zext.b is defined above, using InstAlias<"zext.b ...
def PseudoZEXT_H : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.h", "$rd, $rs">;
} // hasSideEffects = 0, ...

let Predicates = [IsRV64], hasSideEffects = 0, mayLoad = 0, mayStore = 0,
    isCodeGenOnly = 0, isAsmParserOnly = 1 in {
def PseudoZEXT_W : Pseudo<(outs GPR:$rd), (ins GPR:$rs), [], "zext.w", "$rd, $rs">;
} // Predicates = [IsRV64], ...

/// Loads

// Match a LoadOp of type vt from a register+simm12 address to instruction
// Inst. vt defaults to XLenVT.
class LdPat<PatFrag LoadOp, RVInst Inst, ValueType vt = XLenVT>
    : Pat<(vt (LoadOp (AddrRegImm (XLenVT GPR:$rs1), simm12:$imm12))),
          (Inst GPR:$rs1, simm12:$imm12)>;

def : LdPat<sextloadi8, LB>;
def : LdPat<extloadi8, LBU>; // Prefer unsigned due to no c.lb in Zcb.
def : LdPat<sextloadi16, LH>;
def : LdPat<extloadi16, LH>;
def : LdPat<load, LW, i32>;
def : LdPat<zextloadi8, LBU>;
def : LdPat<zextloadi16, LHU>;

/// Stores

// Match a StoreOp of a vt-typed value in StTy:$rs2 to a register+simm12
// address, selecting instruction Inst.
class StPat<PatFrag StoreOp, RVInst Inst, RegisterClass StTy,
            ValueType vt>
    : Pat<(StoreOp (vt StTy:$rs2), (AddrRegImm (XLenVT GPR:$rs1),
                                               simm12:$imm12)),
          (Inst StTy:$rs2, GPR:$rs1, simm12:$imm12)>;

def : StPat<truncstorei8, SB, GPR, XLenVT>;
def : StPat<truncstorei16, SH, GPR, XLenVT>;
def : StPat<store, SW, GPR, i32>;

/// Fences

// Refer to Table A.6 in the version 2.3 draft of the RISC-V Instruction Set
// Manual: Volume I.

// fence acquire -> fence r, rw
def : Pat<(atomic_fence (XLenVT 4), (timm)), (FENCE 0b10, 0b11)>;
// fence release -> fence rw, w
def : Pat<(atomic_fence (XLenVT 5), (timm)), (FENCE 0b11, 0b1)>;
// fence acq_rel -> fence.tso
def : Pat<(atomic_fence (XLenVT 6), (timm)), (FENCE_TSO)>;
// fence seq_cst -> fence rw, rw
def : Pat<(atomic_fence (XLenVT 7), (timm)), (FENCE 0b11, 0b11)>;

// Lowering for atomic load and store is defined in RISCVInstrInfoA.td.
// Although these are lowered to fence+load/store instructions defined in the
// base RV32I/RV64I ISA, this lowering is only used when the A extension is
// present. This is necessary as it isn't valid to mix __atomic_* libcalls
// with inline atomic operations for the same object.

/// Access to system registers

// Helpers for defining specific operations. They are defined for each system
// register separately. Side effect is not used because dependencies are
// expressed via use-def properties.

class ReadSysReg<SysReg SR, list<Register> Regs>
  : Pseudo<(outs GPR:$rd), (ins),
           [(set GPR:$rd, (XLenVT (riscv_read_csr (XLenVT SR.Encoding))))]>,
    PseudoInstExpansion<(CSRRS GPR:$rd, SR.Encoding, X0)> {
  let hasSideEffects = 0;
  let Uses = Regs;
}

class WriteSysReg<SysReg SR, list<Register> Regs>
  : Pseudo<(outs), (ins GPR:$val),
           [(riscv_write_csr (XLenVT SR.Encoding), (XLenVT GPR:$val))]>,
    PseudoInstExpansion<(CSRRW X0, SR.Encoding, GPR:$val)> {
  let hasSideEffects = 0;
  let Defs = Regs;
}

class WriteSysRegImm<SysReg SR, list<Register> Regs>
  : Pseudo<(outs), (ins uimm5:$val),
           [(riscv_write_csr (XLenVT SR.Encoding), uimm5:$val)]>,
    PseudoInstExpansion<(CSRRWI X0, SR.Encoding, uimm5:$val)> {
  let hasSideEffects = 0;
  let Defs = Regs;
}

class SwapSysReg<SysReg SR, list<Register> Regs>
  : Pseudo<(outs GPR:$rd), (ins GPR:$val),
           [(set GPR:$rd, (riscv_swap_csr (XLenVT SR.Encoding), (XLenVT GPR:$val)))]>,
    PseudoInstExpansion<(CSRRW GPR:$rd, SR.Encoding, GPR:$val)> {
  let hasSideEffects = 0;
  let Uses = Regs;
  let Defs = Regs;
}

class SwapSysRegImm<SysReg SR, list<Register> Regs>
  : Pseudo<(outs GPR:$rd), (ins uimm5:$val),
           [(set GPR:$rd, (XLenVT (riscv_swap_csr (XLenVT SR.Encoding), uimm5:$val)))]>,
    PseudoInstExpansion<(CSRRWI GPR:$rd, SR.Encoding, uimm5:$val)> {
  let hasSideEffects = 0;
  let Uses = Regs;
  let Defs = Regs;
}

def ReadFRM : ReadSysReg<SysRegFRM, [FRM]>;
def WriteFRM : WriteSysReg<SysRegFRM, [FRM]>;
def WriteFRMImm : WriteSysRegImm<SysRegFRM, [FRM]>;
def SwapFRMImm : SwapSysRegImm<SysRegFRM, [FRM]>;

def WriteVXRMImm : WriteSysRegImm<SysRegVXRM, [VXRM]>;

let hasSideEffects = true in {
def ReadFFLAGS : ReadSysReg<SysRegFFLAGS, [FFLAGS]>;
def WriteFFLAGS : WriteSysReg<SysRegFFLAGS, [FFLAGS]>;
}
/// Other pseudo-instructions
// Pessimistically assume the stack pointer will be clobbered
let Defs = [X2], Uses = [X2] in {
def ADJCALLSTACKDOWN : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                              [(callseq_start timm:$amt1, timm:$amt2)]>;
def ADJCALLSTACKUP : Pseudo<(outs), (ins i32imm:$amt1, i32imm:$amt2),
                            [(callseq_end timm:$amt1, timm:$amt2)]>;
} // Defs = [X2], Uses = [X2]

/// RV64 patterns

let Predicates = [IsRV64, NotHasStdExtZba] in {
def : Pat<(i64 (and GPR:$rs1, 0xffffffff)), (SRLI (SLLI GPR:$rs1, 32), 32)>;

// If we're shifting a 32-bit zero extended value left by 0-31 bits, use 2
// shifts instead of 3. This can occur when unsigned is used to index an array.
def : Pat<(i64 (shl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
          (SRLI (SLLI GPR:$rs1, 32), (ImmSubFrom32 uimm5:$shamt))>;
}

// Binary op matched only when every user reads just the lower 16 bits of the
// result (checked by hasAllHUsers).
class binop_allhusers<SDPatternOperator operator>
    : PatFrag<(ops node:$lhs, node:$rhs),
              (XLenVT (operator node:$lhs, node:$rhs)), [{
  return hasAllHUsers(Node);
}]>;

// PatFrag to allow ADDW/SUBW/MULW/SLLW to be selected from i64 add/sub/mul/shl
// if only the lower 32 bits of their result is used.
class binop_allwusers<SDPatternOperator operator>
    : PatFrag<(ops node:$lhs, node:$rhs),
              (i64 (operator node:$lhs, node:$rhs)), [{
  return hasAllWUsers(Node);
}]>;

def sexti32_allwusers : PatFrag<(ops node:$src),
                                (sext_inreg node:$src, i32), [{
  return hasAllWUsers(Node);
}]>;

def ImmSExt32 : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(SignExtend64<32>(N->getSExtValue()),
                                   SDLoc(N), N->getValueType(0));
}]>;
// Look for constants where the upper 32 bits are 0, but sign extending bit 31
// would be an simm12.
def u32simm12 : ImmLeaf<XLenVT, [{
  return isUInt<32>(Imm) && isInt<12>(SignExtend64<32>(Imm));
}], ImmSExt32>;

let Predicates = [IsRV64] in {

def : Pat<(i64 (and GPR:$rs, LeadingOnesWMask:$mask)),
          (SLLI (SRLIW $rs, LeadingOnesWMask:$mask), LeadingOnesWMask:$mask)>;

/// sext and zext

// Sign extend is not needed if all users are W instructions.
def : Pat<(sexti32_allwusers GPR:$rs1), (XLenVT GPR:$rs1)>;

def : Pat<(sext_inreg GPR:$rs1, i32), (ADDIW GPR:$rs1, 0)>;

/// ALU operations

def : Pat<(i64 (srl (and GPR:$rs1, 0xffffffff), uimm5:$shamt)),
          (SRLIW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(i64 (srl (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
          (SRLIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;
def : Pat<(sra (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;
def : Pat<(i64 (sra (shl GPR:$rs1, (i64 32)), uimm6gt32:$shamt)),
          (SRAIW GPR:$rs1, (ImmSub32 uimm6gt32:$shamt))>;

def : PatGprGpr<shiftopw<riscv_sllw>, SLLW>;
def : PatGprGpr<shiftopw<riscv_srlw>, SRLW>;
def : PatGprGpr<shiftopw<riscv_sraw>, SRAW>;

// Select W instructions if only the lower 32 bits of the result are used.
def : PatGprGpr<binop_allwusers<add>, ADDW>;
def : PatGprSimm12<binop_allwusers<add>, ADDIW>;
def : PatGprGpr<binop_allwusers<sub>, SUBW>;
def : PatGprImm<binop_allwusers<shl>, SLLIW, uimm5>;

// If this is a shr of a value sign extended from i32, and all the users only
// use the lower 32 bits, we can use an sraiw to remove the sext_inreg. This
// occurs because SimplifyDemandedBits prefers srl over sra.
def : Pat<(binop_allwusers<srl> (sext_inreg GPR:$rs1, i32), uimm5:$shamt),
          (SRAIW GPR:$rs1, uimm5:$shamt)>;

// Use binop_allwusers to recover immediates that may have been broken by
// SimplifyDemandedBits.
def : Pat<(binop_allwusers<and> GPR:$rs1, u32simm12:$imm),
          (ANDI GPR:$rs1, u32simm12:$imm)>;

def : Pat<(binop_allwusers<or> GPR:$rs1, u32simm12:$imm),
          (ORI GPR:$rs1, u32simm12:$imm)>;

def : Pat<(binop_allwusers<xor> GPR:$rs1, u32simm12:$imm),
          (XORI GPR:$rs1, u32simm12:$imm)>;
/// Loads

def : LdPat<sextloadi32, LW, i64>;
def : LdPat<extloadi32, LW, i64>;
def : LdPat<zextloadi32, LWU, i64>;
def : LdPat<load, LD, i64>;

/// Stores

def : StPat<truncstorei32, SW, GPR, i64>;
def : StPat<store, SD, GPR, i64>;
} // Predicates = [IsRV64]

/// readcyclecounter
// On RV64, we can directly read the 64-bit "cycle" CSR.
let Predicates = [IsRV64] in
def : Pat<(i64 (readcyclecounter)), (CSRRS CYCLE.Encoding, (XLenVT X0))>;
// On RV32, ReadCycleWide will be expanded to the suggested loop reading both
// halves of the 64-bit "cycle" CSR.
let Predicates = [IsRV32], usesCustomInserter = 1, hasNoSchedulingInfo = 1 in
def ReadCycleWide : Pseudo<(outs GPR:$lo, GPR:$hi), (ins),
                           [(set GPR:$lo, GPR:$hi, (riscv_read_cycle_wide))],
                           "", "">;

/// traps

// We lower `trap` to `unimp`, as this causes a hard exception on nearly all
// systems.
def : Pat<(trap), (UNIMP)>;

// We lower `debugtrap` to `ebreak`, as this will get the attention of the
// debugger if possible.
def : Pat<(debugtrap), (EBREAK)>;

let Predicates = [IsRV64], Uses = [X5],
    Defs = [X1, X6, X7, X28, X29, X30, X31] in
def HWASAN_CHECK_MEMACCESS_SHORTGRANULES
  : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$accessinfo),
           [(int_hwasan_check_memaccess_shortgranules (i64 X5), GPRJALR:$ptr,
             (i32 timm:$accessinfo))]>;

// This gets lowered into a 20-byte instruction sequence (at most)
let hasSideEffects = 0, mayLoad = 1, mayStore = 0,
    Defs = [ X6, X7, X28, X29, X30, X31 ], Size = 20 in {
def KCFI_CHECK
  : Pseudo<(outs), (ins GPRJALR:$ptr, i32imm:$type), []>, Sched<[]>;
}

/// Simple optimization
def : Pat<(XLenVT (add GPR:$rs1, (AddiPair:$rs2))),
          (ADDI (ADDI GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)),
                (AddiPairImmSmall GPR:$rs2))>;

let Predicates = [IsRV64] in {
// Select W instructions if only the lower 32-bits of the result are used.
def : Pat<(binop_allwusers<add> GPR:$rs1, (AddiPair:$rs2)),
          (ADDIW (ADDIW GPR:$rs1, (AddiPairImmLarge AddiPair:$rs2)),
                 (AddiPairImmSmall AddiPair:$rs2))>;
}

let Predicates = [HasShortForwardBranchOpt] in
def : Pat<(XLenVT (abs GPR:$rs1)),
          (PseudoCCSUB (XLenVT GPR:$rs1), (XLenVT X0), /* COND_LT */ 2,
                       (XLenVT GPR:$rs1), (XLenVT X0), (XLenVT GPR:$rs1))>;
let Predicates = [HasShortForwardBranchOpt, IsRV64] in
def : Pat<(sext_inreg (abs 33signbits_node:$rs1), i32),
          (PseudoCCSUBW (i64 GPR:$rs1), (i64 X0), /* COND_LT */ 2,
                        (i64 GPR:$rs1), (i64 X0), (i64 GPR:$rs1))>;

//===----------------------------------------------------------------------===//
// Experimental RV64 i32 legalization patterns.
//===----------------------------------------------------------------------===//

def simm12i32 : ImmLeaf<i32, [{return isInt<12>(Imm);}]>;

// Convert from i32 immediate to i64 target immediate to make SelectionDAG type
// checking happy so we can use ADDIW which expects an XLen immediate.
def as_i64imm : SDNodeXForm<imm, [{
  return CurDAG->getTargetConstant(N->getSExtValue(), SDLoc(N), MVT::i64);
}]>;

// Match a zext whose input is known non-negative, so sign extension produces
// the same value.
def zext_is_sext : PatFrag<(ops node:$src), (zext node:$src), [{
  KnownBits Known = CurDAG->computeKnownBits(N->getOperand(0), 0);
  return Known.isNonNegative();
}]>;

let Predicates = [IsRV64] in {
def : LdPat<sextloadi8, LB, i32>;
def : LdPat<extloadi8, LBU, i32>; // Prefer unsigned due to no c.lb in Zcb.
def : LdPat<sextloadi16, LH, i32>;
def : LdPat<extloadi16, LH, i32>;
def : LdPat<zextloadi8, LBU, i32>;
def : LdPat<zextloadi16, LHU, i32>;

def : StPat<truncstorei8, SB, GPR, i32>;
def : StPat<truncstorei16, SH, GPR, i32>;

def : Pat<(anyext GPR:$src), (COPY GPR:$src)>;
def : Pat<(sext GPR:$src), (ADDIW GPR:$src, 0)>;
def : Pat<(trunc GPR:$src), (COPY GPR:$src)>;

def : PatGprGpr<add, ADDW, i32, i32>;
def : PatGprGpr<sub, SUBW, i32, i32>;
def : PatGprGpr<and, AND, i32, i32>;
def : PatGprGpr<or, OR, i32, i32>;
def : PatGprGpr<xor, XOR, i32, i32>;
def : PatGprGpr<shiftopw<shl>, SLLW, i32, i64>;
def : PatGprGpr<shiftopw<srl>, SRLW, i32, i64>;
def : PatGprGpr<shiftopw<sra>, SRAW, i32, i64>;

def : Pat<(i32 (add GPR:$rs1, simm12i32:$imm)),
          (ADDIW GPR:$rs1, (i64 (as_i64imm $imm)))>;
def : Pat<(i32 (and GPR:$rs1, simm12i32:$imm)),
          (ANDI GPR:$rs1, (i64 (as_i64imm $imm)))>;
def : Pat<(i32 (or GPR:$rs1, simm12i32:$imm)),
          (ORI GPR:$rs1, (i64 (as_i64imm $imm)))>;
def : Pat<(i32 (xor GPR:$rs1, simm12i32:$imm)),
          (XORI GPR:$rs1, (i64 (as_i64imm $imm)))>;

def : PatGprImm<shl, SLLIW, uimm5, i32>;
def : PatGprImm<srl, SRLIW, uimm5, i32>;
def : PatGprImm<sra, SRAIW, uimm5, i32>;

def : Pat<(i32 (and GPR:$rs, TrailingOnesMask:$mask)),
          (SRLI (SLLI $rs, (i64 (XLenSubTrailingOnes $mask))),
                (i64 (XLenSubTrailingOnes $mask)))>;

// Use sext if the sign bit of the input is 0.
def : Pat<(zext_is_sext GPR:$src), (ADDIW GPR:$src, 0)>;
}

let Predicates = [IsRV64, NotHasStdExtZba] in {
def : Pat<(zext GPR:$src), (SRLI (SLLI GPR:$src, 32), 32)>;

// If we're shifting a 32-bit zero extended value left by 0-31 bits, use 2
// shifts instead of 3. This can occur when unsigned is used to index an array.
def : Pat<(shl (zext GPR:$rs), uimm5:$shamt),
          (SRLI (SLLI GPR:$rs, 32), (ImmSubFrom32 uimm5:$shamt))>;
}

//===----------------------------------------------------------------------===//
// Standard extensions
//===----------------------------------------------------------------------===//

// Multiply and Division
include "RISCVInstrInfoM.td"

// Atomic
include "RISCVInstrInfoA.td"

// Scalar FP
include "RISCVInstrInfoF.td"
include "RISCVInstrInfoD.td"
include "RISCVInstrInfoZfh.td"
include "RISCVInstrInfoZfbfmin.td"
include "RISCVInstrInfoZfa.td"

// Scalar bitmanip and cryptography
include "RISCVInstrInfoZb.td"
include "RISCVInstrInfoZk.td"

// Vector
include "RISCVInstrInfoV.td"
include "RISCVInstrInfoZvk.td"

// Integer
include "RISCVInstrInfoZicbo.td"
include "RISCVInstrInfoZicond.td"

// Compressed
include "RISCVInstrInfoC.td"
include "RISCVInstrInfoZc.td"

//===----------------------------------------------------------------------===//
// Vendor extensions
//===----------------------------------------------------------------------===//

include "RISCVInstrInfoXVentana.td"
include "RISCVInstrInfoXTHead.td"
2128include "RISCVInstrInfoXSf.td" 2129include "RISCVInstrInfoXCV.td" 2130 2131//===----------------------------------------------------------------------===// 2132// Global ISel 2133//===----------------------------------------------------------------------===// 2134 2135include "RISCVInstrGISel.td" 2136