//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "Utils/RISCVCompressedCap.h"
#include "Utils/RISCVMatInt.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/CodeGen/CallingConvLower.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/SelectionDAGISel.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_IL32PC64:
  case RISCVABI::ABI_IL32PC64F:
  case RISCVABI::ABI_IL32PC64D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
  case RISCVABI::ABI_L64PC128:
  case RISCVABI::ABI_L64PC128F:
  case RISCVABI::ABI_L64PC128D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  if (Subtarget.hasCheri()) {
    CapType = Subtarget.typeForCapabilities();
    NullCapabilityRegister = RISCV::C0;
    // TODO: This is a lie to avoid CRRL/CRAM generation; disable once it is
    // implemented in hardware on RV32 and we have cc64 helpers.
    CapTypeHasPreciseBounds = !Subtarget.is64Bit();
    addRegisterClass(CapType, &RISCV::GPCRRegClass);
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  if (RISCVABI::isCheriPureCapABI(ABI))
    setStackPointerRegisterToSaveRestore(RISCV::C2);
  else
    setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);
  if (Subtarget.hasCheri())
    setOperationAction(ISD::DYNAMIC_STACKALLOC, CapType, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::SELECT, XLenVT, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  for (auto VT : {MVT::i1, MVT::i8, MVT::i16})
    setOperationAction(ISD::SIGN_EXTEND_INREG, VT, Expand);

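  // On RV64, i32 is not a legal type. Marking these i32 operations as Custom
  // lets ReplaceNodeResults rewrite them to the target-specific *W nodes
  // (e.g. an i32 shift becomes SLLW/SRLW/SRAW), so the fact that the
  // operation was originally 32 bits wide is not lost during promotion to
  // i64.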
  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  }

  if (Subtarget.is64Bit() && Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, MVT::i32, Custom);
    setOperationAction(ISD::SDIV, MVT::i32, Custom);
    setOperationAction(ISD::UDIV, MVT::i32, Custom);
    setOperationAction(ISD::UREM, MVT::i32, Custom);
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  setOperationAction(ISD::ROTL, XLenVT, Expand);
  setOperationAction(ISD::ROTR, XLenVT, Expand);
  setOperationAction(ISD::BSWAP, XLenVT, Expand);
  setOperationAction(ISD::CTTZ, XLenVT, Expand);
  setOperationAction(ISD::CTLZ, XLenVT, Expand);
  setOperationAction(ISD::CTPOP, XLenVT, Expand);

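  // FP condition codes and operations with no direct instruction in F/D.
  // The condition codes below are expanded in terms of the natively
  // supported comparisons, and the operations are expanded to libcalls.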
  ISD::CondCode FPCCToExtend[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE,  ISD::SETNE};

  ISD::NodeType FPOpToExtend[] = {
      ISD::FSIN, ISD::FCOS,       ISD::FSINCOS,  ISD::FPOW,
      ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    for (auto CC : FPCCToExtend)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExtend)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit() &&
      !(Subtarget.hasStdExtD() || Subtarget.hasStdExtF())) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  if (Subtarget.hasCheri()) {
    MVT CLenVT = Subtarget.typeForCapabilities();
    setOperationAction(ISD::BR_CC, CLenVT, Expand);
    setOperationAction(ISD::SELECT, CLenVT, Custom);
    setOperationAction(ISD::SELECT_CC, CLenVT, Expand);
    setOperationAction(ISD::GlobalAddress, CLenVT, Custom);
    setOperationAction(ISD::BlockAddress, CLenVT, Custom);
    setOperationAction(ISD::ConstantPool, CLenVT, Custom);
    setOperationAction(ISD::GlobalTLSAddress, CLenVT, Custom);
    setOperationAction(ISD::ADDRSPACECAST, CLenVT, Custom);
    setOperationAction(ISD::ADDRSPACECAST, XLenVT, Custom);
  }

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);

  // Some CHERI intrinsics return i1, which isn't legal, so we have to custom
  // lower them in the DAG combine phase before the first type legalization
  // pass.
  if (Subtarget.hasCheri())
    setTargetDAGCombine(ISD::INTRINSIC_WO_CHAIN);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
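    // Note: allowing byte-granularity cmpxchg in pure-capability mode likely
    // reflects that the generic masked-atomic expansion (which rounds the
    // address down to an aligned word) is unsuitable there, since the
    // rounded address may fall outside the capability's bounds.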
    if (RISCVABI::isCheriPureCapABI(ABI))
      setMinCmpXchgSizeInBits(8);
    else
      setMinCmpXchgSizeInBits(32);

    if (Subtarget.hasCheri())
      SupportsAtomicCapabilityOperations = true;
  } else {
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  // Function alignments.
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  // Effectively disable jump table generation.
  setMinimumJumpTableEntries(INT_MAX);

  // Jumps are expensive, compared to logic.
  setJumpIsExpensive();

  // We can use any register for comparisons.
  setHasMultipleConditionRegisters();
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL, LLVMContext &,
                                            EVT VT) const {
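  // Scalar comparison results are produced in an integer register, so use
  // the XLEN-sized pointer type for address space 0 rather than i1.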
  if (!VT.isVector())
    return getPointerTy(DL, 0);
  return VT.changeVectorElementTypeToInteger();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32:
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
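  // Only +0.0 is cheap to materialise: it can be produced from the integer
  // zero register. Anything else, including -0.0, would need a constant-pool
  // load.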
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly in the RISC-V
// ISA.
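// For example, (setcc x, y, setgt) becomes (setcc y, x, setlt).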
static void normaliseSetCC(SDValue &LHS, SDValue &RHS, ISD::CondCode &CC) {
  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

// Return the RISC-V branch opcode that matches the given DAG integer
// condition code. The CondCode must be one of those supported by the RISC-V
// ISA (see normaliseSetCC).
static unsigned getBranchOpcodeForIntCondCode(ISD::CondCode CC) {
  switch (CC) {
  default:
    llvm_unreachable("Unsupported CondCode");
  case ISD::SETEQ:
    return RISCV::BEQ;
  case ISD::SETNE:
    return RISCV::BNE;
  case ISD::SETLT:
    return RISCV::BLT;
  case ISD::SETGE:
    return RISCV::BGE;
  case ISD::SETULT:
    return RISCV::BLTU;
  case ISD::SETUGE:
    return RISCV::BGEU;
  }
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    assert(Subtarget.is64Bit() && Subtarget.hasStdExtF() &&
           "Unexpected custom legalisation");
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    if (Op.getValueType() != MVT::f32 || Op0.getValueType() != MVT::i32)
      return SDValue();
    SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
    SDValue FPConv = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
    return FPConv;
  }
  case ISD::ADDRSPACECAST: {
    SDLoc DL(Op);
    SDValue Op0 = Op.getOperand(0);
    bool ToCap = Op.getValueType().isFatPointer();
    bool FromCap = Op0.getValueType().isFatPointer();
    if (ToCap == FromCap)
      return Op0;
    unsigned NewOp = ToCap ? ISD::INTTOPTR : ISD::PTRTOINT;
    return DAG.getNode(NewOp, DL, Op.getValueType(), Op0);
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  }
}

static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
}

static SDValue getTargetNode(ExternalSymbolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetExternalSymbol(N->getSymbol(), Ty, Flags);
}

static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
                                   Flags);
}

static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
                             SelectionDAG &DAG, unsigned Flags) {
  return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
                                   N->getOffset(), Flags);
}

template <class NodeTy>
SDValue RISCVTargetLowering::getAddr(NodeTy *N, EVT Ty, SelectionDAG &DAG,
                                     bool IsLocal,
                                     bool CanDeriveFromPcc) const {
  SDLoc DL(N);

  if (RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal && CanDeriveFromPcc) {
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoCLLC sym), which expands to
      // (cincoffsetimm (auipcc %pcrel_hi(sym)) %pcrel_lo(auipc)).
      //
      // In general, we can only do this for local functions and block
      // addresses. However, $pcc also allows for read access so we can avoid
      // a GOT access for read-only constants (e.g. floating-point
      // constant-pools).
      return SDValue(DAG.getMachineNode(RISCV::PseudoCLLC, DL, Ty, Addr), 0);
    }
    // Generate a sequence to load a capability from the captable. This
    // generates the pattern (PseudoCLGC sym), which expands to
    // (clc (auipcc %captab_pcrel_hi(sym)) %pcrel_lo(auipc)).
    return SDValue(DAG.getMachineNode(RISCV::PseudoCLGC, DL, Ty, Addr), 0);
  }

  if (isPositionIndependent()) {
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    if (IsLocal)
      // Use PC-relative addressing to access the symbol. This generates the
      // pattern (PseudoLLA sym), which expands to
      // (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
      return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);

    // Use PC-relative addressing to access the GOT for this symbol, then load
    // the address from the GOT. This generates the pattern (PseudoLA sym),
    // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
    return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
  }

  switch (getTargetMachine().getCodeModel()) {
  default:
    report_fatal_error("Unsupported code model for lowering");
  case CodeModel::Small: {
    // Generate a sequence for accessing addresses within the first 2 GiB of
    // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
    SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
    SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
    SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
    return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
  }
  case CodeModel::Medium: {
    // Generate a sequence for accessing addresses within any 2 GiB range
    // within the address space. This generates the pattern (PseudoLLA sym),
    // which expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
    return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
  }
  }
}

SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
                                                SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();

  const GlobalValue *GV = N->getGlobal();
  bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
  // External variables always have to be loaded from the captable to get
  // bounds and to allow for them to be provided by another DSO without
  // requiring copy relocations.
  // Read-only accesses in the same DSO *could* theoretically use pc-relative
  // addressing, but that would mean we get a capability bounded to the $pcc
  // bounds and therefore would not be checked when we pass the reference to
  // another function. Therefore, we always load from the captable for all
  // global variables.
  SDValue Addr = getAddr(N, Ty, DAG, IsLocal, /*CanDeriveFromPcc=*/false);

  // In order to maximise the opportunity for common subexpression
  // elimination, emit a separate ADD/PTRADD node for the global address
  // offset instead of folding it in the global address node. Later peephole
  // optimisations may choose to fold it back in when profitable.
  if (Offset != 0)
    return DAG.getPointerAdd(DL, Addr, Offset);
  return Addr;
}

SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
                                               SelectionDAG &DAG) const {
  BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
  EVT Ty = Op.getValueType();

  return getAddr(N, Ty, DAG, /*IsLocal=*/true, /*CanDeriveFromPcc=*/true);
}

SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
                                               SelectionDAG &DAG) const {
  ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
  EVT Ty = Op.getValueType();

  return getAddr(N, Ty, DAG, /*IsLocal=*/true, /*CanDeriveFromPcc=*/true);
}

SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N, EVT Ty,
                                              SelectionDAG &DAG,
                                              bool NotLocal) const {
  SDLoc DL(N);
  const GlobalValue *GV = N->getGlobal();
  MVT XLenVT = Subtarget.getXLenVT();

  if (RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) {
    if (NotLocal) {
      // Use PC-relative addressing to access the captable for this TLS
      // symbol, then load the address from the captable and add the thread
      // pointer. This generates the pattern (PseudoCLA_TLS_IE sym), which
      // expands to
      // (cld (auipcc %tls_ie_captab_pcrel_hi(sym)) %pcrel_lo(auipc)).
      SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
      SDValue Load = SDValue(
          DAG.getMachineNode(RISCV::PseudoCLA_TLS_IE, DL, XLenVT, Ty, Addr),
          0);

      // Add the thread pointer.
      SDValue TPReg = DAG.getRegister(RISCV::C4, Ty);
      return DAG.getPointerAdd(DL, TPReg, Load);
    }

    // Generate a sequence for accessing the address relative to the thread
    // pointer, with the appropriate adjustment for the thread pointer offset.
    // This generates the pattern
    // (cincoffset (cincoffset_tprel (lui %tprel_hi(sym))
    //                               ctp %tprel_cincoffset(sym))
    //             %tprel_lo(sym))
    SDValue AddrHi =
        DAG.getTargetGlobalAddress(GV, DL, XLenVT, 0, RISCVII::MO_TPREL_HI);
    SDValue AddrCIncOffset = DAG.getTargetGlobalAddress(
        GV, DL, Ty, 0, RISCVII::MO_TPREL_CINCOFFSET);
    SDValue AddrLo =
        DAG.getTargetGlobalAddress(GV, DL, XLenVT, 0, RISCVII::MO_TPREL_LO);

    SDValue MNHi =
        SDValue(DAG.getMachineNode(RISCV::LUI, DL, XLenVT, AddrHi), 0);
    SDValue TPReg = DAG.getRegister(RISCV::C4, Ty);
    SDValue MNAdd = SDValue(
        DAG.getMachineNode(RISCV::PseudoCIncOffsetTPRel, DL, Ty, TPReg, MNHi,
                           AddrCIncOffset),
        0);
    return SDValue(
        DAG.getMachineNode(RISCV::CIncOffsetImm, DL, Ty, MNAdd, AddrLo), 0);
  }

  if (NotLocal) {
    // Use PC-relative addressing to access the GOT for this TLS symbol, then
    // load the address from the GOT and add the thread pointer. This
    // generates the pattern (PseudoLA_TLS_IE sym), which expands to
    // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
    SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
    SDValue Load =
        SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);

    // Add the thread pointer.
    SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
    return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
  }

  // Generate a sequence for accessing the address relative to the thread
  // pointer, with the appropriate adjustment for the thread pointer offset.
  // This generates the pattern
  // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
  SDValue AddrHi =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
  SDValue AddrAdd =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
  SDValue AddrLo =
      DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);

  SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
  SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
  SDValue MNAdd = SDValue(
      DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
      0);
  return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
}

SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N, EVT Ty,
                                               SelectionDAG &DAG) const {
  SDLoc DL(N);
  Type *CallTy = Type::getInt8PtrTy(
      *DAG.getContext(), DAG.getDataLayout().getGlobalsAddressSpace());
  const GlobalValue *GV = N->getGlobal();

  // Use a PC-relative addressing mode to access the global dynamic GOT
  // address. This generates the pattern (PseudoLA_TLS_GD sym), which expands
  // to (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
  //
  // For pure-capability TLS, this generates the pattern
  // (PseudoCLC_TLS_GD sym), which expands to
  // (cincoffset (auipcc %tls_gd_captab_pcrel_hi(sym)) %pcrel_lo(auipc)).
  unsigned Opcode = RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())
                        ? RISCV::PseudoCLC_TLS_GD
                        : RISCV::PseudoLA_TLS_GD;
  SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
  SDValue Load = SDValue(DAG.getMachineNode(Opcode, DL, Ty, Addr), 0);

  // Prepare the argument list to generate the call.
  ArgListTy Args;
  ArgListEntry Entry;
  Entry.Node = Load;
  Entry.Ty = CallTy;
  Args.push_back(Entry);

  // Set up a call to __tls_get_addr.
  TargetLowering::CallLoweringInfo CLI(DAG);
  CLI.setDebugLoc(DL)
      .setChain(DAG.getEntryNode())
      .setLibCallee(CallingConv::C, CallTy,
                    DAG.getExternalSymbol("__tls_get_addr", Ty),
                    std::move(Args));

  return LowerCallTo(CLI).first;
}

SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
                                                   SelectionDAG &DAG) const {
  SDLoc DL(Op);
  EVT Ty = Op.getValueType();
  GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
  int64_t Offset = N->getOffset();

  TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());

  SDValue Addr;
  switch (Model) {
  case TLSModel::LocalExec:
    Addr = getStaticTLSAddr(N, Ty, DAG, /*NotLocal=*/false);
    break;
  case TLSModel::InitialExec:
    Addr = getStaticTLSAddr(N, Ty, DAG, /*NotLocal=*/true);
    break;
  case TLSModel::LocalDynamic:
  case TLSModel::GeneralDynamic:
    Addr = getDynamicTLSAddr(N, Ty, DAG);
    break;
  }

  // In order to maximise the opportunity for common subexpression
  // elimination, emit a separate ADD node for the global address offset
  // instead of folding it in the global address node. Later peephole
  // optimisations may choose to fold it back in when profitable.
  if (Offset != 0)
    return DAG.getPointerAdd(DL, Addr, Offset);
  return Addr;
}

SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
  SDValue CondV = Op.getOperand(0);
  SDValue TrueV = Op.getOperand(1);
  SDValue FalseV = Op.getOperand(2);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();

  // If the result type is XLenVT and CondV is the output of a SETCC node
  // which also operated on XLenVT inputs, then merge the SETCC node into the
  // lowered RISCVISD::SELECT_CC to take advantage of the integer
  // compare+branch instructions. i.e.:
  // (select (setcc lhs, rhs, cc), truev, falsev)
  // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
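  //
  // For example, (select (setcc x, y, setgt), t, f) becomes
  // (riscvisd::select_cc y, x, setlt, t, f) after normaliseSetCC.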
  if (Op.getSimpleValueType() == XLenVT && CondV.getOpcode() == ISD::SETCC &&
      CondV.getOperand(0).getSimpleValueType() == XLenVT) {
    SDValue LHS = CondV.getOperand(0);
    SDValue RHS = CondV.getOperand(1);
    auto CC = cast<CondCodeSDNode>(CondV.getOperand(2));
    ISD::CondCode CCVal = CC->get();

    normaliseSetCC(LHS, RHS, CCVal);

    SDValue TargetCC = DAG.getConstant(CCVal, DL, XLenVT);
    SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
    SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
    return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
  }

  // Otherwise:
  // (select condv, truev, falsev)
  // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
  SDValue Zero = DAG.getConstant(0, DL, XLenVT);
  SDValue SetNE = DAG.getConstant(ISD::SETNE, DL, XLenVT);

  SDVTList VTs = DAG.getVTList(Op.getValueType(), MVT::Glue);
  SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};

  return DAG.getNode(RISCVISD::SELECT_CC, DL, VTs, Ops);
}

SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
  MachineFunction &MF = DAG.getMachineFunction();
  RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();

  SDLoc DL(Op);
  unsigned AllocaAS = MF.getDataLayout().getAllocaAddrSpace();
  SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
                                 getPointerTy(MF.getDataLayout(), AllocaAS));

  // vastart just stores the address of the VarArgsFrameIndex slot into the
  // memory location argument.
  const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
  return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
                      MachinePointerInfo(SV));
}

SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
                                            SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setFrameAddressIsTaken(true);
  Register FrameReg = RI.getFrameRegister(MF);
  int XLenInBytes = Subtarget.getXLen() / 8;

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  while (Depth--) {
    int Offset = -(XLenInBytes * 2);
    SDValue Ptr = DAG.getPointerAdd(DL, FrameAddr, Offset);
    FrameAddr =
        DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
  }
  return FrameAddr;
}

SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
                                             SelectionDAG &DAG) const {
  const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
  MachineFunction &MF = DAG.getMachineFunction();
  MachineFrameInfo &MFI = MF.getFrameInfo();
  MFI.setReturnAddressIsTaken(true);
  int XLenInBytes = Subtarget.getXLen() / 8;

  if (verifyReturnAddressArgumentIsConstant(Op, DAG))
    return SDValue();

  EVT VT = Op.getValueType();
  SDLoc DL(Op);
  unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  if (Depth) {
    int Off = -XLenInBytes;
    SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
    return DAG.getLoad(VT, DL, DAG.getEntryNode(),
                       DAG.getPointerAdd(DL, FrameAddr, Off),
                       MachinePointerInfo());
  }

  // Return the value of the return address register, marking it an implicit
  // live-in.
  Register Reg =
      MF.addLiveIn(RI.getRARegister(), getRegClassFor(VT.getSimpleVT()));
  return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, VT);
}

SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
                                                 SelectionDAG &DAG) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = Lo << Shamt
  //   Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
  // else:
  //   Lo = 0
  //   Hi = Lo << (Shamt-XLEN)
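  //
  // For example, on RV32 with Shamt = 40: Shamt-XLEN = 8, so the result is
  // Lo = 0 and Hi = Lo_in << 8.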

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
  SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
  SDValue ShiftRightLo =
      DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
  SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
  SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
  SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
                                                  bool IsSRA) const {
  SDLoc DL(Op);
  SDValue Lo = Op.getOperand(0);
  SDValue Hi = Op.getOperand(1);
  SDValue Shamt = Op.getOperand(2);
  EVT VT = Lo.getValueType();

  // SRA expansion:
  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //   Hi = Hi >>s Shamt
  // else:
  //   Lo = Hi >>s (Shamt-XLEN);
  //   Hi = Hi >>s (XLEN-1)
  //
  // SRL expansion:
  // if Shamt-XLEN < 0: // Shamt < XLEN
  //   Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
  //   Hi = Hi >>u Shamt
  // else:
  //   Lo = Hi >>u (Shamt-XLEN);
  //   Hi = 0;
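  //
  // For example, an SRL on RV32 with Shamt = 40 yields Lo = Hi_in >>u 8 and
  // Hi = 0.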

  unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;

  SDValue Zero = DAG.getConstant(0, DL, VT);
  SDValue One = DAG.getConstant(1, DL, VT);
  SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
  SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
  SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
  SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);

  SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
  SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
  SDValue ShiftLeftHi =
      DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
  SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
  SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
  SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
  SDValue HiFalse =
      IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;

  SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);

  Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
  Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);

  SDValue Parts[2] = {Lo, Hi};
  return DAG.getMergeValues(Parts, DL);
}

SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
                                                     SelectionDAG &DAG) const {
  unsigned IntNo = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
  SDLoc DL(Op);
  switch (IntNo) {
  default:
    return SDValue(); // Don't custom lower most intrinsics.
  case Intrinsic::thread_pointer: {
    MCPhysReg PhysReg = RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())
                            ? RISCV::C4
                            : RISCV::X4;
    EVT PtrVT = getPointerTy(DAG.getDataLayout(),
                             DAG.getDataLayout().getGlobalsAddressSpace());
    return DAG.getRegister(PhysReg, PtrVT);
  }
  }
}

// Returns the opcode of the target-specific SDNode that implements the 32-bit
// form of the given Opcode.
static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
  switch (Opcode) {
  default:
    llvm_unreachable("Unexpected opcode");
  case ISD::SHL:
    return RISCVISD::SLLW;
  case ISD::SRA:
    return RISCVISD::SRAW;
  case ISD::SRL:
    return RISCVISD::SRLW;
  case ISD::SDIV:
    return RISCVISD::DIVW;
  case ISD::UDIV:
    return RISCVISD::DIVUW;
  case ISD::UREM:
    return RISCVISD::REMUW;
  }
}

// Converts the given 32-bit operation to a target-specific SelectionDAG node.
// Because i32 isn't a legal type for RV64, these operations would otherwise
// be promoted to i64, making it difficult to select the SLLW/DIVUW/.../*W
// instructions later on, because the fact that the operation was originally
// of type i32 is lost.
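//
// For example, (i32 (srl x, y)) becomes
// (trunc i32 (riscvisd::srlw (anyext i64 x), (anyext i64 y))).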
static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
  // ReplaceNodeResults requires we maintain the same type for the return
  // value.
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

// Converts the given 32-bit operation to an i64 operation with sign-extension
// semantics so that the number of sign-extension instructions is reduced.
static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
  SDLoc DL(N);
  SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
  SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
  SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
  SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
                               DAG.getValueType(MVT::i32));
  return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
}

void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
                                             SmallVectorImpl<SDValue> &Results,
                                             SelectionDAG &DAG) const {
  SDLoc DL(N);
  switch (N->getOpcode()) {
  default:
    llvm_unreachable("Don't know how to custom type legalize this operation!");
  case ISD::STRICT_FP_TO_SINT:
  case ISD::STRICT_FP_TO_UINT:
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT: {
    bool IsStrict = N->isStrictFPOpcode();
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
    RTLIB::Libcall LC;
    if (N->getOpcode() == ISD::FP_TO_SINT ||
        N->getOpcode() == ISD::STRICT_FP_TO_SINT)
      LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
    else
      LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
    MakeLibCallOptions CallOptions;
    EVT OpVT = Op0.getValueType();
    CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
    SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
    SDValue Result;
    std::tie(Result, Chain) =
        makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
    Results.push_back(Result);
    if (IsStrict)
      Results.push_back(Chain);
    break;
  }
  case ISD::READCYCLECOUNTER: {
    assert(!Subtarget.is64Bit() &&
           "READCYCLECOUNTER only has custom type legalization on riscv32");

    SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
    SDValue RCW =
        DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));

    Results.push_back(
        DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
    Results.push_back(RCW.getValue(2));
    break;
  }
  case ISD::ADD:
  case ISD::SUB:
  case ISD::MUL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
    break;
  case ISD::SHL:
  case ISD::SRA:
  case ISD::SRL:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           "Unexpected custom legalisation");
    if (N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::SDIV:
  case ISD::UDIV:
  case ISD::UREM:
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtM() && "Unexpected custom legalisation");
    if (N->getOperand(0).getOpcode() == ISD::Constant ||
        N->getOperand(1).getOpcode() == ISD::Constant)
      return;
    Results.push_back(customLegalizeToWOp(N, DAG));
    break;
  case ISD::BITCAST: {
    assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
           Subtarget.hasStdExtF() && "Unexpected custom legalisation");
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    if (Op0.getValueType() != MVT::f32)
      return;
    SDValue FPConv =
        DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
    Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
    break;
  }
  }
}

SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
                                               DAGCombinerInfo &DCI) const {
  SelectionDAG &DAG = DCI.DAG;

  switch (N->getOpcode()) {
  default:
    break;
  case ISD::INTRINSIC_WO_CHAIN: {
    SDLoc DL(N);
    unsigned IID = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
    EVT XLenVT = Subtarget.getXLenVT();

    switch (IID) {
    // Lower to our custom node, with the result converted back to i1 via an
    // AssertZext and a setcc, so we can replace the i1 uses.
    case Intrinsic::cheri_cap_tag_get: {
      SDValue IntRes =
          DAG.getNode(RISCVISD::CAP_TAG_GET, DL, XLenVT, N->getOperand(1));
      IntRes = DAG.getNode(ISD::AssertZext, DL, XLenVT, IntRes,
                           DAG.getValueType(MVT::i1));
      return DAG.getSetCC(DL, MVT::i1, IntRes, DAG.getConstant(0, DL, XLenVT),
                          ISD::SETNE);
    }
    case Intrinsic::cheri_cap_sealed_get: {
      SDValue IntRes =
          DAG.getNode(RISCVISD::CAP_SEALED_GET, DL, XLenVT, N->getOperand(1));
      IntRes = DAG.getNode(ISD::AssertZext, DL, XLenVT, IntRes,
                           DAG.getValueType(MVT::i1));
      return DAG.getSetCC(DL, MVT::i1, IntRes, DAG.getConstant(0, DL, XLenVT),
                          ISD::SETNE);
    }
    case Intrinsic::cheri_cap_subset_test: {
      SDValue IntRes = DAG.getNode(RISCVISD::CAP_SUBSET_TEST, DL, XLenVT,
                                   N->getOperand(1), N->getOperand(2));
      IntRes = DAG.getNode(ISD::AssertZext, DL, XLenVT, IntRes,
                           DAG.getValueType(MVT::i1));
      return DAG.getSetCC(DL, MVT::i1, IntRes, DAG.getConstant(0, DL, XLenVT),
                          ISD::SETNE);
    }
    case Intrinsic::cheri_cap_equal_exact: {
      SDValue IntRes = DAG.getNode(RISCVISD::CAP_EQUAL_EXACT, DL, XLenVT,
                                   N->getOperand(1), N->getOperand(2));
      IntRes = DAG.getNode(ISD::AssertZext, DL, XLenVT, IntRes,
                           DAG.getValueType(MVT::i1));
      return DAG.getSetCC(DL, MVT::i1, IntRes, DAG.getConstant(0, DL, XLenVT),
                          ISD::SETNE);
    }
    // Constant fold CRRL/CRAM when possible.
    case Intrinsic::cheri_round_representable_length: {
      if (CapTypeHasPreciseBounds)
        return N->getOperand(1);

      KnownBits Known = DAG.computeKnownBits(SDValue(N, 0));
      if (Known.isConstant())
        return DAG.getConstant(Known.One, DL, N->getValueType(0));
      break;
    }
    case Intrinsic::cheri_representable_alignment_mask: {
      if (CapTypeHasPreciseBounds)
        return DAG.getAllOnesConstant(DL, N->getValueType(0));

      KnownBits Known = DAG.computeKnownBits(SDValue(N, 0));
      if (Known.isConstant())
        return DAG.getConstant(Known.One, DL, N->getValueType(0));
      break;
    }
    }

    break;
  }
  case RISCVISD::SplitF64: {
    SDValue Op0 = N->getOperand(0);
    // If the input to SplitF64 is just BuildPairF64 then the operation is
    // redundant. Instead, use BuildPairF64's operands directly.
    if (Op0->getOpcode() == RISCVISD::BuildPairF64)
      return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));

    SDLoc DL(N);

    // It's cheaper to materialise two 32-bit integers than to load a double
    // from the constant pool and transfer it to integer registers through the
    // stack.
    if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
      APInt V = C->getValueAPF().bitcastToAPInt();
      SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
      SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
      return DCI.CombineTo(N, Lo, Hi);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewSplitF64 =
        DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
                    Op0.getOperand(0));
    SDValue Lo = NewSplitF64.getValue(0);
    SDValue Hi = NewSplitF64.getValue(1);
    APInt SignBit = APInt::getSignMask(32);
    if (Op0.getOpcode() == ISD::FNEG) {
      SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
                                  DAG.getConstant(SignBit, DL, MVT::i32));
      return DCI.CombineTo(N, Lo, NewHi);
    }
    assert(Op0.getOpcode() == ISD::FABS);
    SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
                                DAG.getConstant(~SignBit, DL, MVT::i32));
    return DCI.CombineTo(N, Lo, NewHi);
  }
  case RISCVISD::SLLW:
  case RISCVISD::SRAW:
  case RISCVISD::SRLW: {
    // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
    SDValue LHS = N->getOperand(0);
    SDValue RHS = N->getOperand(1);
    APInt LHSMask = APInt::getLowBitsSet(LHS.getValueSizeInBits(), 32);
    APInt RHSMask = APInt::getLowBitsSet(RHS.getValueSizeInBits(), 5);
    if ((SimplifyDemandedBits(N->getOperand(0), LHSMask, DCI)) ||
        (SimplifyDemandedBits(N->getOperand(1), RHSMask, DCI)))
      return SDValue();
    break;
  }
  case RISCVISD::FMV_X_ANYEXTW_RV64: {
    SDLoc DL(N);
    SDValue Op0 = N->getOperand(0);
    // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
    // conversion is unnecessary and can be replaced with an ANY_EXTEND
    // of the FMV_W_X_RV64 operand.
    if (Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) {
      SDValue AExtOp =
          DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0.getOperand(0));
      return DCI.CombineTo(N, AExtOp);
    }

    // This is a target-specific version of a DAGCombine performed in
    // DAGCombiner::visitBITCAST. It performs the equivalent of:
    // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
    // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
    if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
        !Op0.getNode()->hasOneUse())
      break;
    SDValue NewFMV = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64,
                                 Op0.getOperand(0));
    APInt SignBit = APInt::getSignMask(32).sext(64);
    if (Op0.getOpcode() == ISD::FNEG) {
      return DCI.CombineTo(N,
                           DAG.getNode(ISD::XOR, DL, MVT::i64, NewFMV,
                                       DAG.getConstant(SignBit, DL, MVT::i64)));
    }
    assert(Op0.getOpcode() == ISD::FABS);
    return DCI.CombineTo(N,
                         DAG.getNode(ISD::AND, DL, MVT::i64, NewFMV,
                                     DAG.getConstant(~SignBit, DL, MVT::i64)));
  }
  }

  return SDValue();
}

bool RISCVTargetLowering::isDesirableToCommuteWithShift(
    const SDNode *N, CombineLevel Level) const {
  // The following folds are only desirable if `(OP _, c1 << c2)` can be
  // materialised in fewer instructions than `(OP _, c1)`:
  //
  // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
  // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
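  //
  // For example, with c1 = 2047 and c2 = 1: c1 fits in a single ADDI
  // immediate but c1 << c2 = 4094 does not, so the fold is prevented.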
1305 SDValue N0 = N->getOperand(0);
1306 EVT Ty = N0.getValueType();
1307 if (Ty.isScalarInteger() &&
1308 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
1309 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
1310 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
1311 if (C1 && C2) {
1312 APInt C1Int = C1->getAPIntValue();
1313 APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
1314
1315 // We can materialise `c1 << c2` into an add immediate, so it's "free",
1316 // and the combine should happen, to potentially allow further combines
1317 // later.
1318 if (ShiftedC1Int.getMinSignedBits() <= 64 &&
1319 isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
1320 return true;
1321
1322 // We can materialise `c1` in an add immediate, so it's "free", and the
1323 // combine should be prevented.
1324 if (C1Int.getMinSignedBits() <= 64 &&
1325 isLegalAddImmediate(C1Int.getSExtValue()))
1326 return false;
1327
1328 // Neither constant will fit into an immediate, so find materialisation
1329 // costs.
1330 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
1331 Subtarget.is64Bit());
1332 int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
1333 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.is64Bit());
1334
1335 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
1336 // combine should be prevented.
1337 if (C1Cost < ShiftedC1Cost)
1338 return false;
1339 }
1340 }
1341 return true;
1342 }
1343
ComputeNumSignBitsForTargetNode(SDValue Op,const APInt & DemandedElts,const SelectionDAG & DAG,unsigned Depth) const1344 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
1345 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
1346 unsigned Depth) const {
1347 switch (Op.getOpcode()) {
1348 default:
1349 break;
1350 case RISCVISD::SLLW:
1351 case RISCVISD::SRAW:
1352 case RISCVISD::SRLW:
1353 case RISCVISD::DIVW:
1354 case RISCVISD::DIVUW:
1355 case RISCVISD::REMUW:
1356 // TODO: As the result is sign-extended, this is conservatively correct. A
1357 // more precise answer could be calculated for SRAW depending on known
1358 // bits in the shift amount.
1359 return 33;
1360 }
1361
1362 return 1;
1363 }
1364
computeKnownBitsForTargetNode(const SDValue Op,KnownBits & Known,const APInt & DemandedElts,const SelectionDAG & DAG,unsigned Depth) const1365 void RISCVTargetLowering::computeKnownBitsForTargetNode(
1366 const SDValue Op, KnownBits &Known, const APInt &DemandedElts,
1367 const SelectionDAG &DAG, unsigned Depth) const {
1368 Known.resetAll();
1369 bool IsRV64 = Subtarget.is64Bit();
1370 switch (Op.getOpcode()) {
1371 default: break;
1372 case ISD::INTRINSIC_WO_CHAIN: {
1373 switch (cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue()) {
1374 default: break;
1375 case Intrinsic::cheri_round_representable_length:
1376 if (CapTypeHasPreciseBounds) {
1377 Known = DAG.computeKnownBits(Op.getOperand(1));
1378 } else if (IsRV64) {
1379 KnownBits KnownLengthBits = DAG.computeKnownBits(Op.getOperand(1));
1380 uint64_t MinLength = KnownLengthBits.One.getZExtValue();
1381 uint64_t MaxLength = (~KnownLengthBits.Zero).getZExtValue();
1382 uint64_t MinRoundedLength =
1383 RISCVCompressedCap::getRepresentableLength(MinLength, IsRV64);
1384 uint64_t MaxRoundedLength =
1385 RISCVCompressedCap::getRepresentableLength(MaxLength, IsRV64);
1386 bool MinRoundedOverflow = MinRoundedLength < MinLength;
1387 bool MaxRoundedOverflow = MaxRoundedLength < MaxLength;
1388
1389 // A bit is known if the two different output bits are the same and:
1390 //
1391 // (1) All more-significant bits are known. This is regardless of
1392 // whether the corresponding input bits were known, since rounding
1393 // is monotonic.
1394 //
1395 // OR
1396 //
1397 // (2) All less-significant bits are known and the corresponding input
1398 // bit is known.
1399 //
1400 // If the two rounded values are the same, repeated application of (1)
1401 // yields the expected result that all bits are known.
1402 //
1403 // Note that the properties as described above are in terms of the
1404 // (N+1)-bit outputs, not their truncated forms, with the (N+1)th bit
1405 // being the overflow bit, and so we must take that into account.
1406 //
1407 // This can be improved upon to consider inner and trailing bits that
1408 // are still known regardless of the input bits (such as because they
1409 // are 1 in the input and the bounds are not rounded up too much to
1410         // lose them), but this is a good start.
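        // Worked illustration (assumed values only): with MinRoundedLength
        // = 0x1000 and MaxRoundedLength = 0x13ff, the rounded results agree
        // in their top 54 bits on RV64, so if the overflow bits also agree,
        // property (1) makes those 54 output bits known.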
1411
1412 uint64_t MinMaxRoundedAgreeMask = MinRoundedLength ^ ~MaxRoundedLength;
1413 uint64_t InputKnownMask =
1414 (KnownLengthBits.Zero | KnownLengthBits.One).getZExtValue();
1415
1416 // Calculate bits for property (1)
1417 uint64_t LeadingKnownBits = countLeadingOnes(MinMaxRoundedAgreeMask);
1418 uint64_t LeadingKnownMask =
1419 MinRoundedOverflow == MaxRoundedOverflow
1420 ? maskLeadingOnes<uint64_t>(LeadingKnownBits) : 0;
1421
1422 // Calculate bits for property (2)
1423 uint64_t TrailingKnownBits = countTrailingOnes(MinMaxRoundedAgreeMask);
1424 uint64_t TrailingKnownMask =
1425 maskTrailingOnes<uint64_t>(TrailingKnownBits) & InputKnownMask;
1426
1427 // Combine properties
1428 uint64_t KnownMask = LeadingKnownMask | TrailingKnownMask;
1429
1430 Known.Zero |= ~MinRoundedLength & KnownMask;
1431 Known.One |= MinRoundedLength & KnownMask;
1432 }
1433 break;
1434 case Intrinsic::cheri_representable_alignment_mask:
1435 if (CapTypeHasPreciseBounds) {
1436 Known.setAllOnes();
1437 } else if (IsRV64) {
1438 KnownBits KnownLengthBits = DAG.computeKnownBits(Op.getOperand(1));
1439 uint64_t MinLength = KnownLengthBits.One.getZExtValue();
1440 uint64_t MaxLength = (~KnownLengthBits.Zero).getZExtValue();
1441
1442 Known.Zero |= ~RISCVCompressedCap::getAlignmentMask(MinLength, IsRV64);
1443 Known.One |= RISCVCompressedCap::getAlignmentMask(MaxLength, IsRV64);
1444 }
1445 break;
1446 }
1447 }
1448 }
1449 }
1450
1451 TailPaddingAmount
1452 RISCVTargetLowering::getTailPaddingForPreciseBounds(uint64_t Size) const {
1453 if (!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
1454 return TailPaddingAmount::None;
1455
1456 return RISCVCompressedCap::getRequiredTailPadding(Size, Subtarget.is64Bit());
1457 }
1458
1459 Align
1460 RISCVTargetLowering::getAlignmentForPreciseBounds(uint64_t Size) const {
1461 if (!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
1462 return Align();
1463
1464 return RISCVCompressedCap::getRequiredAlignment(Size, Subtarget.is64Bit());
1465 }
1466
1467 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
1468 MachineBasicBlock *BB) {
1469 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
1470
1471 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
1472 // Should the count have wrapped while it was being read, we need to try
1473 // again.
1474 // ...
1475 // read:
1476 // rdcycleh x3 # load high word of cycle
1477 // rdcycle x2 # load low word of cycle
1478 // rdcycleh x4 # load high word of cycle
1479 // bne x3, x4, read # check if high word reads match, otherwise try again
1480 // ...
1481
1482 MachineFunction &MF = *BB->getParent();
1483 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1484 MachineFunction::iterator It = ++BB->getIterator();
1485
1486 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1487 MF.insert(It, LoopMBB);
1488
1489 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
1490 MF.insert(It, DoneMBB);
1491
1492 // Transfer the remainder of BB and its successor edges to DoneMBB.
1493 DoneMBB->splice(DoneMBB->begin(), BB,
1494 std::next(MachineBasicBlock::iterator(MI)), BB->end());
1495 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
1496
1497 BB->addSuccessor(LoopMBB);
1498
1499 MachineRegisterInfo &RegInfo = MF.getRegInfo();
1500 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
1501 Register LoReg = MI.getOperand(0).getReg();
1502 Register HiReg = MI.getOperand(1).getReg();
1503 DebugLoc DL = MI.getDebugLoc();
1504
1505 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
1506 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
1507 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1508 .addReg(RISCV::X0);
1509 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
1510 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
1511 .addReg(RISCV::X0);
1512 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
1513 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
1514 .addReg(RISCV::X0);
1515
1516 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
1517 .addReg(HiReg)
1518 .addReg(ReadAgainReg)
1519 .addMBB(LoopMBB);
1520
1521 LoopMBB->addSuccessor(LoopMBB);
1522 LoopMBB->addSuccessor(DoneMBB);
1523
1524 MI.eraseFromParent();
1525
1526 return DoneMBB;
1527 }
1528
1529 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
1530 MachineBasicBlock *BB) {
1531 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
1532
1533 MachineFunction &MF = *BB->getParent();
1534 DebugLoc DL = MI.getDebugLoc();
1535 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1536 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1537 Register LoReg = MI.getOperand(0).getReg();
1538 Register HiReg = MI.getOperand(1).getReg();
1539 Register SrcReg = MI.getOperand(2).getReg();
1540 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
1541 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1542
1543 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
1544 RI);
1545 MachineMemOperand *MMO =
1546 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1547 MachineMemOperand::MOLoad, 8, Align(8));
1548 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
1549 .addFrameIndex(FI)
1550 .addImm(0)
1551 .addMemOperand(MMO);
1552 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
1553 .addFrameIndex(FI)
1554 .addImm(4)
1555 .addMemOperand(MMO);
1556 MI.eraseFromParent(); // The pseudo instruction is gone now.
1557 return BB;
1558 }
1559
1560 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
1561 MachineBasicBlock *BB) {
1562 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
1563 "Unexpected instruction");
1564
1565 MachineFunction &MF = *BB->getParent();
1566 DebugLoc DL = MI.getDebugLoc();
1567 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
1568 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
1569 Register DstReg = MI.getOperand(0).getReg();
1570 Register LoReg = MI.getOperand(1).getReg();
1571 Register HiReg = MI.getOperand(2).getReg();
1572 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
1573 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
1574
1575 MachineMemOperand *MMO =
1576 MF.getMachineMemOperand(MachinePointerInfo::getFixedStack(MF, FI),
1577 MachineMemOperand::MOStore, 8, Align(8));
1578 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1579 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
1580 .addFrameIndex(FI)
1581 .addImm(0)
1582 .addMemOperand(MMO);
1583 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
1584 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
1585 .addFrameIndex(FI)
1586 .addImm(4)
1587 .addMemOperand(MMO);
1588 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
1589 MI.eraseFromParent(); // The pseudo instruction is gone now.
1590 return BB;
1591 }
1592
1593 static bool isSelectPseudo(MachineInstr &MI) {
1594 switch (MI.getOpcode()) {
1595 default:
1596 return false;
1597 case RISCV::Select_GPR_Using_CC_GPR:
1598 case RISCV::Select_GPCR_Using_CC_GPR:
1599 case RISCV::Select_FPR32_Using_CC_GPR:
1600 case RISCV::Select_FPR64_Using_CC_GPR:
1601 return true;
1602 }
1603 }
1604
1605 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
1606 MachineBasicBlock *BB) {
1607 // To "insert" Select_* instructions, we actually have to insert the triangle
1608 // control-flow pattern. The incoming instructions know the destination vreg
1609 // to set, the condition code register to branch on, the true/false values to
1610 // select between, and the condcode to use to select the appropriate branch.
1611 //
1612 // We produce the following control flow:
1613 // HeadMBB
1614 // | \
1615 // | IfFalseMBB
1616 // | /
1617 // TailMBB
1618 //
1619 // When we find a sequence of selects we attempt to optimize their emission
1620 // by sharing the control flow. Currently we only handle cases where we have
1621 // multiple selects with the exact same condition (same LHS, RHS and CC).
1622 // The selects may be interleaved with other instructions if the other
1623 // instructions meet some requirements we deem safe:
1624 // - They are debug instructions. Otherwise,
1625 // - They do not have side-effects, do not access memory and their inputs do
1626 // not depend on the results of the select pseudo-instructions.
1627 // The TrueV/FalseV operands of the selects cannot depend on the result of
1628 // previous selects in the sequence.
1629 // These conditions could be further relaxed. See the X86 target for a
1630 // related approach and more information.
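  // As a sketch of the result, a single
  //   Select_GPR_Using_CC_GPR %dst, %lhs, %rhs, cc, %tv, %fv
  // becomes a conditional branch at the end of HeadMBB plus, in TailMBB,
  //   %dst = PHI [ %tv, HeadMBB ], [ %fv, IfFalseMBB ]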
1631 Register LHS = MI.getOperand(1).getReg();
1632 Register RHS = MI.getOperand(2).getReg();
1633 auto CC = static_cast<ISD::CondCode>(MI.getOperand(3).getImm());
1634
1635 SmallVector<MachineInstr *, 4> SelectDebugValues;
1636 SmallSet<Register, 4> SelectDests;
1637 SelectDests.insert(MI.getOperand(0).getReg());
1638
1639 MachineInstr *LastSelectPseudo = &MI;
1640
1641 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
1642 SequenceMBBI != E; ++SequenceMBBI) {
1643 if (SequenceMBBI->isDebugInstr())
1644 continue;
1645 else if (isSelectPseudo(*SequenceMBBI)) {
1646 if (SequenceMBBI->getOperand(1).getReg() != LHS ||
1647 SequenceMBBI->getOperand(2).getReg() != RHS ||
1648 SequenceMBBI->getOperand(3).getImm() != CC ||
1649 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
1650 SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
1651 break;
1652 LastSelectPseudo = &*SequenceMBBI;
1653 SequenceMBBI->collectDebugValues(SelectDebugValues);
1654 SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
1655 } else {
1656 if (SequenceMBBI->hasUnmodeledSideEffects() ||
1657 SequenceMBBI->mayLoadOrStore())
1658 break;
1659 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
1660 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
1661 }))
1662 break;
1663 }
1664 }
1665
1666 const TargetInstrInfo &TII = *BB->getParent()->getSubtarget().getInstrInfo();
1667 const BasicBlock *LLVM_BB = BB->getBasicBlock();
1668 DebugLoc DL = MI.getDebugLoc();
1669 MachineFunction::iterator I = ++BB->getIterator();
1670
1671 MachineBasicBlock *HeadMBB = BB;
1672 MachineFunction *F = BB->getParent();
1673 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
1674 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
1675
1676 F->insert(I, IfFalseMBB);
1677 F->insert(I, TailMBB);
1678
1679 // Transfer debug instructions associated with the selects to TailMBB.
1680 for (MachineInstr *DebugInstr : SelectDebugValues) {
1681 TailMBB->push_back(DebugInstr->removeFromParent());
1682 }
1683
1684 // Move all instructions after the sequence to TailMBB.
1685 TailMBB->splice(TailMBB->end(), HeadMBB,
1686 std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
1687 // Update machine-CFG edges by transferring all successors of the current
1688 // block to the new block which will contain the Phi nodes for the selects.
1689 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
1690 // Set the successors for HeadMBB.
1691 HeadMBB->addSuccessor(IfFalseMBB);
1692 HeadMBB->addSuccessor(TailMBB);
1693
1694 // Insert appropriate branch.
1695 unsigned Opcode = getBranchOpcodeForIntCondCode(CC);
1696
1697 BuildMI(HeadMBB, DL, TII.get(Opcode))
1698 .addReg(LHS)
1699 .addReg(RHS)
1700 .addMBB(TailMBB);
1701
1702 // IfFalseMBB just falls through to TailMBB.
1703 IfFalseMBB->addSuccessor(TailMBB);
1704
1705 // Create PHIs for all of the select pseudo-instructions.
1706 auto SelectMBBI = MI.getIterator();
1707 // Result must be virtual registers:
1708 assert(SelectMBBI->getOperand(4).getReg().isVirtual());
1709 assert(SelectMBBI->getOperand(5).getReg().isVirtual());
1710 auto SelectEnd = std::next(LastSelectPseudo->getIterator());
1711 auto InsertionPoint = TailMBB->begin();
1712 while (SelectMBBI != SelectEnd) {
1713 auto Next = std::next(SelectMBBI);
1714 if (isSelectPseudo(*SelectMBBI)) {
1715 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
1716 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
1717 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
1718 .addReg(SelectMBBI->getOperand(4).getReg())
1719 .addMBB(HeadMBB)
1720 .addReg(SelectMBBI->getOperand(5).getReg())
1721 .addMBB(IfFalseMBB);
1722 SelectMBBI->eraseFromParent();
1723 }
1724 SelectMBBI = Next;
1725 }
1726
1727 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
1728 return TailMBB;
1729 }
1730
1731 MachineBasicBlock *
1732 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
1733 MachineBasicBlock *BB) const {
1734 switch (MI.getOpcode()) {
1735 default:
1736 llvm_unreachable("Unexpected instr type to insert");
1737 case RISCV::ReadCycleWide:
1738 assert(!Subtarget.is64Bit() &&
1739            "ReadCycleWide is only to be used on riscv32");
1740 return emitReadCycleWidePseudo(MI, BB);
1741 case RISCV::Select_GPR_Using_CC_GPR:
1742 case RISCV::Select_GPCR_Using_CC_GPR:
1743 case RISCV::Select_FPR32_Using_CC_GPR:
1744 case RISCV::Select_FPR64_Using_CC_GPR:
1745 return emitSelectPseudo(MI, BB);
1746 case RISCV::BuildPairF64Pseudo:
1747 return emitBuildPairF64Pseudo(MI, BB);
1748 case RISCV::SplitF64Pseudo:
1749 return emitSplitF64Pseudo(MI, BB);
1750 }
1751 }
1752
1753 // Calling Convention Implementation.
1754 // The expectations for frontend ABI lowering vary from target to target.
1755 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
1756 // details, but this is a longer term goal. For now, we simply try to keep the
1757 // role of the frontend as simple and well-defined as possible. The rules can
1758 // be summarised as:
1759 // * Never split up large scalar arguments. We handle them here.
1760 // * If a hardfloat calling convention is being used, and the struct may be
1761 // passed in a pair of registers (fp+fp, int+fp), and both registers are
1762 // available, then pass as two separate arguments. If either the GPRs or FPRs
1763 // are exhausted, then pass according to the rule below.
1764 // * If a struct could never be passed in registers or directly in a stack
1765 // slot (as it is larger than 2*XLEN and the floating point rules don't
1766 // apply), then pass it using a pointer with the byval attribute.
1767 // * If a struct is less than 2*XLEN, then coerce to either a two-element
1768 // word-sized array or a 2*XLEN scalar (depending on alignment).
1769 // * The frontend can determine whether a struct is returned by reference or
1770 // not based on its size and fields. If it will be returned by reference, the
1771 // frontend must modify the prototype so a pointer with the sret annotation is
1772 // passed as the first argument. This is not necessary for large scalar
1773 // returns.
1774 // * Struct return values and varargs should be coerced to structs containing
1775 // register-size fields in the same situations they would be for fixed
1776 // arguments.
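// As an illustrative example of these rules on RV32: a frontend would pass
// `struct { int32_t a; int32_t b; }` by value as a two-element i32 array,
// pass an `int64_t` scalar whole (it is split into a register pair here),
// and pass a 24-byte struct via a pointer with the byval attribute.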
1777
1778 static const MCPhysReg ArgGPRs[] = {
1779 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
1780 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
1781 };
1782 static const MCPhysReg ArgFPR32s[] = {
1783 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
1784 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
1785 };
1786 static const MCPhysReg ArgFPR64s[] = {
1787 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
1788 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
1789 };
1790
1791 static const MCPhysReg ArgGPCRs[] = {
1792 RISCV::C10, RISCV::C11, RISCV::C12, RISCV::C13,
1793 RISCV::C14, RISCV::C15, RISCV::C16, RISCV::C17
1794 };
1795
1796 // Pass a 2*XLEN argument that has been split into two XLEN values through
1797 // registers or the stack as necessary.
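// For example, on RV32 an i64 argument reaches this point as two i32 halves;
// depending on how many argument GPRs remain, the halves end up in a register
// pair (e.g. a0/a1), split between one GPR and the stack, or entirely on the
// stack.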
1798 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State,
1799 bool IsPureCapVarArgs, CCValAssign VA1,
1800 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
1801 MVT ValVT2, MVT LocVT2,
1802 ISD::ArgFlagsTy ArgFlags2) {
1803 unsigned XLenInBytes = XLen / 8;
1804 if (Register Reg = IsPureCapVarArgs ? 0 : State.AllocateReg(ArgGPRs)) {
1805 // At least one half can be passed via register.
1806 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
1807 VA1.getLocVT(), CCValAssign::Full));
1808 } else {
1809 // Both halves must be passed on the stack, with proper alignment.
1810 Align StackAlign =
1811 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
1812 State.addLoc(
1813 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
1814 State.AllocateStack(XLenInBytes, StackAlign),
1815 VA1.getLocVT(), CCValAssign::Full));
1816 State.addLoc(CCValAssign::getMem(
1817 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
1818 LocVT2, CCValAssign::Full));
1819 return false;
1820 }
1821
1822 if (Register Reg = State.AllocateReg(ArgGPRs)) {
1823 // The second half can also be passed via register.
1824 State.addLoc(
1825 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
1826 } else {
1827 // The second half is passed via the stack, without additional alignment.
1828 State.addLoc(CCValAssign::getMem(
1829 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
1830 LocVT2, CCValAssign::Full));
1831 }
1832
1833 return false;
1834 }
1835
1836 // Implements the RISC-V calling convention. Returns true upon failure.
1837 static bool CC_RISCV(const DataLayout &DL, unsigned ValNo, MVT ValVT, MVT LocVT,
1838 CCValAssign::LocInfo LocInfo, ISD::ArgFlagsTy ArgFlags,
1839 CCState &State, bool IsFixed, bool IsRet, Type *OrigTy,
1840 const RISCVSubtarget &Subtarget) {
1841 RISCVABI::ABI ABI = Subtarget.getTargetABI();
1842 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
1843 assert(XLen == 32 || XLen == 64);
1844 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
1845 MVT CLenVT = Subtarget.hasCheri() ? Subtarget.typeForCapabilities()
1846 : MVT();
1847 MVT PtrVT = DL.isFatPointer(DL.getAllocaAddrSpace()) ? CLenVT : XLenVT;
1848 bool IsPureCapVarArgs = !IsFixed && RISCVABI::isCheriPureCapABI(ABI);
1849
1850   // Any return value split into more than two values can't be returned
1851 // directly.
1852 if (IsRet && ValNo > 1)
1853 return true;
1854
1855 // UseGPRForF32 if targeting one of the soft-float ABIs, if passing a
1856 // variadic argument, or if no F32 argument registers are available.
1857 bool UseGPRForF32 = true;
1858 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
1859 // variadic argument, or if no F64 argument registers are available.
1860 bool UseGPRForF64 = true;
1861
1862 switch (ABI) {
1863 default:
1864 llvm_unreachable("Unexpected ABI");
1865 case RISCVABI::ABI_ILP32:
1866 case RISCVABI::ABI_LP64:
1867 case RISCVABI::ABI_IL32PC64:
1868 case RISCVABI::ABI_L64PC128:
1869 break;
1870 case RISCVABI::ABI_ILP32F:
1871 case RISCVABI::ABI_LP64F:
1872 case RISCVABI::ABI_IL32PC64F:
1873 case RISCVABI::ABI_L64PC128F:
1874 UseGPRForF32 = !IsFixed;
1875 break;
1876 case RISCVABI::ABI_ILP32D:
1877 case RISCVABI::ABI_LP64D:
1878 case RISCVABI::ABI_IL32PC64D:
1879 case RISCVABI::ABI_L64PC128D:
1880 UseGPRForF32 = !IsFixed;
1881 UseGPRForF64 = !IsFixed;
1882 break;
1883 }
1884
1885 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s))
1886 UseGPRForF32 = true;
1887 if (State.getFirstUnallocated(ArgFPR64s) == array_lengthof(ArgFPR64s))
1888 UseGPRForF64 = true;
1889
1890 // From this point on, rely on UseGPRForF32, UseGPRForF64 and similar local
1891 // variables rather than directly checking against the target ABI.
1892
1893 if (UseGPRForF32 && ValVT == MVT::f32) {
1894 LocVT = XLenVT;
1895 LocInfo = CCValAssign::BCvt;
1896 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
1897 LocVT = MVT::i64;
1898 LocInfo = CCValAssign::BCvt;
1899 }
1900
1901 // If this is a variadic argument, the RISC-V calling convention requires
1902 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
1903 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
1904 // be used regardless of whether the original argument was split during
1905 // legalisation or not. The argument will not be passed by registers if the
1906 // original type is larger than 2*XLEN, so the register alignment rule does
1907 // not apply.
1908 // TODO: Pure capability varargs bounds
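  // For example (RV32, illustrative): if a1 would be the next free GPR, a
  // variadic double skips a1 and is passed in the aligned pair a2/a3 instead.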
1909 unsigned TwoXLenInBytes = (2 * XLen) / 8;
1910 if (!IsFixed && !RISCVABI::isCheriPureCapABI(ABI) &&
1911 ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
1912 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
1913 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
1914 // Skip 'odd' register if necessary.
1915 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
1916 State.AllocateReg(ArgGPRs);
1917 }
1918
1919 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
1920 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
1921 State.getPendingArgFlags();
1922
1923 assert(PendingLocs.size() == PendingArgFlags.size() &&
1924 "PendingLocs and PendingArgFlags out of sync");
1925
1926 // Handle passing f64 on RV32D with a soft float ABI or when floating point
1927 // registers are exhausted.
1928 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64 && !IsPureCapVarArgs) {
1929 assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
1930 "Can't lower f64 if it is split");
1931 // Depending on available argument GPRS, f64 may be passed in a pair of
1932 // GPRs, split between a GPR and the stack, or passed completely on the
1933 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
1934 // cases.
1935 Register Reg = State.AllocateReg(ArgGPRs);
1936 LocVT = MVT::i32;
1937 if (!Reg) {
1938 unsigned StackOffset = State.AllocateStack(8, Align(8));
1939 State.addLoc(
1940 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
1941 return false;
1942 }
1943 if (!State.AllocateReg(ArgGPRs))
1944 State.AllocateStack(4, Align(4));
1945 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
1946 return false;
1947 }
1948
1949 // Split arguments might be passed indirectly, so keep track of the pending
1950 // values.
1951 if (ArgFlags.isSplit() || !PendingLocs.empty()) {
1952 LocVT = XLenVT;
1953 LocInfo = CCValAssign::Indirect;
1954 PendingLocs.push_back(
1955 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
1956 PendingArgFlags.push_back(ArgFlags);
1957 if (!ArgFlags.isSplitEnd()) {
1958 return false;
1959 }
1960 }
1961
1962 // If the split argument only had two elements, it should be passed directly
1963 // in registers or on the stack.
1964 if (ArgFlags.isSplitEnd() && PendingLocs.size() <= 2) {
1965 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
1966 // Apply the normal calling convention rules to the first half of the
1967 // split argument.
1968 CCValAssign VA = PendingLocs[0];
1969 ISD::ArgFlagsTy AF = PendingArgFlags[0];
1970 PendingLocs.clear();
1971 PendingArgFlags.clear();
1972 return CC_RISCVAssign2XLen(XLen, State, IsPureCapVarArgs, VA, AF,
1973 ValNo, ValVT, LocVT, ArgFlags);
1974 }
1975
1976 // Will be passed indirectly; make sure we allocate the right type of
1977 // register for the pointer.
1978 if (!PendingLocs.empty())
1979 ValVT = PtrVT;
1980
1981 // Allocate to a register if possible, or else a stack slot.
1982 Register Reg;
1983 unsigned ArgBytes = ValVT == CLenVT ? DL.getPointerSize(200) : XLen / 8;
1984 // Always pass pure capability varargs on the stack
1985 if (IsPureCapVarArgs)
1986 Reg = 0;
1987 else if (ValVT == MVT::f32 && !UseGPRForF32)
1988 Reg = State.AllocateReg(ArgFPR32s, ArgFPR64s);
1989 else if (ValVT == MVT::f64 && !UseGPRForF64)
1990 Reg = State.AllocateReg(ArgFPR64s, ArgFPR32s);
1991 else if (ValVT == CLenVT)
1992 Reg = State.AllocateReg(ArgGPCRs);
1993 else
1994 Reg = State.AllocateReg(ArgGPRs);
1995 unsigned StackOffset = Reg ? 0 : State.AllocateStack(ArgBytes, Align(ArgBytes));
1996
1997 // If we reach this point and PendingLocs is non-empty, we must be at the
1998 // end of a split argument that must be passed indirectly.
1999 if (!PendingLocs.empty()) {
2000 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
2001 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
2002
2003 for (auto &It : PendingLocs) {
2004 if (Reg)
2005 State.addLoc(
2006 CCValAssign::getReg(It.getValNo(), It.getValVT(), Reg,
2007 PtrVT, CCValAssign::Indirect));
2008 else
2009 State.addLoc(
2010 CCValAssign::getMem(It.getValNo(), It.getValVT(), StackOffset,
2011 PtrVT, CCValAssign::Indirect));
2012 }
2013 PendingLocs.clear();
2014 PendingArgFlags.clear();
2015 return false;
2016 }
2017
2018 assert((!UseGPRForF32 || !UseGPRForF64 || LocVT == XLenVT || LocVT == CLenVT) &&
2019 "Expected an XLenVT or CLenVT at this stage");
2020
2021 if (Reg) {
2022 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2023 return false;
2024 }
2025
2026 // When an f32 or f64 is passed on the stack, no bit-conversion is needed.
2027 if (ValVT == MVT::f32 || ValVT == MVT::f64) {
2028 LocVT = ValVT;
2029 LocInfo = CCValAssign::Full;
2030 }
2031 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
2032 return false;
2033 }
2034
2035 void RISCVTargetLowering::analyzeInputArgs(
2036 MachineFunction &MF, CCState &CCInfo,
2037 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet) const {
2038 unsigned NumArgs = Ins.size();
2039 FunctionType *FType = MF.getFunction().getFunctionType();
2040
2041 for (unsigned i = 0; i != NumArgs; ++i) {
2042 MVT ArgVT = Ins[i].VT;
2043 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
2044
2045 Type *ArgTy = nullptr;
2046 if (IsRet)
2047 ArgTy = FType->getReturnType();
2048 else if (Ins[i].isOrigArg())
2049 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
2050
2051 if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
2052 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, Subtarget)) {
2053 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
2054 << EVT(ArgVT).getEVTString() << '\n');
2055 llvm_unreachable(nullptr);
2056 }
2057 }
2058 }
2059
2060 void RISCVTargetLowering::analyzeOutputArgs(
2061 MachineFunction &MF, CCState &CCInfo,
2062 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
2063 CallLoweringInfo *CLI) const {
2064 unsigned NumArgs = Outs.size();
2065
2066 for (unsigned i = 0; i != NumArgs; i++) {
2067 MVT ArgVT = Outs[i].VT;
2068 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
2069 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
2070
2071 if (CC_RISCV(MF.getDataLayout(), i, ArgVT, ArgVT, CCValAssign::Full,
2072 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, Subtarget)) {
2073 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
2074 << EVT(ArgVT).getEVTString() << "\n");
2075 llvm_unreachable(nullptr);
2076 }
2077 }
2078 }
2079
2080 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
2081 // values.
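// For example, an f32 that was passed in an RV64 GPR (LocVT i64 with BCvt) is
// recovered with FMV_W_X_RV64 rather than a plain bitcast, since only the low
// 32 bits of the GPR hold the f32 value.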
2082 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
2083 const CCValAssign &VA, const SDLoc &DL) {
2084 switch (VA.getLocInfo()) {
2085 default:
2086 llvm_unreachable("Unexpected CCValAssign::LocInfo");
2087 case CCValAssign::Full:
2088 break;
2089 case CCValAssign::BCvt:
2090 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
2091 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
2092 break;
2093 }
2094 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
2095 break;
2096 }
2097 return Val;
2098 }
2099
2100 // The caller is responsible for loading the full value if the argument is
2101 // passed with CCValAssign::Indirect.
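// For example, an i128 argument on RV32 is passed as a pointer in a single
// GPR; this helper returns that pointer, and LowerFormalArguments then loads
// each part of the value through it.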
2102 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
2103 const CCValAssign &VA, const SDLoc &DL) {
2104 MachineFunction &MF = DAG.getMachineFunction();
2105 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2106 EVT LocVT = VA.getLocVT();
2107 SDValue Val;
2108 const TargetRegisterClass *RC;
2109
2110 if (LocVT.isFatPointer())
2111 RC = &RISCV::GPCRRegClass;
2112 else
2113 switch (LocVT.getSimpleVT().SimpleTy) {
2114 default:
2115 llvm_unreachable("Unexpected register type");
2116 case MVT::i32:
2117 case MVT::i64:
2118 RC = &RISCV::GPRRegClass;
2119 break;
2120 case MVT::f32:
2121 RC = &RISCV::FPR32RegClass;
2122 break;
2123 case MVT::f64:
2124 RC = &RISCV::FPR64RegClass;
2125 break;
2126 }
2127
2128 Register VReg = RegInfo.createVirtualRegister(RC);
2129 RegInfo.addLiveIn(VA.getLocReg(), VReg);
2130 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
2131
2132 if (VA.getLocInfo() == CCValAssign::Indirect)
2133 return Val;
2134
2135 return convertLocVTToValVT(DAG, Val, VA, DL);
2136 }
2137
2138 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
2139 const CCValAssign &VA, const SDLoc &DL) {
2140 EVT LocVT = VA.getLocVT();
2141
2142 switch (VA.getLocInfo()) {
2143 default:
2144 llvm_unreachable("Unexpected CCValAssign::LocInfo");
2145 case CCValAssign::Full:
2146 break;
2147 case CCValAssign::BCvt:
2148 if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32) {
2149 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
2150 break;
2151 }
2152 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
2153 break;
2154 }
2155 return Val;
2156 }
2157
2158 // The caller is responsible for loading the full value if the argument is
2159 // passed with CCValAssign::Indirect.
2160 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
2161 const CCValAssign &VA, const SDLoc &DL,
2162 EVT PtrVT) {
2163 MachineFunction &MF = DAG.getMachineFunction();
2164 MachineFrameInfo &MFI = MF.getFrameInfo();
2165 EVT LocVT = VA.getLocVT();
2166 EVT ValVT = VA.getValVT();
2167 int FI = MFI.CreateFixedObject(ValVT.getSizeInBits() / 8,
2168 VA.getLocMemOffset(), /*Immutable=*/true);
2169 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
2170 SDValue Val;
2171
2172 ISD::LoadExtType ExtType;
2173 switch (VA.getLocInfo()) {
2174 default:
2175 llvm_unreachable("Unexpected CCValAssign::LocInfo");
2176 case CCValAssign::Full:
2177 case CCValAssign::Indirect:
2178 case CCValAssign::BCvt:
2179 ExtType = ISD::NON_EXTLOAD;
2180 break;
2181 }
2182 Val = DAG.getExtLoad(
2183 ExtType, DL, LocVT, Chain, FIN,
2184 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
2185 return Val;
2186 }
2187
2188 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
2189 const CCValAssign &VA, const SDLoc &DL) {
2190 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
2191 "Unexpected VA");
2192 MachineFunction &MF = DAG.getMachineFunction();
2193 MachineFrameInfo &MFI = MF.getFrameInfo();
2194 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2195
2196 if (VA.isMemLoc()) {
2197 // f64 is passed on the stack.
2198 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
2199 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2200 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
2201 MachinePointerInfo::getFixedStack(MF, FI));
2202 }
2203
2204 assert(VA.isRegLoc() && "Expected register VA assignment");
2205
2206 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2207 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
2208 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
2209 SDValue Hi;
2210 if (VA.getLocReg() == RISCV::X17) {
2211 // Second half of f64 is passed on the stack.
2212 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
2213 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
2214 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
2215 MachinePointerInfo::getFixedStack(MF, FI));
2216 } else {
2217 // Second half of f64 is passed in another GPR.
2218 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
2219 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
2220 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
2221 }
2222 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
2223 }
2224
2225 // FastCC has shown less than a 1% performance improvement on particular
2226 // benchmarks, but in theory it may benefit some cases.
2227 static bool CC_RISCV_FastCC(unsigned ValNo, MVT ValVT, MVT LocVT,
2228 CCValAssign::LocInfo LocInfo,
2229 ISD::ArgFlagsTy ArgFlags, CCState &State) {
2230
2231 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
2232 // X5 and X6 might be used for save-restore libcall.
2233 static const MCPhysReg GPRList[] = {
2234 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
2235 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
2236 RISCV::X29, RISCV::X30, RISCV::X31};
2237 if (unsigned Reg = State.AllocateReg(GPRList)) {
2238 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2239 return false;
2240 }
2241 }
2242
2243 if (LocVT.isFatPointer()) {
2244 // C5 and C6 might be used for save-restore libcall.
2245 static const MCPhysReg GPCRList[] = {
2246 RISCV::C10, RISCV::C11, RISCV::C12, RISCV::C13, RISCV::C14,
2247 RISCV::C15, RISCV::C16, RISCV::C17, RISCV::C7, RISCV::C28,
2248 RISCV::C29, RISCV::C30, RISCV::C31};
2249 if (unsigned Reg = State.AllocateReg(GPCRList)) {
2250 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2251 return false;
2252 }
2253 }
2254
2255 if (LocVT == MVT::f32) {
2256 static const MCPhysReg FPR32List[] = {
2257 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
2258 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
2259 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
2260 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
2261 if (unsigned Reg = State.AllocateReg(FPR32List)) {
2262 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2263 return false;
2264 }
2265 }
2266
2267 if (LocVT == MVT::f64) {
2268 static const MCPhysReg FPR64List[] = {
2269 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
2270 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
2271 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
2272 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
2273 if (unsigned Reg = State.AllocateReg(FPR64List)) {
2274 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
2275 return false;
2276 }
2277 }
2278
2279 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
2280 unsigned Offset4 = State.AllocateStack(4, Align(4));
2281 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
2282 return false;
2283 }
2284
2285 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
2286 unsigned Offset5 = State.AllocateStack(8, Align(8));
2287 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
2288 return false;
2289 }
2290
2291 if (LocVT.isFatPointer()) {
2292 unsigned CLen = LocVT.getSizeInBits();
2293 unsigned Offset6 = State.AllocateStack(CLen / 8, Align(CLen / 8));
2294 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset6, LocVT, LocInfo));
2295 return false;
2296 }
2297
2298 return true; // CC didn't match.
2299 }
2300
2301 // Transform physical registers into virtual registers.
2302 SDValue RISCVTargetLowering::LowerFormalArguments(
2303 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
2304 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
2305 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
2306
2307 switch (CallConv) {
2308 default:
2309 report_fatal_error("Unsupported calling convention");
2310 case CallingConv::C:
2311 case CallingConv::Fast:
2312 break;
2313 }
2314
2315 MachineFunction &MF = DAG.getMachineFunction();
2316
2317 const Function &Func = MF.getFunction();
2318 if (Func.hasFnAttribute("interrupt")) {
2319 if (!Func.arg_empty())
2320 report_fatal_error(
2321 "Functions with the interrupt attribute cannot have arguments!");
2322
2323 StringRef Kind =
2324 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
2325
2326 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
2327 report_fatal_error(
2328 "Function interrupt attribute argument not supported!");
2329 }
2330
2331 EVT PtrVT = getPointerTy(DAG.getDataLayout(),
2332 DAG.getDataLayout().getAllocaAddrSpace());
2333 MVT XLenVT = Subtarget.getXLenVT();
2334   // Used with varargs to accumulate store chains.
2335 std::vector<SDValue> OutChains;
2336
2337 // Assign locations to all of the incoming arguments.
2338 SmallVector<CCValAssign, 16> ArgLocs;
2339 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2340
2341 if (CallConv == CallingConv::Fast)
2342 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_FastCC);
2343 else
2344 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false);
2345
2346 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
2347 CCValAssign &VA = ArgLocs[i];
2348 SDValue ArgValue;
2349 // Passing f64 on RV32D with a soft float ABI must be handled as a special
2350 // case.
2351 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
2352 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
2353 else if (VA.isRegLoc())
2354 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL);
2355 else
2356 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL, PtrVT);
2357
2358 if (VA.getLocInfo() == CCValAssign::Indirect) {
2359 // If the original argument was split and passed by reference (e.g. i128
2360 // on RV32), we need to load all parts of it here (using the same
2361 // address).
2362 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
2363 MachinePointerInfo()));
2364 unsigned ArgIndex = Ins[i].OrigArgIndex;
2365 assert(Ins[i].PartOffset == 0);
2366 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
2367 CCValAssign &PartVA = ArgLocs[i + 1];
2368 unsigned PartOffset = Ins[i + 1].PartOffset;
2369 SDValue Address = DAG.getPointerAdd(DL, ArgValue, PartOffset);
2370 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
2371 MachinePointerInfo()));
2372 ++i;
2373 }
2374 continue;
2375 }
2376 InVals.push_back(ArgValue);
2377 }
2378
2379 MachineFrameInfo &MFI = MF.getFrameInfo();
2380 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
2381 unsigned XLenInBytes = Subtarget.getXLen() / 8;
2382 if (IsVarArg && RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) {
2383 // Record the frame index of the first variable argument
2384 // which is a value necessary to VASTART.
2385 int FI = MFI.CreateFixedObject(XLenInBytes, CCInfo.getNextStackOffset(),
2386 true);
2387 RVFI->setVarArgsFrameIndex(FI);
2388 } else if (IsVarArg) {
2389 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
2390 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
2391 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
2392 MachineRegisterInfo &RegInfo = MF.getRegInfo();
2393
2394 // Offset of the first variable argument from stack pointer, and size of
2395 // the vararg save area. For now, the varargs save area is either zero or
2396 // large enough to hold a0-a7.
2397 int VaArgOffset, VarArgsSaveSize;
2398
2399 // If all registers are allocated, then all varargs must be passed on the
2400 // stack and we don't need to save any argregs.
2401 if (ArgRegs.size() == Idx) {
2402 VaArgOffset = CCInfo.getNextStackOffset();
2403 VarArgsSaveSize = 0;
2404 } else {
2405 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
2406 VaArgOffset = -VarArgsSaveSize;
2407 }
2408
2409 // Record the frame index of the first variable argument
2410 // which is a value necessary to VASTART.
2411 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2412 RVFI->setVarArgsFrameIndex(FI);
2413
2414 // If saving an odd number of registers then create an extra stack slot to
2415 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
2416     // offsets to even-numbered registers remain 2*XLEN-aligned.
2417 if (Idx % 2) {
2418 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
2419 VarArgsSaveSize += XLenInBytes;
2420 }
2421
2422 // Copy the integer registers that may have been used for passing varargs
2423 // to the vararg save area.
2424 for (unsigned I = Idx; I < ArgRegs.size();
2425 ++I, VaArgOffset += XLenInBytes) {
2426 const Register Reg = RegInfo.createVirtualRegister(RC);
2427 RegInfo.addLiveIn(ArgRegs[I], Reg);
2428 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
2429 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
2430 SDValue PtrOff = DAG.getFrameIndex(FI, PtrVT);
2431 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
2432 MachinePointerInfo::getFixedStack(MF, FI));
2433 cast<StoreSDNode>(Store.getNode())
2434 ->getMemOperand()
2435 ->setValue((Value *)nullptr);
2436 OutChains.push_back(Store);
2437 }
2438 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
2439 }
2440
2441 // All stores are grouped in one node to allow the matching between
2442 // the size of Ins and InVals. This only happens for vararg functions.
2443 if (!OutChains.empty()) {
2444 OutChains.push_back(Chain);
2445 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
2446 }
2447
2448 return Chain;
2449 }
2450
2451 /// isEligibleForTailCallOptimization - Check whether the call is eligible
2452 /// for tail call optimization.
2453 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
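/// For example (illustrative): a call whose arguments all fit in registers
/// and that meets the checks below can be tail-called, whereas a call that
/// passes an i128 indirectly on RV32 cannot (see the CCValAssign::Indirect
/// check).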
2454 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
2455 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
2456 const SmallVector<CCValAssign, 16> &ArgLocs) const {
2457
2458 auto &Callee = CLI.Callee;
2459 auto CalleeCC = CLI.CallConv;
2460 auto &Outs = CLI.Outs;
2461 auto &Caller = MF.getFunction();
2462 auto CallerCC = Caller.getCallingConv();
2463
2464 // Exception-handling functions need a special set of instructions to
2465 // indicate a return to the hardware. Tail-calling another function would
2466 // probably break this.
2467 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
2468 // should be expanded as new function attributes are introduced.
2469 if (Caller.hasFnAttribute("interrupt"))
2470 return false;
2471
2472 // Do not tail call opt if the stack is used to pass parameters.
2473 if (CCInfo.getNextStackOffset() != 0)
2474 return false;
2475
2476 // Do not tail call opt if any parameters need to be passed indirectly.
2477 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
2478 // passed indirectly. So the address of the value will be passed in a
2479 // register, or if not available, then the address is put on the stack. In
2480 // order to pass indirectly, space on the stack often needs to be allocated
2481 // in order to store the value. In this case the CCInfo.getNextStackOffset()
2482   // != 0 check is not enough and we need to check if any CCValAssign ArgLocs
2483 // are passed CCValAssign::Indirect.
2484 for (auto &VA : ArgLocs)
2485 if (VA.getLocInfo() == CCValAssign::Indirect)
2486 return false;
2487
2488 // Do not tail call opt if either caller or callee uses struct return
2489 // semantics.
2490 auto IsCallerStructRet = Caller.hasStructRetAttr();
2491 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
2492 if (IsCallerStructRet || IsCalleeStructRet)
2493 return false;
2494
2495 // Externally-defined functions with weak linkage should not be
2496 // tail-called. The behaviour of branch instructions in this situation (as
2497 // used for tail calls) is implementation-defined, so we cannot rely on the
2498 // linker replacing the tail call with a return.
2499 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
2500 const GlobalValue *GV = G->getGlobal();
2501 if (GV->hasExternalWeakLinkage())
2502 return false;
2503 }
2504
2505 // The callee has to preserve all registers the caller needs to preserve.
2506 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
2507 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
2508 if (CalleeCC != CallerCC) {
2509 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
2510 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
2511 return false;
2512 }
2513
2514 // Byval parameters hand the function a pointer directly into the stack area
2515 // we want to reuse during a tail call. Working around this *is* possible
2516 // but less efficient and uglier in LowerCall.
2517 for (auto &Arg : Outs)
2518 if (Arg.Flags.isByVal())
2519 return false;
2520
2521 return true;
2522 }
2523
2524 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
2525 // and output parameter nodes.
2526 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
2527 SmallVectorImpl<SDValue> &InVals) const {
2528 SelectionDAG &DAG = CLI.DAG;
2529 SDLoc &DL = CLI.DL;
2530 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
2531 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
2532 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
2533 SDValue Chain = CLI.Chain;
2534 SDValue Callee = CLI.Callee;
2535 bool &IsTailCall = CLI.IsTailCall;
2536 CallingConv::ID CallConv = CLI.CallConv;
2537 bool IsVarArg = CLI.IsVarArg;
2538 // TODO-CHERI: Stack address space (and uses)
2539 EVT PtrVT = getPointerTy(DAG.getDataLayout(),
2540 DAG.getDataLayout().getAllocaAddrSpace());
2541 MVT XLenVT = Subtarget.getXLenVT();
2542
2543 MachineFunction &MF = DAG.getMachineFunction();
2544
2545 // Analyze the operands of the call, assigning locations to each operand.
2546 SmallVector<CCValAssign, 16> ArgLocs;
2547 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
2548
2549 if (CallConv == CallingConv::Fast)
2550 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_FastCC);
2551 else
2552 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI);
2553
2554 // Check if it's really possible to do a tail call.
2555 if (IsTailCall)
2556 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
2557
2558 if (IsTailCall)
2559 ++NumTailCalls;
2560 else if (CLI.CB && CLI.CB->isMustTailCall())
2561 report_fatal_error("failed to perform tail call elimination on a call "
2562 "site marked musttail");
2563
2564 // Get a count of how many bytes are to be pushed on the stack.
2565 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
2566
2567 // Create local copies for byval args
2568 SmallVector<SDValue, 8> ByValArgs;
2569 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
2570 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2571 if (!Flags.isByVal())
2572 continue;
2573
2574 SDValue Arg = OutVals[i];
2575 unsigned Size = Flags.getByValSize();
2576 Align Alignment = Flags.getNonZeroByValAlign();
2577
2578 int FI =
2579 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
2580 SDValue FIPtr = DAG.getFrameIndex(FI, PtrVT);
2581 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
2582
2583 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
2584 /*IsVolatile=*/false,
2585 /*AlwaysInline=*/false,
2586 /*MustPreserveCheriCapabilities=*/false, IsTailCall,
2587 MachinePointerInfo(), MachinePointerInfo());
2588 ByValArgs.push_back(FIPtr);
2589 }
2590
2591 if (!IsTailCall)
2592 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
2593
2594 // Copy argument values to their designated locations.
2595 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
2596 SmallVector<SDValue, 8> MemOpChains;
2597 SDValue StackPtr;
2598 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
2599 CCValAssign &VA = ArgLocs[i];
2600 SDValue ArgValue = OutVals[i];
2601 ISD::ArgFlagsTy Flags = Outs[i].Flags;
2602
2603 // Handle passing f64 on RV32D with a soft float ABI as a special case.
2604 bool IsF64OnRV32DSoftABI =
2605 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
2606 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
2607 SDValue SplitF64 = DAG.getNode(
2608 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
2609 SDValue Lo = SplitF64.getValue(0);
2610 SDValue Hi = SplitF64.getValue(1);
2611
2612 Register RegLo = VA.getLocReg();
2613 RegsToPass.push_back(std::make_pair(RegLo, Lo));
2614
2615 if (RegLo == RISCV::X17) {
2616 // Second half of f64 is passed on the stack.
2617 // Work out the address of the stack slot.
2618 if (!StackPtr.getNode())
2619 StackPtr =
2620 DAG.getCopyFromReg(Chain, DL,
2621 getStackPointerRegisterToSaveRestore(),
2622 PtrVT);
2623 // Emit the store.
2624 MemOpChains.push_back(
2625 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
2626 } else {
2627 // Second half of f64 is passed in another GPR.
2628 assert(RegLo < RISCV::X31 && "Invalid register pair");
2629 Register RegHigh = RegLo + 1;
2630 RegsToPass.push_back(std::make_pair(RegHigh, Hi));
2631 }
2632 continue;
2633 }
2634
2635 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
2636 // as any other MemLoc.
2637
2638 // Promote the value if needed.
2639 // For now, only handle fully promoted and indirect arguments.
2640 if (VA.getLocInfo() == CCValAssign::Indirect) {
2641 // Store the argument in a stack slot and pass its address.
2642 SDValue SpillSlot = DAG.CreateStackTemporary(Outs[i].ArgVT);
2643 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
2644 MemOpChains.push_back(
2645 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
2646 MachinePointerInfo::getFixedStack(MF, FI)));
2647 // If the original argument was split (e.g. i128), we need
2648 // to store all parts of it here (and pass just one address).
2649 unsigned ArgIndex = Outs[i].OrigArgIndex;
2650 assert(Outs[i].PartOffset == 0);
2651 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
2652 SDValue PartValue = OutVals[i + 1];
2653 unsigned PartOffset = Outs[i + 1].PartOffset;
2654 SDValue Address = DAG.getPointerAdd(DL, SpillSlot, PartOffset);
2655 MemOpChains.push_back(
2656 DAG.getStore(Chain, DL, PartValue, Address,
2657 MachinePointerInfo::getFixedStack(MF, FI)));
2658 ++i;
2659 }
2660 ArgValue = SpillSlot;
2661 } else {
2662 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL);
2663 }
2664
2665 // Use local copy if it is a byval arg.
2666 if (Flags.isByVal())
2667 ArgValue = ByValArgs[j++];
2668
2669 if (VA.isRegLoc()) {
2670 // Queue up the argument copies and emit them at the end.
2671 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
2672 } else {
2673 assert(VA.isMemLoc() && "Argument not register or memory");
2674 assert(!IsTailCall && "Tail call not allowed if stack is used "
2675 "for passing parameters");
2676
2677 // Work out the address of the stack slot.
2678 if (!StackPtr.getNode())
2679 StackPtr =
2680 DAG.getCopyFromReg(Chain, DL,
2681 getStackPointerRegisterToSaveRestore(),
2682 PtrVT);
2683 SDValue Address =
2684 DAG.getPointerAdd(DL, StackPtr, VA.getLocMemOffset());
2685
2686 // Emit the store.
2687 MemOpChains.push_back(
2688 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
2689 }
2690 }
2691
2692 // Join the stores, which are independent of one another.
2693 if (!MemOpChains.empty())
2694 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
2695
2696 SDValue Glue;
2697
2698 // Build a sequence of copy-to-reg nodes, chained and glued together.
2699 for (auto &Reg : RegsToPass) {
2700 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
2701 Glue = Chain.getValue(1);
2702 }
2703
2704 // Validate that none of the argument registers have been marked as
2705   // reserved; if so, report an error. Do the same for the return address if
2706   // this is not a tail call.
2707 validateCCReservedRegs(RegsToPass, MF);
2708 if (!IsTailCall &&
2709 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
2710 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
2711 MF.getFunction(),
2712 "Return address register required, but has been reserved."});
2713
2714 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
2715 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
2716 // split it and then direct call can be matched by PseudoCALL.
2717 // TODO: Support purecap PLT
  if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
    const GlobalValue *GV = S->getGlobal();
    bool IsLocal =
        getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
    if (RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) {
      // FIXME: we can't set IsLocal yet since we don't handle PLTs yet
      IsLocal = false;
      Callee = getAddr(S, Callee.getValueType(), DAG, IsLocal,
                       /*CanDeriveFromPcc=*/true);
    } else {
      unsigned OpFlags = IsLocal ? RISCVII::MO_CALL : RISCVII::MO_PLT;
      Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
    }
  } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
    bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(
        *MF.getFunction().getParent(), nullptr);
    if (RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())) {
      // FIXME: we can't set IsLocal yet since we don't handle PLTs yet
      IsLocal = false;
      Callee = getAddr(S, Callee.getValueType(), DAG, IsLocal,
                       /*CanDeriveFromPcc=*/true);
    } else {
      unsigned OpFlags = IsLocal ? RISCVII::MO_CALL : RISCVII::MO_PLT;
      Callee = DAG.getTargetExternalFunctionSymbol(S->getSymbol(), OpFlags);
    }
  }

  // The first call operand is the chain and the second is the target address.
  SmallVector<SDValue, 8> Ops;
  Ops.push_back(Chain);
  Ops.push_back(Callee);

  // Add argument registers to the end of the list so that they are
  // known live into the call.
  for (auto &Reg : RegsToPass)
    Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));

  if (!IsTailCall) {
    // Add a register mask operand representing the call-preserved registers.
    const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
    const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
    assert(Mask && "Missing call preserved mask for calling convention");
    Ops.push_back(DAG.getRegisterMask(Mask));
  }

  // Glue the call to the argument copies, if any.
  if (Glue.getNode())
    Ops.push_back(Glue);

  // Emit the call.
  SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);

  if (IsTailCall) {
    MF.getFrameInfo().setHasTailCall();
    if (RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
      return DAG.getNode(RISCVISD::CAP_TAIL, DL, NodeTys, Ops);
    else
      return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
  }

  if (RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
    Chain = DAG.getNode(RISCVISD::CAP_CALL, DL, NodeTys, Ops);
  else
    Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);

  DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
  Glue = Chain.getValue(1);

  // Mark the end of the call, which is glued to the call itself.
  Chain = DAG.getCALLSEQ_END(Chain, DAG.getIntPtrConstant(NumBytes, DL, true),
                             DAG.getIntPtrConstant(0, DL, true), Glue, DL);
  Glue = Chain.getValue(1);

  // Assign locations to each value returned by this call.
  SmallVector<CCValAssign, 16> RVLocs;
  CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
  analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true);

  // Copy all of the result registers out of their specified physreg.
  for (auto &VA : RVLocs) {
    // Copy the value out
    SDValue RetValue =
        DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
    // Glue the RetValue to the end of the call sequence
    Chain = RetValue.getValue(1);
    Glue = RetValue.getValue(2);

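    // With an RV32 soft-float ABI, an f64 return value comes back in a pair
    // of GPRs (a0 and a1); fetch the second half here and recombine the two
    // halves with BuildPairF64.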
    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
      SDValue RetValue2 =
          DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
      Chain = RetValue2.getValue(1);
      Glue = RetValue2.getValue(2);
      RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
                             RetValue2);
    }

    RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL);

    InVals.push_back(RetValue);
  }

  return Chain;
}

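// Return true if every value in Outs can be assigned a return register by
// CC_RISCV; a value that would have to be returned on the stack cannot be
// lowered as a plain return.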
bool RISCVTargetLowering::CanLowerReturn(
    CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
    const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
  SmallVector<CCValAssign, 16> RVLocs;
  CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
  for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
    MVT VT = Outs[i].VT;
    ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
    if (CC_RISCV(MF.getDataLayout(), i, VT, VT, CCValAssign::Full, ArgFlags,
                 CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr, Subtarget))
      return false;
  }
  return true;
}

SDValue
RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
                                 bool IsVarArg,
                                 const SmallVectorImpl<ISD::OutputArg> &Outs,
                                 const SmallVectorImpl<SDValue> &OutVals,
                                 const SDLoc &DL, SelectionDAG &DAG) const {
  const MachineFunction &MF = DAG.getMachineFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  // Stores the assignment of the return value to a location.
  SmallVector<CCValAssign, 16> RVLocs;

  // Info about the registers and stack slot.
  CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
                 *DAG.getContext());

  analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
                    nullptr);

  SDValue Glue;
  SmallVector<SDValue, 4> RetOps(1, Chain);

  // Copy the result values into the output registers.
  for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
    SDValue Val = OutVals[i];
    CCValAssign &VA = RVLocs[i];
    assert(VA.isRegLoc() && "Can only return in registers!");

    if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
      // Handle returning f64 on RV32D with a soft float ABI.
      assert(VA.isRegLoc() && "Expected return via registers");
      SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
                                     DAG.getVTList(MVT::i32, MVT::i32), Val);
      SDValue Lo = SplitF64.getValue(0);
      SDValue Hi = SplitF64.getValue(1);
      Register RegLo = VA.getLocReg();
      assert(RegLo < RISCV::X31 && "Invalid register pair");
      Register RegHi = RegLo + 1;

      if (STI.isRegisterReservedByUser(RegLo) ||
          STI.isRegisterReservedByUser(RegHi))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
      Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
    } else {
      // Handle a 'normal' return.
      Val = convertValVTToLocVT(DAG, Val, VA, DL);
      Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);

      if (STI.isRegisterReservedByUser(VA.getLocReg()))
        MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
            MF.getFunction(),
            "Return value register required, but has been reserved."});

      // Guarantee that all emitted copies are stuck together.
      Glue = Chain.getValue(1);
      RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
    }
  }

  RetOps[0] = Chain; // Update chain.

  // Add the glue node if we have it.
  if (Glue.getNode()) {
    RetOps.push_back(Glue);
  }

  // Interrupt service routines use different return instructions.
  const Function &Func = DAG.getMachineFunction().getFunction();
  if (Func.hasFnAttribute("interrupt")) {
    if (!Func.getReturnType()->isVoidTy())
      report_fatal_error(
          "Functions with the interrupt attribute must have void return type!");

    MachineFunction &MF = DAG.getMachineFunction();
    StringRef Kind =
        MF.getFunction().getFnAttribute("interrupt").getValueAsString();

    unsigned RetOpc;
    if (Kind == "user")
      RetOpc = RISCVISD::URET_FLAG;
    else if (Kind == "supervisor")
      RetOpc = RISCVISD::SRET_FLAG;
    else
      RetOpc = RISCVISD::MRET_FLAG;

    return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
  }

  return DAG.getNode(RISCVISD::RET_FLAG, DL, MVT::Other, RetOps);
}

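// Diagnose any argument-passing register that the user has reserved (e.g.
// with -ffixed-<reg>): the calling convention requires it, so the call
// cannot be lowered correctly.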
void RISCVTargetLowering::validateCCReservedRegs(
    const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
    MachineFunction &MF) const {
  const Function &F = MF.getFunction();
  const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();

  if (std::any_of(std::begin(Regs), std::end(Regs), [&STI](auto Reg) {
        return STI.isRegisterReservedByUser(Reg.first);
      }))
    F.getContext().diagnose(DiagnosticInfoUnsupported{
        F, "Argument register required, but has been reserved."});
}

bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
  return CI->isTailCall();
}

const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
  switch ((RISCVISD::NodeType)Opcode) {
  case RISCVISD::FIRST_NUMBER:
    break;
  case RISCVISD::RET_FLAG:
    return "RISCVISD::RET_FLAG";
  case RISCVISD::URET_FLAG:
    return "RISCVISD::URET_FLAG";
  case RISCVISD::SRET_FLAG:
    return "RISCVISD::SRET_FLAG";
  case RISCVISD::MRET_FLAG:
    return "RISCVISD::MRET_FLAG";
  case RISCVISD::CALL:
    return "RISCVISD::CALL";
  case RISCVISD::SELECT_CC:
    return "RISCVISD::SELECT_CC";
  case RISCVISD::BuildPairF64:
    return "RISCVISD::BuildPairF64";
  case RISCVISD::SplitF64:
    return "RISCVISD::SplitF64";
  case RISCVISD::TAIL:
    return "RISCVISD::TAIL";
  case RISCVISD::SLLW:
    return "RISCVISD::SLLW";
  case RISCVISD::SRAW:
    return "RISCVISD::SRAW";
  case RISCVISD::SRLW:
    return "RISCVISD::SRLW";
  case RISCVISD::DIVW:
    return "RISCVISD::DIVW";
  case RISCVISD::DIVUW:
    return "RISCVISD::DIVUW";
  case RISCVISD::REMUW:
    return "RISCVISD::REMUW";
  case RISCVISD::FMV_W_X_RV64:
    return "RISCVISD::FMV_W_X_RV64";
  case RISCVISD::FMV_X_ANYEXTW_RV64:
    return "RISCVISD::FMV_X_ANYEXTW_RV64";
  case RISCVISD::READ_CYCLE_WIDE:
    return "RISCVISD::READ_CYCLE_WIDE";
  case RISCVISD::CAP_CALL:
    return "RISCVISD::CAP_CALL";
  case RISCVISD::CAP_TAIL:
    return "RISCVISD::CAP_TAIL";
  case RISCVISD::CAP_TAG_GET:
    return "RISCVISD::CAP_TAG_GET";
  case RISCVISD::CAP_SEALED_GET:
    return "RISCVISD::CAP_SEALED_GET";
  case RISCVISD::CAP_SUBSET_TEST:
    return "RISCVISD::CAP_SUBSET_TEST";
  case RISCVISD::CAP_EQUAL_EXACT:
    return "RISCVISD::CAP_EQUAL_EXACT";
  }
  return nullptr;
}

/// getConstraintType - Given a constraint letter, return the type of
/// constraint it is for this target.
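/// In addition to the standard RISC-V constraints, 'C' selects a CHERI
/// capability register on targets that have them.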
RISCVTargetLowering::ConstraintType
RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    default:
      break;
    case 'C':
    case 'f':
      return C_RegisterClass;
    case 'I':
    case 'J':
    case 'K':
      return C_Immediate;
    case 'A':
      return C_Memory;
    }
  }
  return TargetLowering::getConstraintType(Constraint);
}

std::pair<unsigned, const TargetRegisterClass *>
RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
                                                  StringRef Constraint,
                                                  MVT VT) const {
  // First, see if this is a constraint that directly corresponds to a
  // RISCV register class.
  if (Constraint.size() == 1) {
    switch (Constraint[0]) {
    case 'r':
      // Don't try to split/combine capabilities in order to use a GPR; give a
      // friendlier error message instead.
      if (Subtarget.hasCheri() && VT == Subtarget.typeForCapabilities())
        break;
      return std::make_pair(0U, &RISCV::GPRRegClass);
    case 'C':
      if (Subtarget.hasCheri() && VT == Subtarget.typeForCapabilities())
        return std::make_pair(0U, &RISCV::GPCRRegClass);
      break;
    case 'f':
      if (Subtarget.hasStdExtF() && VT == MVT::f32)
        return std::make_pair(0U, &RISCV::FPR32RegClass);
      if (Subtarget.hasStdExtD() && VT == MVT::f64)
        return std::make_pair(0U, &RISCV::FPR64RegClass);
      break;
    default:
      break;
    }
  }

  // Clang will correctly decode the usage of register name aliases into their
  // official names. However, other frontends like `rustc` do not. This allows
  // users of these frontends to use the ABI names for registers in LLVM-style
  // register constraints.
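  // For example, a frontend may emit "={a0}" rather than "={x10}" in a
  // constraint string; both should resolve to RISCV::X10.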
  Register XRegFromAlias = StringSwitch<Register>(Constraint.lower())
                               .Case("{zero}", RISCV::X0)
                               .Case("{ra}", RISCV::X1)
                               .Case("{sp}", RISCV::X2)
                               .Case("{gp}", RISCV::X3)
                               .Case("{tp}", RISCV::X4)
                               .Case("{t0}", RISCV::X5)
                               .Case("{t1}", RISCV::X6)
                               .Case("{t2}", RISCV::X7)
                               .Cases("{s0}", "{fp}", RISCV::X8)
                               .Case("{s1}", RISCV::X9)
                               .Case("{a0}", RISCV::X10)
                               .Case("{a1}", RISCV::X11)
                               .Case("{a2}", RISCV::X12)
                               .Case("{a3}", RISCV::X13)
                               .Case("{a4}", RISCV::X14)
                               .Case("{a5}", RISCV::X15)
                               .Case("{a6}", RISCV::X16)
                               .Case("{a7}", RISCV::X17)
                               .Case("{s2}", RISCV::X18)
                               .Case("{s3}", RISCV::X19)
                               .Case("{s4}", RISCV::X20)
                               .Case("{s5}", RISCV::X21)
                               .Case("{s6}", RISCV::X22)
                               .Case("{s7}", RISCV::X23)
                               .Case("{s8}", RISCV::X24)
                               .Case("{s9}", RISCV::X25)
                               .Case("{s10}", RISCV::X26)
                               .Case("{s11}", RISCV::X27)
                               .Case("{t3}", RISCV::X28)
                               .Case("{t4}", RISCV::X29)
                               .Case("{t5}", RISCV::X30)
                               .Case("{t6}", RISCV::X31)
                               .Default(RISCV::NoRegister);
  if (XRegFromAlias != RISCV::NoRegister)
    return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);

  // Similarly, allow capability register ABI names to be used in constraints.
  if (Subtarget.hasCheri()) {
    Register CRegFromAlias = StringSwitch<Register>(Constraint.lower())
                                 .Case("{cnull}", RISCV::C0)
                                 .Case("{cra}", RISCV::C1)
                                 .Case("{csp}", RISCV::C2)
                                 .Case("{cgp}", RISCV::C3)
                                 .Case("{ctp}", RISCV::C4)
                                 .Case("{ct0}", RISCV::C5)
                                 .Case("{ct1}", RISCV::C6)
                                 .Case("{ct2}", RISCV::C7)
                                 .Cases("{cs0}", "{cfp}", RISCV::C8)
                                 .Case("{cs1}", RISCV::C9)
                                 .Case("{ca0}", RISCV::C10)
                                 .Case("{ca1}", RISCV::C11)
                                 .Case("{ca2}", RISCV::C12)
                                 .Case("{ca3}", RISCV::C13)
                                 .Case("{ca4}", RISCV::C14)
                                 .Case("{ca5}", RISCV::C15)
                                 .Case("{ca6}", RISCV::C16)
                                 .Case("{ca7}", RISCV::C17)
                                 .Case("{cs2}", RISCV::C18)
                                 .Case("{cs3}", RISCV::C19)
                                 .Case("{cs4}", RISCV::C20)
                                 .Case("{cs5}", RISCV::C21)
                                 .Case("{cs6}", RISCV::C22)
                                 .Case("{cs7}", RISCV::C23)
                                 .Case("{cs8}", RISCV::C24)
                                 .Case("{cs9}", RISCV::C25)
                                 .Case("{cs10}", RISCV::C26)
                                 .Case("{cs11}", RISCV::C27)
                                 .Case("{ct3}", RISCV::C28)
                                 .Case("{ct4}", RISCV::C29)
                                 .Case("{ct5}", RISCV::C30)
                                 .Case("{ct6}", RISCV::C31)
                                 .Default(RISCV::NoRegister);
    if (CRegFromAlias != RISCV::NoRegister)
      return std::make_pair(CRegFromAlias, &RISCV::GPCRRegClass);
  }

  // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
  // TableGen record rather than the AsmName to choose registers for InlineAsm
  // constraints, and since we want to match those names to the widest floating
  // point register type available, manually select floating point registers
  // here.
  //
  // The second case is the ABI name of the register, so that frontends can
  // also use the ABI names in register constraint lists.
  if (Subtarget.hasStdExtF() || Subtarget.hasStdExtD()) {
    std::pair<Register, Register> FReg =
        StringSwitch<std::pair<Register, Register>>(Constraint.lower())
            .Cases("{f0}", "{ft0}", {RISCV::F0_F, RISCV::F0_D})
            .Cases("{f1}", "{ft1}", {RISCV::F1_F, RISCV::F1_D})
            .Cases("{f2}", "{ft2}", {RISCV::F2_F, RISCV::F2_D})
            .Cases("{f3}", "{ft3}", {RISCV::F3_F, RISCV::F3_D})
            .Cases("{f4}", "{ft4}", {RISCV::F4_F, RISCV::F4_D})
            .Cases("{f5}", "{ft5}", {RISCV::F5_F, RISCV::F5_D})
            .Cases("{f6}", "{ft6}", {RISCV::F6_F, RISCV::F6_D})
            .Cases("{f7}", "{ft7}", {RISCV::F7_F, RISCV::F7_D})
            .Cases("{f8}", "{fs0}", {RISCV::F8_F, RISCV::F8_D})
            .Cases("{f9}", "{fs1}", {RISCV::F9_F, RISCV::F9_D})
            .Cases("{f10}", "{fa0}", {RISCV::F10_F, RISCV::F10_D})
            .Cases("{f11}", "{fa1}", {RISCV::F11_F, RISCV::F11_D})
            .Cases("{f12}", "{fa2}", {RISCV::F12_F, RISCV::F12_D})
            .Cases("{f13}", "{fa3}", {RISCV::F13_F, RISCV::F13_D})
            .Cases("{f14}", "{fa4}", {RISCV::F14_F, RISCV::F14_D})
            .Cases("{f15}", "{fa5}", {RISCV::F15_F, RISCV::F15_D})
            .Cases("{f16}", "{fa6}", {RISCV::F16_F, RISCV::F16_D})
            .Cases("{f17}", "{fa7}", {RISCV::F17_F, RISCV::F17_D})
            .Cases("{f18}", "{fs2}", {RISCV::F18_F, RISCV::F18_D})
            .Cases("{f19}", "{fs3}", {RISCV::F19_F, RISCV::F19_D})
            .Cases("{f20}", "{fs4}", {RISCV::F20_F, RISCV::F20_D})
            .Cases("{f21}", "{fs5}", {RISCV::F21_F, RISCV::F21_D})
            .Cases("{f22}", "{fs6}", {RISCV::F22_F, RISCV::F22_D})
            .Cases("{f23}", "{fs7}", {RISCV::F23_F, RISCV::F23_D})
            .Cases("{f24}", "{fs8}", {RISCV::F24_F, RISCV::F24_D})
            .Cases("{f25}", "{fs9}", {RISCV::F25_F, RISCV::F25_D})
            .Cases("{f26}", "{fs10}", {RISCV::F26_F, RISCV::F26_D})
            .Cases("{f27}", "{fs11}", {RISCV::F27_F, RISCV::F27_D})
            .Cases("{f28}", "{ft8}", {RISCV::F28_F, RISCV::F28_D})
            .Cases("{f29}", "{ft9}", {RISCV::F29_F, RISCV::F29_D})
            .Cases("{f30}", "{ft10}", {RISCV::F30_F, RISCV::F30_D})
            .Cases("{f31}", "{ft11}", {RISCV::F31_F, RISCV::F31_D})
            .Default({RISCV::NoRegister, RISCV::NoRegister});
    if (FReg.first != RISCV::NoRegister)
      return Subtarget.hasStdExtD()
                 ? std::make_pair(FReg.second, &RISCV::FPR64RegClass)
                 : std::make_pair(FReg.first, &RISCV::FPR32RegClass);
  }

  return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
}

unsigned
RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
  // Currently only support length 1 constraints.
  if (ConstraintCode.size() == 1) {
    switch (ConstraintCode[0]) {
    case 'A':
      return InlineAsm::Constraint_A;
    default:
      break;
    }
  }

  return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
}

void RISCVTargetLowering::LowerAsmOperandForConstraint(
    SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
    SelectionDAG &DAG) const {
  // Currently only support length 1 constraints.
  if (Constraint.length() == 1) {
    switch (Constraint[0]) {
    case 'I':
      // Validate & create a 12-bit signed immediate operand.
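      // For example, this constraint accepts a constant like the 42 in:
      //   asm volatile("addi %0, %1, %2" : "=r"(res) : "r"(x), "I"(42));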
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getSExtValue();
        if (isInt<12>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    case 'J':
      // Validate & create an integer zero operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op))
        if (C->getZExtValue() == 0)
          Ops.push_back(
              DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
      return;
    case 'K':
      // Validate & create a 5-bit unsigned immediate operand.
      if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
        uint64_t CVal = C->getZExtValue();
        if (isUInt<5>(CVal))
          Ops.push_back(
              DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
      }
      return;
    default:
      break;
    }
  }
  TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
}

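// These fences follow the recommended RISC-V mapping for atomics under the
// RVWMO memory model: a seq_cst load is bracketed by a leading "fence rw,rw"
// and a trailing "fence r,rw", while a release (or stronger) store gets a
// leading "fence rw,w".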
Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilder<> &Builder,
                                                   Instruction *Inst,
                                                   AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
    return Builder.CreateFence(Ord);
  if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Release);
  return nullptr;
}

Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilder<> &Builder,
                                                    Instruction *Inst,
                                                    AtomicOrdering Ord) const {
  if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
    return Builder.CreateFence(AtomicOrdering::Acquire);
  return nullptr;
}

EVT RISCVTargetLowering::getOptimalMemOpType(
    const MemOp &Op, const AttributeList &FuncAttributes) const {
  // FIXME: Share MIPS and RISCV code.
  // CHERI memcpy/memmove must be tag-preserving, either through explicit
  // capability loads/stores or by making a runtime library call. We can't
  // use capability stores as an optimisation for memset unless it is a
  // zeroing memset.
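  // For example, on 64-bit CHERI-RISC-V a capability is 16 bytes, so a
  // 16-byte-aligned copy of at least 16 bytes must be performed with
  // capability loads and stores to keep any tags intact.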
  bool IsNonZeroMemset = Op.isMemset() && !Op.isZeroMemset();
  if (Subtarget.hasCheri() && !IsNonZeroMemset) {
    unsigned CapSize = Subtarget.typeForCapabilities().getSizeInBits() / 8;
    if (Op.size() >= CapSize) {
      Align CapAlign(CapSize);
      LLVM_DEBUG(dbgs() << __func__ << " Size=" << Op.size() << " DstAlign="
                        << (Op.isFixedDstAlign() ? Op.getDstAlign().value() : 0)
                        << " SrcAlign="
                        << (Op.isMemset() ? 0 : Op.getSrcAlign().value())
                        << " CapSize=" << CapSize << "\n");
      // If sufficiently aligned, we must use capability loads/stores if
      // copying, and can use cnull for a zeroing memset.
      if (Op.isAligned(CapAlign)) {
        return CapType;
      } else if (!Op.isMemset()) {
        // Otherwise if this is a copy then tell SelectionDAG to do a real
        // memcpy/memmove call (by returning MVT::isVoid), since it could still
        // contain a capability if sufficiently aligned at runtime. Zeroing
        // memsets can fall back on non-capability loads/stores.
        return MVT::isVoid;
      }
    }
  }

  return TargetLowering::getOptimalMemOpType(Op, FuncAttributes);
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
  // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
  // point operations can't be used in an lr/sc sequence without breaking the
  // forward-progress guarantee.
  if (AI->isFloatingPointOperation())
    return AtomicExpansionKind::CmpXChg;

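  // Narrow (i8/i16) atomics have no native instructions, so (outside the
  // pure-capability ABI) expand them to a masked LR/SC loop on the containing
  // aligned word; e.g. an i8 atomicrmw add becomes a call to an
  // llvm.riscv.masked.atomicrmw.add intrinsic.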
  unsigned Size = AI->getType()->getPrimitiveSizeInBits();
  if ((Size == 8 || Size == 16) &&
      !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

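// Map an atomicrmw binary operation to the corresponding RISC-V masked
// atomic intrinsic for the given XLen.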
static Intrinsic::ID
getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
  if (XLen == 32) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i32;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i32;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i32;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i32;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i32;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i32;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i32;
    }
  }

  if (XLen == 64) {
    switch (BinOp) {
    default:
      llvm_unreachable("Unexpected AtomicRMW BinOp");
    case AtomicRMWInst::Xchg:
      return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
    case AtomicRMWInst::Add:
      return Intrinsic::riscv_masked_atomicrmw_add_i64;
    case AtomicRMWInst::Sub:
      return Intrinsic::riscv_masked_atomicrmw_sub_i64;
    case AtomicRMWInst::Nand:
      return Intrinsic::riscv_masked_atomicrmw_nand_i64;
    case AtomicRMWInst::Max:
      return Intrinsic::riscv_masked_atomicrmw_max_i64;
    case AtomicRMWInst::Min:
      return Intrinsic::riscv_masked_atomicrmw_min_i64;
    case AtomicRMWInst::UMax:
      return Intrinsic::riscv_masked_atomicrmw_umax_i64;
    case AtomicRMWInst::UMin:
      return Intrinsic::riscv_masked_atomicrmw_umin_i64;
    }
  }

  llvm_unreachable("Unexpected XLen\n");
}

Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
    IRBuilder<> &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
    Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
  assert(!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()));
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering =
      Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
  Type *Tys[] = {AlignedAddr->getType()};
  Function *LrwOpScwLoop = Intrinsic::getDeclaration(
      AI->getModule(),
      getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);

  if (XLen == 64) {
    Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
  }

  Value *Result;

  // Must pass the shift amount needed to sign extend the loaded value prior
  // to performing a signed comparison for min/max. ShiftAmt is the number of
  // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
  // is the number of bits to left+right shift the value in order to
  // sign-extend.
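  // For example, with XLen == 32, ValWidth == 8 and ShiftAmt == 16, SextShamt
  // is 32 - 8 - 16 = 8: shifting left and then arithmetic-right by 8
  // sign-extends the value sitting in bits [16, 24) in place.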
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if ((Size == 8 || Size == 16) &&
      !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()))
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

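// Emit a call to the masked cmpxchg intrinsic. On RV64 the i32 operands are
// first sign-extended to i64, since the intrinsics operate on XLen-wide
// values, and the result is truncated back afterwards.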
Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilder<> &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  assert(!RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()));
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::supportsAtomicOperation(const DataLayout &DL,
                                                  const Instruction *AI,
                                                  Type *ValueTy,
                                                  Type *PointerTy,
                                                  Align Alignment) const {
  // FIXME: we currently have to expand CMPXCHG/RMW to libcalls since we are
  // missing the SelectionDAG nodes+expansions to use the explicit addressing
  // mode instructions.
  if (DL.isFatPointer(PointerTy) &&
      !RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()) &&
      (isa<AtomicRMWInst>(AI) || isa<AtomicCmpXchgInst>(AI)))
    return false;
  return TargetLowering::supportsAtomicOperation(DL, AI, ValueTy, PointerTy,
                                                 Alignment);
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI())
             ? RISCV::C10 : RISCV::X10;
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  // This is an index, so always an integer GPR register
  return RISCV::X11;
}

uint32_t RISCVTargetLowering::getExceptionPointerAS() const {
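  // Pure-capability code carries the exception pointer as a capability, which
  // lives in the CHERI capability address space (200).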
  return RISCVABI::isCheriPureCapABI(Subtarget.getTargetABI()) ? 200 : 0;
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when the libcall
  // arguments or return value are of f32 type under an LP64 ABI.
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if ((ABI == RISCVABI::ABI_LP64 || ABI == RISCVABI::ABI_L64PC128) &&
      Type == MVT::f32)
    return false;

  return true;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Do not perform the transformation on riscv32 with the M extension.
    if (!Subtarget.is64Bit() && Subtarget.hasStdExtM())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      if (ConstNode->getAPIntValue().getBitWidth() > 8 * sizeof(int64_t))
        return false;
      int64_t Imm = ConstNode->getSExtValue();
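      // A multiply by such a constant can be rewritten as a shift plus one
      // add/sub, e.g. x * 9 == (x << 3) + x and x * 7 == (x << 3) - x.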
      if (isPowerOf2_64(Imm + 1) || isPowerOf2_64(Imm - 1) ||
          isPowerOf2_64(1 - Imm) || isPowerOf2_64(-1 - Imm))
        return true;
    }
  }

  return false;
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

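// Look up a register by its TableGen name or ABI alias (as used for named
// register global variables); MatchRegisterName and MatchRegisterAltName are
// generated by TableGen via the include above. Only reserved registers may be
// obtained this way.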
Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}
