//===-- RISCVISelLowering.cpp - RISCV DAG Lowering Implementation --------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
// This file defines the interfaces that RISCV uses to lower LLVM code into a
// selection DAG.
//
//===----------------------------------------------------------------------===//

#include "RISCVISelLowering.h"
#include "MCTargetDesc/RISCVMatInt.h"
#include "RISCV.h"
#include "RISCVMachineFunctionInfo.h"
#include "RISCVRegisterInfo.h"
#include "RISCVSubtarget.h"
#include "RISCVTargetMachine.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/Analysis/MemoryLocation.h"
#include "llvm/CodeGen/MachineFrameInfo.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineRegisterInfo.h"
#include "llvm/CodeGen/TargetLoweringObjectFileImpl.h"
#include "llvm/CodeGen/ValueTypes.h"
#include "llvm/IR/DiagnosticInfo.h"
#include "llvm/IR/DiagnosticPrinter.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/IntrinsicsRISCV.h"
#include "llvm/IR/PatternMatch.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/KnownBits.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "riscv-lower"

STATISTIC(NumTailCalls, "Number of tail calls");

RISCVTargetLowering::RISCVTargetLowering(const TargetMachine &TM,
                                         const RISCVSubtarget &STI)
    : TargetLowering(TM), Subtarget(STI) {

  if (Subtarget.isRV32E())
    report_fatal_error("Codegen not yet implemented for RV32E");

  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  assert(ABI != RISCVABI::ABI_Unknown && "Improperly initialised target ABI");

  if ((ABI == RISCVABI::ABI_ILP32F || ABI == RISCVABI::ABI_LP64F) &&
      !Subtarget.hasStdExtF()) {
    errs() << "Hard-float 'f' ABI can't be used for a target that "
              "doesn't support the F instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  } else if ((ABI == RISCVABI::ABI_ILP32D || ABI == RISCVABI::ABI_LP64D) &&
             !Subtarget.hasStdExtD()) {
    errs() << "Hard-float 'd' ABI can't be used for a target that "
              "doesn't support the D instruction set extension (ignoring "
              "target-abi)\n";
    ABI = Subtarget.is64Bit() ? RISCVABI::ABI_LP64 : RISCVABI::ABI_ILP32;
  }

  switch (ABI) {
  default:
    report_fatal_error("Don't know how to lower this ABI");
  case RISCVABI::ABI_ILP32:
  case RISCVABI::ABI_ILP32F:
  case RISCVABI::ABI_ILP32D:
  case RISCVABI::ABI_LP64:
  case RISCVABI::ABI_LP64F:
  case RISCVABI::ABI_LP64D:
    break;
  }

  MVT XLenVT = Subtarget.getXLenVT();

  // Set up the register classes.
  addRegisterClass(XLenVT, &RISCV::GPRRegClass);

  if (Subtarget.hasStdExtZfh())
    addRegisterClass(MVT::f16, &RISCV::FPR16RegClass);
  if (Subtarget.hasStdExtF())
    addRegisterClass(MVT::f32, &RISCV::FPR32RegClass);
  if (Subtarget.hasStdExtD())
    addRegisterClass(MVT::f64, &RISCV::FPR64RegClass);

  static const MVT::SimpleValueType BoolVecVTs[] = {
      MVT::nxv1i1, MVT::nxv2i1, MVT::nxv4i1, MVT::nxv8i1,
      MVT::nxv16i1, MVT::nxv32i1, MVT::nxv64i1};
  static const MVT::SimpleValueType IntVecVTs[] = {
      MVT::nxv1i8, MVT::nxv2i8, MVT::nxv4i8, MVT::nxv8i8, MVT::nxv16i8,
      MVT::nxv32i8, MVT::nxv64i8, MVT::nxv1i16, MVT::nxv2i16, MVT::nxv4i16,
      MVT::nxv8i16, MVT::nxv16i16, MVT::nxv32i16, MVT::nxv1i32, MVT::nxv2i32,
      MVT::nxv4i32, MVT::nxv8i32, MVT::nxv16i32, MVT::nxv1i64, MVT::nxv2i64,
      MVT::nxv4i64, MVT::nxv8i64};
  static const MVT::SimpleValueType F16VecVTs[] = {
      MVT::nxv1f16, MVT::nxv2f16, MVT::nxv4f16,
      MVT::nxv8f16, MVT::nxv16f16, MVT::nxv32f16};
  static const MVT::SimpleValueType F32VecVTs[] = {
      MVT::nxv1f32, MVT::nxv2f32, MVT::nxv4f32, MVT::nxv8f32, MVT::nxv16f32};
  static const MVT::SimpleValueType F64VecVTs[] = {
      MVT::nxv1f64, MVT::nxv2f64, MVT::nxv4f64, MVT::nxv8f64};

  if (Subtarget.hasStdExtV()) {
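    // Map each scalable vector type to a register class by its known minimum
    // size: types of at most 64 bits (one vector register at LMUL<=1) use VR,
    // while larger types use the grouped VRM2/VRM4/VRM8 classes.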
    auto addRegClassForRVV = [this](MVT VT) {
      unsigned Size = VT.getSizeInBits().getKnownMinValue();
      assert(Size <= 512 && isPowerOf2_32(Size));
      const TargetRegisterClass *RC;
      if (Size <= 64)
        RC = &RISCV::VRRegClass;
      else if (Size == 128)
        RC = &RISCV::VRM2RegClass;
      else if (Size == 256)
        RC = &RISCV::VRM4RegClass;
      else
        RC = &RISCV::VRM8RegClass;

      addRegisterClass(VT, RC);
    };

    for (MVT VT : BoolVecVTs)
      addRegClassForRVV(VT);
    for (MVT VT : IntVecVTs)
      addRegClassForRVV(VT);

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtF())
      for (MVT VT : F32VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.hasStdExtD())
      for (MVT VT : F64VecVTs)
        addRegClassForRVV(VT);

    if (Subtarget.useRVVForFixedLengthVectors()) {
      auto addRegClassForFixedVectors = [this](MVT VT) {
        MVT ContainerVT = getContainerForFixedLengthVector(VT);
        unsigned RCID = getRegClassIDForVecVT(ContainerVT);
        const RISCVRegisterInfo &TRI = *Subtarget.getRegisterInfo();
        addRegisterClass(VT, TRI.getRegClass(RCID));
      };
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes())
        if (useRVVForFixedLengthVectorVT(VT))
          addRegClassForFixedVectors(VT);
    }
  }

  // Compute derived properties from the register classes.
  computeRegisterProperties(STI.getRegisterInfo());

  setStackPointerRegisterToSaveRestore(RISCV::X2);

  for (auto N : {ISD::EXTLOAD, ISD::SEXTLOAD, ISD::ZEXTLOAD})
    setLoadExtAction(N, XLenVT, MVT::i1, Promote);

  // TODO: add all necessary setOperationAction calls.
  setOperationAction(ISD::DYNAMIC_STACKALLOC, XLenVT, Expand);

  setOperationAction(ISD::BR_JT, MVT::Other, Expand);
  setOperationAction(ISD::BR_CC, XLenVT, Expand);
  setOperationAction(ISD::BRCOND, MVT::Other, Custom);
  setOperationAction(ISD::SELECT_CC, XLenVT, Expand);

  setOperationAction(ISD::STACKSAVE, MVT::Other, Expand);
  setOperationAction(ISD::STACKRESTORE, MVT::Other, Expand);

  setOperationAction(ISD::VASTART, MVT::Other, Custom);
  setOperationAction(ISD::VAARG, MVT::Other, Expand);
  setOperationAction(ISD::VACOPY, MVT::Other, Expand);
  setOperationAction(ISD::VAEND, MVT::Other, Expand);

  setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i1, Expand);
  if (!Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i8, Expand);
    setOperationAction(ISD::SIGN_EXTEND_INREG, MVT::i16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::ADD, MVT::i32, Custom);
    setOperationAction(ISD::SUB, MVT::i32, Custom);
    setOperationAction(ISD::SHL, MVT::i32, Custom);
    setOperationAction(ISD::SRA, MVT::i32, Custom);
    setOperationAction(ISD::SRL, MVT::i32, Custom);

    setOperationAction(ISD::UADDO, MVT::i32, Custom);
    setOperationAction(ISD::USUBO, MVT::i32, Custom);
    setOperationAction(ISD::UADDSAT, MVT::i32, Custom);
    setOperationAction(ISD::USUBSAT, MVT::i32, Custom);
  } else {
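    // Clearing a libcall name tells the legalizer that no runtime routine is
    // available, so these 128-bit shifts and multiplies are expanded inline
    // on RV32 instead of emitting calls.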
    setLibcallName(RTLIB::SHL_I128, nullptr);
    setLibcallName(RTLIB::SRL_I128, nullptr);
    setLibcallName(RTLIB::SRA_I128, nullptr);
    setLibcallName(RTLIB::MUL_I128, nullptr);
    setLibcallName(RTLIB::MULO_I64, nullptr);
  }

  if (!Subtarget.hasStdExtM()) {
    setOperationAction(ISD::MUL, XLenVT, Expand);
    setOperationAction(ISD::MULHS, XLenVT, Expand);
    setOperationAction(ISD::MULHU, XLenVT, Expand);
    setOperationAction(ISD::SDIV, XLenVT, Expand);
    setOperationAction(ISD::UDIV, XLenVT, Expand);
    setOperationAction(ISD::SREM, XLenVT, Expand);
    setOperationAction(ISD::UREM, XLenVT, Expand);
  } else {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::MUL, MVT::i32, Custom);
      setOperationAction(ISD::MUL, MVT::i128, Custom);

      setOperationAction(ISD::SDIV, MVT::i8, Custom);
      setOperationAction(ISD::UDIV, MVT::i8, Custom);
      setOperationAction(ISD::UREM, MVT::i8, Custom);
      setOperationAction(ISD::SDIV, MVT::i16, Custom);
      setOperationAction(ISD::UDIV, MVT::i16, Custom);
      setOperationAction(ISD::UREM, MVT::i16, Custom);
      setOperationAction(ISD::SDIV, MVT::i32, Custom);
      setOperationAction(ISD::UDIV, MVT::i32, Custom);
      setOperationAction(ISD::UREM, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::MUL, MVT::i64, Custom);
    }
  }

  setOperationAction(ISD::SDIVREM, XLenVT, Expand);
  setOperationAction(ISD::UDIVREM, XLenVT, Expand);
  setOperationAction(ISD::SMUL_LOHI, XLenVT, Expand);
  setOperationAction(ISD::UMUL_LOHI, XLenVT, Expand);

  setOperationAction(ISD::SHL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRL_PARTS, XLenVT, Custom);
  setOperationAction(ISD::SRA_PARTS, XLenVT, Custom);

  if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::ROTL, MVT::i32, Custom);
      setOperationAction(ISD::ROTR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::ROTL, XLenVT, Expand);
    setOperationAction(ISD::ROTR, XLenVT, Expand);
  }

  if (Subtarget.hasStdExtZbp()) {
    // Custom lower bswap/bitreverse so we can convert them to GREVI to enable
    // more combining.
    setOperationAction(ISD::BITREVERSE, XLenVT, Custom);
    setOperationAction(ISD::BSWAP, XLenVT, Custom);
    setOperationAction(ISD::BITREVERSE, MVT::i8, Custom);
    // BSWAP i8 doesn't exist.
    setOperationAction(ISD::BITREVERSE, MVT::i16, Custom);
    setOperationAction(ISD::BSWAP, MVT::i16, Custom);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::BITREVERSE, MVT::i32, Custom);
      setOperationAction(ISD::BSWAP, MVT::i32, Custom);
    }
  } else {
    // With Zbb we have an XLen rev8 instruction, but not GREVI. So we'll
    // pattern match it directly in isel.
    setOperationAction(ISD::BSWAP, XLenVT,
                       Subtarget.hasStdExtZbb() ? Legal : Expand);
  }

  if (Subtarget.hasStdExtZbb()) {
    setOperationAction(ISD::SMIN, XLenVT, Legal);
    setOperationAction(ISD::SMAX, XLenVT, Legal);
    setOperationAction(ISD::UMIN, XLenVT, Legal);
    setOperationAction(ISD::UMAX, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::CTTZ, MVT::i32, Custom);
      setOperationAction(ISD::CTTZ_ZERO_UNDEF, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ, MVT::i32, Custom);
      setOperationAction(ISD::CTLZ_ZERO_UNDEF, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::CTTZ, XLenVT, Expand);
    setOperationAction(ISD::CTLZ, XLenVT, Expand);
    setOperationAction(ISD::CTPOP, XLenVT, Expand);
  }

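  // The draft Zbt extension provides funnel shifts (fsl/fsr) and a
  // conditional-move instruction (cmov), so SELECT is natively legal there
  // and FSHL/FSHR are custom-lowered to match the funnel-shift instructions.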
  if (Subtarget.hasStdExtZbt()) {
    setOperationAction(ISD::FSHL, XLenVT, Custom);
    setOperationAction(ISD::FSHR, XLenVT, Custom);
    setOperationAction(ISD::SELECT, XLenVT, Legal);

    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::FSHL, MVT::i32, Custom);
      setOperationAction(ISD::FSHR, MVT::i32, Custom);
    }
  } else {
    setOperationAction(ISD::SELECT, XLenVT, Custom);
  }

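  // The F and D extensions only provide the FEQ/FLT/FLE scalar comparisons,
  // so every other condition code must be expanded into a sequence of those
  // plus integer logic.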
  static const ISD::CondCode FPCCToExpand[] = {
      ISD::SETOGT, ISD::SETOGE, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
      ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUNE, ISD::SETGT,
      ISD::SETGE, ISD::SETNE, ISD::SETO, ISD::SETUO};

  static const ISD::NodeType FPOpToExpand[] = {
      ISD::FSIN, ISD::FCOS, ISD::FSINCOS, ISD::FPOW,
      ISD::FREM, ISD::FP16_TO_FP, ISD::FP_TO_FP16};

  if (Subtarget.hasStdExtZfh())
    setOperationAction(ISD::BITCAST, MVT::i16, Custom);

  if (Subtarget.hasStdExtZfh()) {
    setOperationAction(ISD::FMINNUM, MVT::f16, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f16, Legal);
    setOperationAction(ISD::LRINT, MVT::f16, Legal);
    setOperationAction(ISD::LLRINT, MVT::f16, Legal);
    setOperationAction(ISD::LROUND, MVT::f16, Legal);
    setOperationAction(ISD::LLROUND, MVT::f16, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f16, Expand);
    setOperationAction(ISD::SELECT, MVT::f16, Custom);
    setOperationAction(ISD::BR_CC, MVT::f16, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FMINNUM, MVT::f32, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f32, Legal);
    setOperationAction(ISD::LRINT, MVT::f32, Legal);
    setOperationAction(ISD::LLRINT, MVT::f32, Legal);
    setOperationAction(ISD::LROUND, MVT::f32, Legal);
    setOperationAction(ISD::LLROUND, MVT::f32, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f32, Expand);
    setOperationAction(ISD::SELECT, MVT::f32, Custom);
    setOperationAction(ISD::BR_CC, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f32, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f32, MVT::f16, Expand);
    setTruncStoreAction(MVT::f32, MVT::f16, Expand);
  }

  if (Subtarget.hasStdExtF() && Subtarget.is64Bit())
    setOperationAction(ISD::BITCAST, MVT::i32, Custom);

  if (Subtarget.hasStdExtD()) {
    setOperationAction(ISD::FMINNUM, MVT::f64, Legal);
    setOperationAction(ISD::FMAXNUM, MVT::f64, Legal);
    setOperationAction(ISD::LRINT, MVT::f64, Legal);
    setOperationAction(ISD::LLRINT, MVT::f64, Legal);
    setOperationAction(ISD::LROUND, MVT::f64, Legal);
    setOperationAction(ISD::LLROUND, MVT::f64, Legal);
    for (auto CC : FPCCToExpand)
      setCondCodeAction(CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT_CC, MVT::f64, Expand);
    setOperationAction(ISD::SELECT, MVT::f64, Custom);
    setOperationAction(ISD::BR_CC, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f32, Expand);
    setTruncStoreAction(MVT::f64, MVT::f32, Expand);
    for (auto Op : FPOpToExpand)
      setOperationAction(Op, MVT::f64, Expand);
    setLoadExtAction(ISD::EXTLOAD, MVT::f64, MVT::f16, Expand);
    setTruncStoreAction(MVT::f64, MVT::f16, Expand);
  }

  if (Subtarget.is64Bit()) {
    setOperationAction(ISD::FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::FP_TO_SINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_UINT, MVT::i32, Custom);
    setOperationAction(ISD::STRICT_FP_TO_SINT, MVT::i32, Custom);
  }

  if (Subtarget.hasStdExtF()) {
    setOperationAction(ISD::FP_TO_UINT_SAT, XLenVT, Custom);
    setOperationAction(ISD::FP_TO_SINT_SAT, XLenVT, Custom);

    setOperationAction(ISD::FLT_ROUNDS_, XLenVT, Custom);
    setOperationAction(ISD::SET_ROUNDING, MVT::Other, Custom);
  }

  setOperationAction(ISD::GlobalAddress, XLenVT, Custom);
  setOperationAction(ISD::BlockAddress, XLenVT, Custom);
  setOperationAction(ISD::ConstantPool, XLenVT, Custom);
  setOperationAction(ISD::JumpTable, XLenVT, Custom);

  setOperationAction(ISD::GlobalTLSAddress, XLenVT, Custom);

  // TODO: On M-mode only targets, the cycle[h] CSR may not be present.
  // Unfortunately this can't be determined just from the ISA naming string.
  setOperationAction(ISD::READCYCLECOUNTER, MVT::i64,
                     Subtarget.is64Bit() ? Legal : Custom);

  setOperationAction(ISD::TRAP, MVT::Other, Legal);
  setOperationAction(ISD::DEBUGTRAP, MVT::Other, Legal);
  setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::Other, Custom);
  if (Subtarget.is64Bit())
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i32, Custom);

  if (Subtarget.hasStdExtA()) {
    setMaxAtomicSizeInBitsSupported(Subtarget.getXLen());
    setMinCmpXchgSizeInBits(32);
  } else {
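    // Without the A extension there is no native atomic support; a maximum
    // supported size of zero makes AtomicExpandPass lower every atomic
    // operation to an __atomic_* libcall.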
    setMaxAtomicSizeInBitsSupported(0);
  }

  setBooleanContents(ZeroOrOneBooleanContent);

  if (Subtarget.hasStdExtV()) {
    setBooleanVectorContents(ZeroOrOneBooleanContent);

    setOperationAction(ISD::VSCALE, XLenVT, Custom);

    // RVV intrinsics may have illegal operands.
    // We also need to custom legalize vmv.x.s.
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i16, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i8, Custom);
    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i16, Custom);
    if (Subtarget.is64Bit()) {
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i32, Custom);
    } else {
      setOperationAction(ISD::INTRINSIC_WO_CHAIN, MVT::i64, Custom);
      setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::i64, Custom);
    }

    setOperationAction(ISD::INTRINSIC_W_CHAIN, MVT::Other, Custom);
    setOperationAction(ISD::INTRINSIC_VOID, MVT::Other, Custom);

    static const unsigned IntegerVPOps[] = {
        ISD::VP_ADD, ISD::VP_SUB, ISD::VP_MUL,
        ISD::VP_SDIV, ISD::VP_UDIV, ISD::VP_SREM,
        ISD::VP_UREM, ISD::VP_AND, ISD::VP_OR,
        ISD::VP_XOR, ISD::VP_ASHR, ISD::VP_LSHR,
        ISD::VP_SHL, ISD::VP_REDUCE_ADD, ISD::VP_REDUCE_AND,
        ISD::VP_REDUCE_OR, ISD::VP_REDUCE_XOR, ISD::VP_REDUCE_SMAX,
        ISD::VP_REDUCE_SMIN, ISD::VP_REDUCE_UMAX, ISD::VP_REDUCE_UMIN};

    static const unsigned FloatingPointVPOps[] = {
        ISD::VP_FADD, ISD::VP_FSUB, ISD::VP_FMUL,
        ISD::VP_FDIV, ISD::VP_REDUCE_FADD, ISD::VP_REDUCE_SEQ_FADD,
        ISD::VP_REDUCE_FMIN, ISD::VP_REDUCE_FMAX};

    if (!Subtarget.is64Bit()) {
      // We must custom-lower certain vXi64 operations on RV32 due to the
      // vector element type being illegal.
      setOperationAction(ISD::INSERT_VECTOR_ELT, MVT::i64, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, MVT::i64, Custom);

      setOperationAction(ISD::VECREDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, MVT::i64, Custom);

      setOperationAction(ISD::VP_REDUCE_ADD, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_AND, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_OR, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_XOR, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_SMAX, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_SMIN, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_UMAX, MVT::i64, Custom);
      setOperationAction(ISD::VP_REDUCE_UMIN, MVT::i64, Custom);
    }

    for (MVT VT : BoolVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);

      // Mask VTs are custom-expanded into a series of standard nodes
      setOperationAction(ISD::TRUNCATE, VT, Custom);
      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);
      setOperationAction(ISD::VSELECT, VT, Expand);

      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

      setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
      setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
      setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      // Expand all extending loads to types larger than this, and truncating
      // stores from types larger than this.
      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(OtherVT, VT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }
    }

    for (MVT VT : IntVecVTs) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);

      setOperationAction(ISD::SMIN, VT, Legal);
      setOperationAction(ISD::SMAX, VT, Legal);
      setOperationAction(ISD::UMIN, VT, Legal);
      setOperationAction(ISD::UMAX, VT, Legal);

      setOperationAction(ISD::ROTL, VT, Expand);
      setOperationAction(ISD::ROTR, VT, Expand);

      // Custom-lower extensions and truncations from/to mask types.
      setOperationAction(ISD::ANY_EXTEND, VT, Custom);
      setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
      setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

      // RVV has native int->float & float->int conversions where the
      // element type sizes are within one power-of-two of each other. Any
      // wider distances between type sizes have to be lowered as sequences
      // which progressively narrow the gap in stages.
      setOperationAction(ISD::SINT_TO_FP, VT, Custom);
      setOperationAction(ISD::UINT_TO_FP, VT, Custom);
      setOperationAction(ISD::FP_TO_SINT, VT, Custom);
      setOperationAction(ISD::FP_TO_UINT, VT, Custom);

      setOperationAction(ISD::SADDSAT, VT, Legal);
      setOperationAction(ISD::UADDSAT, VT, Legal);
      setOperationAction(ISD::SSUBSAT, VT, Legal);
      setOperationAction(ISD::USUBSAT, VT, Legal);

      // Integer VTs are lowered as a series of "RISCVISD::TRUNCATE_VECTOR_VL"
      // nodes which truncate by one power of two at a time.
      setOperationAction(ISD::TRUNCATE, VT, Custom);

      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

      // Custom-lower reduction operations to set up the corresponding custom
      // nodes' operands.
      setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
      setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
      setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

      for (unsigned VPOpc : IntegerVPOps)
        setOperationAction(VPOpc, VT, Custom);

      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::VP_LOAD, VT, Custom);
      setOperationAction(ISD::VP_STORE, VT, Custom);
      setOperationAction(ISD::VP_GATHER, VT, Custom);
      setOperationAction(ISD::VP_SCATTER, VT, Custom);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::STEP_VECTOR, VT, Custom);
      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      for (MVT OtherVT : MVT::integer_scalable_vector_valuetypes()) {
        setTruncStoreAction(VT, OtherVT, Expand);
        setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
        setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
      }
    }

    // Expand various CCs to best match the RVV ISA, which natively supports
    // UNE but no other unordered comparisons, and supports all ordered
    // comparisons except ONE. Additionally, we expand GT,OGT,GE,OGE for
    // optimization purposes; they are expanded to their swapped-operand CCs
    // (LT,OLT,LE,OLE), and we pattern-match those back to the "original",
    // swapping operands once more. This way we catch both operations and both
    // "vf" and "fv" forms with fewer patterns.
    static const ISD::CondCode VFPCCToExpand[] = {
        ISD::SETO, ISD::SETONE, ISD::SETUEQ, ISD::SETUGT,
        ISD::SETUGE, ISD::SETULT, ISD::SETULE, ISD::SETUO,
        ISD::SETGT, ISD::SETOGT, ISD::SETGE, ISD::SETOGE,
    };

    // Sets common operation actions on RVV floating-point vector types.
    const auto SetCommonVFPActions = [&](MVT VT) {
      setOperationAction(ISD::SPLAT_VECTOR, VT, Legal);
      // RVV has native FP_ROUND & FP_EXTEND conversions where the element type
      // sizes are within one power-of-two of each other. Therefore conversions
      // between vXf16 and vXf64 must be lowered as sequences which convert via
      // vXf32.
      setOperationAction(ISD::FP_ROUND, VT, Custom);
      setOperationAction(ISD::FP_EXTEND, VT, Custom);
      // Custom-lower insert/extract operations to simplify patterns.
      setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
      setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);
      // Expand various condition codes (explained above).
      for (auto CC : VFPCCToExpand)
        setCondCodeAction(CC, VT, Expand);

      setOperationAction(ISD::FMINNUM, VT, Legal);
      setOperationAction(ISD::FMAXNUM, VT, Legal);

      setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
      setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

      setOperationAction(ISD::FCOPYSIGN, VT, Legal);

      setOperationAction(ISD::LOAD, VT, Custom);
      setOperationAction(ISD::STORE, VT, Custom);

      setOperationAction(ISD::MLOAD, VT, Custom);
      setOperationAction(ISD::MSTORE, VT, Custom);
      setOperationAction(ISD::MGATHER, VT, Custom);
      setOperationAction(ISD::MSCATTER, VT, Custom);

      setOperationAction(ISD::VP_LOAD, VT, Custom);
      setOperationAction(ISD::VP_STORE, VT, Custom);
      setOperationAction(ISD::VP_GATHER, VT, Custom);
      setOperationAction(ISD::VP_SCATTER, VT, Custom);

      setOperationAction(ISD::SELECT, VT, Custom);
      setOperationAction(ISD::SELECT_CC, VT, Expand);

      setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
      setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
      setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

      setOperationAction(ISD::VECTOR_REVERSE, VT, Custom);

      for (unsigned VPOpc : FloatingPointVPOps)
        setOperationAction(VPOpc, VT, Custom);
    };

    // Sets common extload/truncstore actions on RVV floating-point vector
    // types.
    const auto SetCommonVFPExtLoadTruncStoreActions =
        [&](MVT VT, ArrayRef<MVT::SimpleValueType> SmallerVTs) {
          for (auto SmallVT : SmallerVTs) {
            setTruncStoreAction(VT, SmallVT, Expand);
            setLoadExtAction(ISD::EXTLOAD, VT, SmallVT, Expand);
          }
        };

    if (Subtarget.hasStdExtZfh())
      for (MVT VT : F16VecVTs)
        SetCommonVFPActions(VT);

    for (MVT VT : F32VecVTs) {
      if (Subtarget.hasStdExtF())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
    }

    for (MVT VT : F64VecVTs) {
      if (Subtarget.hasStdExtD())
        SetCommonVFPActions(VT);
      SetCommonVFPExtLoadTruncStoreActions(VT, F16VecVTs);
      SetCommonVFPExtLoadTruncStoreActions(VT, F32VecVTs);
    }

    if (Subtarget.useRVVForFixedLengthVectors()) {
      for (MVT VT : MVT::integer_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::integer_fixedlen_vector_valuetypes()) {
          setTruncStoreAction(VT, OtherVT, Expand);
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::SEXTLOAD, OtherVT, VT, Expand);
          setLoadExtAction(ISD::ZEXTLOAD, OtherVT, VT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);

        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);

        setOperationAction(ISD::SETCC, VT, Custom);

        setOperationAction(ISD::SELECT, VT, Custom);

        setOperationAction(ISD::TRUNCATE, VT, Custom);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_AND, VT, Custom);
        setOperationAction(ISD::VECREDUCE_OR, VT, Custom);
        setOperationAction(ISD::VECREDUCE_XOR, VT, Custom);

        setOperationAction(ISD::VP_REDUCE_AND, VT, Custom);
        setOperationAction(ISD::VP_REDUCE_OR, VT, Custom);
        setOperationAction(ISD::VP_REDUCE_XOR, VT, Custom);

        setOperationAction(ISD::SINT_TO_FP, VT, Custom);
        setOperationAction(ISD::UINT_TO_FP, VT, Custom);
        setOperationAction(ISD::FP_TO_SINT, VT, Custom);
        setOperationAction(ISD::FP_TO_UINT, VT, Custom);

        // Operations below differ between mask vectors and other vectors.
        if (VT.getVectorElementType() == MVT::i1) {
          setOperationAction(ISD::AND, VT, Custom);
          setOperationAction(ISD::OR, VT, Custom);
          setOperationAction(ISD::XOR, VT, Custom);
          continue;
        }

        // Use SPLAT_VECTOR to prevent type legalization from destroying the
        // splats when type legalizing i64 scalar on RV32.
        // FIXME: Use SPLAT_VECTOR for all types? DAGCombine probably needs
        // improvements first.
        if (!Subtarget.is64Bit() && VT.getVectorElementType() == MVT::i64) {
          setOperationAction(ISD::SPLAT_VECTOR, VT, Custom);
          setOperationAction(ISD::SPLAT_VECTOR_PARTS, VT, Custom);
        }

        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);

        setOperationAction(ISD::VP_LOAD, VT, Custom);
        setOperationAction(ISD::VP_STORE, VT, Custom);
        setOperationAction(ISD::VP_GATHER, VT, Custom);
        setOperationAction(ISD::VP_SCATTER, VT, Custom);

        setOperationAction(ISD::ADD, VT, Custom);
        setOperationAction(ISD::MUL, VT, Custom);
        setOperationAction(ISD::SUB, VT, Custom);
        setOperationAction(ISD::AND, VT, Custom);
        setOperationAction(ISD::OR, VT, Custom);
        setOperationAction(ISD::XOR, VT, Custom);
        setOperationAction(ISD::SDIV, VT, Custom);
        setOperationAction(ISD::SREM, VT, Custom);
        setOperationAction(ISD::UDIV, VT, Custom);
        setOperationAction(ISD::UREM, VT, Custom);
        setOperationAction(ISD::SHL, VT, Custom);
        setOperationAction(ISD::SRA, VT, Custom);
        setOperationAction(ISD::SRL, VT, Custom);

        setOperationAction(ISD::SMIN, VT, Custom);
        setOperationAction(ISD::SMAX, VT, Custom);
        setOperationAction(ISD::UMIN, VT, Custom);
        setOperationAction(ISD::UMAX, VT, Custom);
        setOperationAction(ISD::ABS, VT, Custom);

        setOperationAction(ISD::MULHS, VT, Custom);
        setOperationAction(ISD::MULHU, VT, Custom);

        setOperationAction(ISD::SADDSAT, VT, Custom);
        setOperationAction(ISD::UADDSAT, VT, Custom);
        setOperationAction(ISD::SSUBSAT, VT, Custom);
        setOperationAction(ISD::USUBSAT, VT, Custom);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::ANY_EXTEND, VT, Custom);
        setOperationAction(ISD::SIGN_EXTEND, VT, Custom);
        setOperationAction(ISD::ZERO_EXTEND, VT, Custom);

        // Custom-lower reduction operations to set up the corresponding
        // custom nodes' operands.
        setOperationAction(ISD::VECREDUCE_ADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMAX, VT, Custom);
        setOperationAction(ISD::VECREDUCE_UMIN, VT, Custom);

        for (unsigned VPOpc : IntegerVPOps)
          setOperationAction(VPOpc, VT, Custom);
      }

      for (MVT VT : MVT::fp_fixedlen_vector_valuetypes()) {
        if (!useRVVForFixedLengthVectorVT(VT))
          continue;

        // By default everything must be expanded.
        for (unsigned Op = 0; Op < ISD::BUILTIN_OP_END; ++Op)
          setOperationAction(Op, VT, Expand);
        for (MVT OtherVT : MVT::fp_fixedlen_vector_valuetypes()) {
          setLoadExtAction(ISD::EXTLOAD, OtherVT, VT, Expand);
          setTruncStoreAction(VT, OtherVT, Expand);
        }

        // We use EXTRACT_SUBVECTOR as a "cast" from scalable to fixed.
        setOperationAction(ISD::INSERT_SUBVECTOR, VT, Custom);
        setOperationAction(ISD::EXTRACT_SUBVECTOR, VT, Custom);

        setOperationAction(ISD::BUILD_VECTOR, VT, Custom);
        setOperationAction(ISD::CONCAT_VECTORS, VT, Custom);
        setOperationAction(ISD::VECTOR_SHUFFLE, VT, Custom);
        setOperationAction(ISD::INSERT_VECTOR_ELT, VT, Custom);
        setOperationAction(ISD::EXTRACT_VECTOR_ELT, VT, Custom);

        setOperationAction(ISD::LOAD, VT, Custom);
        setOperationAction(ISD::STORE, VT, Custom);
        setOperationAction(ISD::MLOAD, VT, Custom);
        setOperationAction(ISD::MSTORE, VT, Custom);
        setOperationAction(ISD::MGATHER, VT, Custom);
        setOperationAction(ISD::MSCATTER, VT, Custom);

        setOperationAction(ISD::VP_LOAD, VT, Custom);
        setOperationAction(ISD::VP_STORE, VT, Custom);
        setOperationAction(ISD::VP_GATHER, VT, Custom);
        setOperationAction(ISD::VP_SCATTER, VT, Custom);

        setOperationAction(ISD::FADD, VT, Custom);
        setOperationAction(ISD::FSUB, VT, Custom);
        setOperationAction(ISD::FMUL, VT, Custom);
        setOperationAction(ISD::FDIV, VT, Custom);
        setOperationAction(ISD::FNEG, VT, Custom);
        setOperationAction(ISD::FABS, VT, Custom);
        setOperationAction(ISD::FCOPYSIGN, VT, Custom);
        setOperationAction(ISD::FSQRT, VT, Custom);
        setOperationAction(ISD::FMA, VT, Custom);
        setOperationAction(ISD::FMINNUM, VT, Custom);
        setOperationAction(ISD::FMAXNUM, VT, Custom);

        setOperationAction(ISD::FP_ROUND, VT, Custom);
        setOperationAction(ISD::FP_EXTEND, VT, Custom);

        for (auto CC : VFPCCToExpand)
          setCondCodeAction(CC, VT, Expand);

        setOperationAction(ISD::VSELECT, VT, Custom);
        setOperationAction(ISD::SELECT, VT, Custom);
        setOperationAction(ISD::SELECT_CC, VT, Expand);

        setOperationAction(ISD::BITCAST, VT, Custom);

        setOperationAction(ISD::VECREDUCE_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_SEQ_FADD, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMIN, VT, Custom);
        setOperationAction(ISD::VECREDUCE_FMAX, VT, Custom);

        for (unsigned VPOpc : FloatingPointVPOps)
          setOperationAction(VPOpc, VT, Custom);
      }

      // Custom-legalize bitcasts from fixed-length vectors to scalar types.
      setOperationAction(ISD::BITCAST, MVT::i8, Custom);
      setOperationAction(ISD::BITCAST, MVT::i16, Custom);
      setOperationAction(ISD::BITCAST, MVT::i32, Custom);
      setOperationAction(ISD::BITCAST, MVT::i64, Custom);
      setOperationAction(ISD::BITCAST, MVT::f16, Custom);
      setOperationAction(ISD::BITCAST, MVT::f32, Custom);
      setOperationAction(ISD::BITCAST, MVT::f64, Custom);
    }
  }

  // Function alignments.
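  // Base RISC-V instructions are 4 bytes wide; the compressed (C) extension
  // adds 2-byte instructions, so functions only need 2-byte alignment there.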
  const Align FunctionAlignment(Subtarget.hasStdExtC() ? 2 : 4);
  setMinFunctionAlignment(FunctionAlignment);
  setPrefFunctionAlignment(FunctionAlignment);

  setMinimumJumpTableEntries(5);

  // Jumps are expensive, compared to logic
  setJumpIsExpensive();

  // We can use any register for comparisons
  setHasMultipleConditionRegisters();

  setTargetDAGCombine(ISD::ADD);
  setTargetDAGCombine(ISD::SUB);
  setTargetDAGCombine(ISD::AND);
  setTargetDAGCombine(ISD::OR);
  setTargetDAGCombine(ISD::XOR);
  setTargetDAGCombine(ISD::ANY_EXTEND);
  setTargetDAGCombine(ISD::ZERO_EXTEND);
  if (Subtarget.hasStdExtV()) {
    setTargetDAGCombine(ISD::FCOPYSIGN);
    setTargetDAGCombine(ISD::MGATHER);
    setTargetDAGCombine(ISD::MSCATTER);
    setTargetDAGCombine(ISD::VP_GATHER);
    setTargetDAGCombine(ISD::VP_SCATTER);
    setTargetDAGCombine(ISD::SRA);
    setTargetDAGCombine(ISD::SRL);
    setTargetDAGCombine(ISD::SHL);
    setTargetDAGCombine(ISD::STORE);
  }
}

EVT RISCVTargetLowering::getSetCCResultType(const DataLayout &DL,
                                            LLVMContext &Context,
                                            EVT VT) const {
  if (!VT.isVector())
    return getPointerTy(DL);
  if (Subtarget.hasStdExtV() &&
      (VT.isScalableVector() || Subtarget.useRVVForFixedLengthVectors()))
    return EVT::getVectorVT(Context, MVT::i1, VT.getVectorElementCount());
  return VT.changeVectorElementTypeToInteger();
}

MVT RISCVTargetLowering::getVPExplicitVectorLengthTy() const {
  return Subtarget.getXLenVT();
}

bool RISCVTargetLowering::getTgtMemIntrinsic(IntrinsicInfo &Info,
                                             const CallInst &I,
                                             MachineFunction &MF,
                                             unsigned Intrinsic) const {
  auto &DL = I.getModule()->getDataLayout();
  switch (Intrinsic) {
  default:
    return false;
  case Intrinsic::riscv_masked_atomicrmw_xchg_i32:
  case Intrinsic::riscv_masked_atomicrmw_add_i32:
  case Intrinsic::riscv_masked_atomicrmw_sub_i32:
  case Intrinsic::riscv_masked_atomicrmw_nand_i32:
  case Intrinsic::riscv_masked_atomicrmw_max_i32:
  case Intrinsic::riscv_masked_atomicrmw_min_i32:
  case Intrinsic::riscv_masked_atomicrmw_umax_i32:
  case Intrinsic::riscv_masked_atomicrmw_umin_i32:
  case Intrinsic::riscv_masked_cmpxchg_i32: {
    PointerType *PtrTy = cast<PointerType>(I.getArgOperand(0)->getType());
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.memVT = MVT::getVT(PtrTy->getElementType());
    Info.ptrVal = I.getArgOperand(0);
    Info.offset = 0;
    Info.align = Align(4);
    Info.flags = MachineMemOperand::MOLoad | MachineMemOperand::MOStore |
                 MachineMemOperand::MOVolatile;
    return true;
  }
  case Intrinsic::riscv_masked_strided_load:
    Info.opc = ISD::INTRINSIC_W_CHAIN;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT = getValueType(DL, I.getType()->getScalarType());
    Info.align = Align(DL.getTypeSizeInBits(I.getType()->getScalarType()) / 8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOLoad;
    return true;
  case Intrinsic::riscv_masked_strided_store:
    Info.opc = ISD::INTRINSIC_VOID;
    Info.ptrVal = I.getArgOperand(1);
    Info.memVT =
        getValueType(DL, I.getArgOperand(0)->getType()->getScalarType());
    Info.align = Align(
        DL.getTypeSizeInBits(I.getArgOperand(0)->getType()->getScalarType()) /
        8);
    Info.size = MemoryLocation::UnknownSize;
    Info.flags |= MachineMemOperand::MOStore;
    return true;
  }
}

bool RISCVTargetLowering::isLegalAddressingMode(const DataLayout &DL,
                                                const AddrMode &AM, Type *Ty,
                                                unsigned AS,
                                                Instruction *I) const {
  // No global is ever allowed as a base.
  if (AM.BaseGV)
    return false;

  // Require a 12-bit signed offset.
  if (!isInt<12>(AM.BaseOffs))
    return false;

  switch (AM.Scale) {
  case 0: // "r+i" or just "i", depending on HasBaseReg.
    break;
  case 1:
    if (!AM.HasBaseReg) // allow "r+i".
      break;
    return false; // disallow "r+r" or "r+r+i".
  default:
    return false;
  }

  return true;
}

bool RISCVTargetLowering::isLegalICmpImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

bool RISCVTargetLowering::isLegalAddImmediate(int64_t Imm) const {
  return isInt<12>(Imm);
}

// On RV32, 64-bit integers are split into their high and low parts and held
// in two different registers, so the trunc is free since the low register can
// just be used.
bool RISCVTargetLowering::isTruncateFree(Type *SrcTy, Type *DstTy) const {
  if (Subtarget.is64Bit() || !SrcTy->isIntegerTy() || !DstTy->isIntegerTy())
    return false;
  unsigned SrcBits = SrcTy->getPrimitiveSizeInBits();
  unsigned DestBits = DstTy->getPrimitiveSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isTruncateFree(EVT SrcVT, EVT DstVT) const {
  if (Subtarget.is64Bit() || SrcVT.isVector() || DstVT.isVector() ||
      !SrcVT.isInteger() || !DstVT.isInteger())
    return false;
  unsigned SrcBits = SrcVT.getSizeInBits();
  unsigned DestBits = DstVT.getSizeInBits();
  return (SrcBits == 64 && DestBits == 32);
}

bool RISCVTargetLowering::isZExtFree(SDValue Val, EVT VT2) const {
  // Zexts are free if they can be combined with a load.
  if (auto *LD = dyn_cast<LoadSDNode>(Val)) {
    EVT MemVT = LD->getMemoryVT();
    if ((MemVT == MVT::i8 || MemVT == MVT::i16 ||
         (Subtarget.is64Bit() && MemVT == MVT::i32)) &&
        (LD->getExtensionType() == ISD::NON_EXTLOAD ||
         LD->getExtensionType() == ISD::ZEXTLOAD))
      return true;
  }

  return TargetLowering::isZExtFree(Val, VT2);
}

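// RV64 keeps 32-bit values sign-extended in 64-bit registers: the *W
// instructions (ADDIW, SLLIW, ...) all sign-extend their results, so a sign
// extension from i32 to i64 is usually free, whereas a zero extension needs
// extra instructions.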
bool RISCVTargetLowering::isSExtCheaperThanZExt(EVT SrcVT, EVT DstVT) const {
  return Subtarget.is64Bit() && SrcVT == MVT::i32 && DstVT == MVT::i64;
}

bool RISCVTargetLowering::isCheapToSpeculateCttz() const {
  return Subtarget.hasStdExtZbb();
}

bool RISCVTargetLowering::isCheapToSpeculateCtlz() const {
  return Subtarget.hasStdExtZbb();
}

/// Check if sinking \p I's operands to I's basic block is profitable, because
/// the operands can be folded into a target instruction, e.g.
/// splats of scalars can fold into vector instructions.
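///
/// For example, sinking the splat in
///   %head = insertelement <4 x i32> undef, i32 %x, i32 0
///   %splat = shufflevector <4 x i32> %head, <4 x i32> undef,
///                          <4 x i32> zeroinitializer
///   %r = add <4 x i32> %v, %splat
/// next to the add lets instruction selection fold the scalar operand into a
/// vadd.vx rather than materialising the splat in a vector register.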
bool RISCVTargetLowering::shouldSinkOperands(
    Instruction *I, SmallVectorImpl<Use *> &Ops) const {
  using namespace llvm::PatternMatch;

  if (!I->getType()->isVectorTy() || !Subtarget.hasStdExtV())
    return false;

  auto IsSinker = [&](Instruction *I, int Operand) {
    switch (I->getOpcode()) {
    case Instruction::Add:
    case Instruction::Sub:
    case Instruction::Mul:
    case Instruction::And:
    case Instruction::Or:
    case Instruction::Xor:
    case Instruction::FAdd:
    case Instruction::FSub:
    case Instruction::FMul:
    case Instruction::FDiv:
      return true;
    case Instruction::Shl:
    case Instruction::LShr:
    case Instruction::AShr:
      return Operand == 1;
    case Instruction::Call:
      if (auto *II = dyn_cast<IntrinsicInst>(I)) {
        switch (II->getIntrinsicID()) {
        case Intrinsic::fma:
          return Operand == 0 || Operand == 1;
        default:
          return false;
        }
      }
      return false;
    default:
      return false;
    }
  };

  for (auto OpIdx : enumerate(I->operands())) {
    if (!IsSinker(I, OpIdx.index()))
      continue;

    Instruction *Op = dyn_cast<Instruction>(OpIdx.value().get());
    // Make sure we are not already sinking this operand.
    if (!Op || any_of(Ops, [&](Use *U) { return U->get() == Op; }))
      continue;

    // We are looking for a splat that can be sunk.
    if (!match(Op, m_Shuffle(m_InsertElt(m_Undef(), m_Value(), m_ZeroInt()),
                             m_Undef(), m_ZeroMask())))
      continue;

    // All uses of the shuffle should be sunk to avoid duplicating it across
    // GPR and vector registers.
    for (Use &U : Op->uses()) {
      Instruction *Insn = cast<Instruction>(U.getUser());
      if (!IsSinker(Insn, U.getOperandNo()))
        return false;
    }

    Ops.push_back(&Op->getOperandUse(0));
    Ops.push_back(&OpIdx.value());
  }
  return true;
}

bool RISCVTargetLowering::isFPImmLegal(const APFloat &Imm, EVT VT,
                                       bool ForCodeSize) const {
  if (VT == MVT::f16 && !Subtarget.hasStdExtZfh())
    return false;
  if (VT == MVT::f32 && !Subtarget.hasStdExtF())
    return false;
  if (VT == MVT::f64 && !Subtarget.hasStdExtD())
    return false;
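  // Only +0.0 is cheap to materialize: it can be produced by moving x0 into
  // an FPR. -0.0 has the sign bit set and would need extra instructions.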
  if (Imm.isNegZero())
    return false;
  return Imm.isZero();
}

bool RISCVTargetLowering::hasBitPreservingFPLogic(EVT VT) const {
  return (VT == MVT::f16 && Subtarget.hasStdExtZfh()) ||
         (VT == MVT::f32 && Subtarget.hasStdExtF()) ||
         (VT == MVT::f64 && Subtarget.hasStdExtD());
}

MVT RISCVTargetLowering::getRegisterTypeForCallingConv(LLVMContext &Context,
                                                       CallingConv::ID CC,
                                                       EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
  // end up using a GPR but that will be decided based on ABI.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return MVT::f32;

  return TargetLowering::getRegisterTypeForCallingConv(Context, CC, VT);
}

unsigned RISCVTargetLowering::getNumRegistersForCallingConv(
    LLVMContext &Context, CallingConv::ID CC, EVT VT) const {
  // Use f32 to pass f16 if it is legal and Zfh is not enabled. We might still
  // end up using a GPR but that will be decided based on ABI.
  if (VT == MVT::f16 && Subtarget.hasStdExtF() && !Subtarget.hasStdExtZfh())
    return 1;

  return TargetLowering::getNumRegistersForCallingConv(Context, CC, VT);
}

// Changes the condition code and swaps operands if necessary, so the SetCC
// operation matches one of the comparisons supported directly by branches
// in the RISC-V ISA. May adjust compares to favor compare with 0 over compare
// with 1/-1.
static void translateSetCCForBranch(const SDLoc &DL, SDValue &LHS, SDValue &RHS,
                                    ISD::CondCode &CC, SelectionDAG &DAG) {
  // Convert X > -1 to X >= 0.
  if (CC == ISD::SETGT && isAllOnesConstant(RHS)) {
    RHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }
  // Convert X < 1 to 0 >= X.
  if (CC == ISD::SETLT && isOneConstant(RHS)) {
    RHS = LHS;
    LHS = DAG.getConstant(0, DL, RHS.getValueType());
    CC = ISD::SETGE;
    return;
  }

  switch (CC) {
  default:
    break;
  case ISD::SETGT:
  case ISD::SETLE:
  case ISD::SETUGT:
  case ISD::SETULE:
    CC = ISD::getSetCCSwappedOperands(CC);
    std::swap(LHS, RHS);
    break;
  }
}

RISCVII::VLMUL RISCVTargetLowering::getLMUL(MVT VT) {
  assert(VT.isScalableVector() && "Expecting a scalable vector type");
  unsigned KnownSize = VT.getSizeInBits().getKnownMinValue();
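  // Mask vectors occupy one bit per element; scale their known size as if
  // each element were a byte so they classify like the corresponding i8
  // vectors in the switch below.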
  if (VT.getVectorElementType() == MVT::i1)
    KnownSize *= 8;

  switch (KnownSize) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case 8:
    return RISCVII::VLMUL::LMUL_F8;
  case 16:
    return RISCVII::VLMUL::LMUL_F4;
  case 32:
    return RISCVII::VLMUL::LMUL_F2;
  case 64:
    return RISCVII::VLMUL::LMUL_1;
  case 128:
    return RISCVII::VLMUL::LMUL_2;
  case 256:
    return RISCVII::VLMUL::LMUL_4;
  case 512:
    return RISCVII::VLMUL::LMUL_8;
  }
}

unsigned RISCVTargetLowering::getRegClassIDForLMUL(RISCVII::VLMUL LMul) {
  switch (LMul) {
  default:
    llvm_unreachable("Invalid LMUL.");
  case RISCVII::VLMUL::LMUL_F8:
  case RISCVII::VLMUL::LMUL_F4:
  case RISCVII::VLMUL::LMUL_F2:
  case RISCVII::VLMUL::LMUL_1:
    return RISCV::VRRegClassID;
  case RISCVII::VLMUL::LMUL_2:
    return RISCV::VRM2RegClassID;
  case RISCVII::VLMUL::LMUL_4:
    return RISCV::VRM4RegClassID;
  case RISCVII::VLMUL::LMUL_8:
    return RISCV::VRM8RegClassID;
  }
}

unsigned RISCVTargetLowering::getSubregIndexByMVT(MVT VT, unsigned Index) {
  RISCVII::VLMUL LMUL = getLMUL(VT);
  if (LMUL == RISCVII::VLMUL::LMUL_F8 ||
      LMUL == RISCVII::VLMUL::LMUL_F4 ||
      LMUL == RISCVII::VLMUL::LMUL_F2 ||
      LMUL == RISCVII::VLMUL::LMUL_1) {
    static_assert(RISCV::sub_vrm1_7 == RISCV::sub_vrm1_0 + 7,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm1_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_2) {
    static_assert(RISCV::sub_vrm2_3 == RISCV::sub_vrm2_0 + 3,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm2_0 + Index;
  }
  if (LMUL == RISCVII::VLMUL::LMUL_4) {
    static_assert(RISCV::sub_vrm4_1 == RISCV::sub_vrm4_0 + 1,
                  "Unexpected subreg numbering");
    return RISCV::sub_vrm4_0 + Index;
  }
  llvm_unreachable("Invalid vector type.");
}

unsigned RISCVTargetLowering::getRegClassIDForVecVT(MVT VT) {
  if (VT.getVectorElementType() == MVT::i1)
    return RISCV::VRRegClassID;
  return getRegClassIDForLMUL(getLMUL(VT));
}

// Attempt to decompose a subvector insert/extract between VecVT and
// SubVecVT via subregister indices. Returns the subregister index that
// can perform the subvector insert/extract with the given element index, as
// well as the index corresponding to any leftover subvectors that must be
// further inserted/extracted within the register class for SubVecVT.
std::pair<unsigned, unsigned>
RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
    MVT VecVT, MVT SubVecVT, unsigned InsertExtractIdx,
    const RISCVRegisterInfo *TRI) {
  static_assert((RISCV::VRM8RegClassID > RISCV::VRM4RegClassID &&
                 RISCV::VRM4RegClassID > RISCV::VRM2RegClassID &&
                 RISCV::VRM2RegClassID > RISCV::VRRegClassID),
                "Register classes not ordered");
  unsigned VecRegClassID = getRegClassIDForVecVT(VecVT);
  unsigned SubRegClassID = getRegClassIDForVecVT(SubVecVT);
  // Try to compose a subregister index that takes us from the incoming
  // LMUL>1 register class down to the outgoing one. At each step we halve
  // the LMUL:
  //   nxv16i32@12 -> nxv2i32: sub_vrm4_1_then_sub_vrm2_1_then_sub_vrm1_0
  // Note that this is not guaranteed to find a subregister index, such as
  // when we are extracting from one VR type to another.
  unsigned SubRegIdx = RISCV::NoSubRegister;
  for (const unsigned RCID :
       {RISCV::VRM4RegClassID, RISCV::VRM2RegClassID, RISCV::VRRegClassID})
    if (VecRegClassID > RCID && SubRegClassID <= RCID) {
      VecVT = VecVT.getHalfNumVectorElementsVT();
      bool IsHi =
          InsertExtractIdx >= VecVT.getVectorElementCount().getKnownMinValue();
      SubRegIdx = TRI->composeSubRegIndices(SubRegIdx,
                                            getSubregIndexByMVT(VecVT, IsHi));
      if (IsHi)
        InsertExtractIdx -= VecVT.getVectorElementCount().getKnownMinValue();
    }
  return {SubRegIdx, InsertExtractIdx};
}
1333
1334 // Permit combining of mask vectors as BUILD_VECTOR never expands to scalar
1335 // stores for those types.
mergeStoresAfterLegalization(EVT VT) const1336 bool RISCVTargetLowering::mergeStoresAfterLegalization(EVT VT) const {
1337 return !Subtarget.useRVVForFixedLengthVectors() ||
1338 (VT.isFixedLengthVector() && VT.getVectorElementType() == MVT::i1);
1339 }
1340
isLegalElementTypeForRVV(Type * ScalarTy) const1341 bool RISCVTargetLowering::isLegalElementTypeForRVV(Type *ScalarTy) const {
1342 if (ScalarTy->isPointerTy())
1343 return true;
1344
1345 if (ScalarTy->isIntegerTy(8) || ScalarTy->isIntegerTy(16) ||
1346 ScalarTy->isIntegerTy(32) || ScalarTy->isIntegerTy(64))
1347 return true;
1348
1349 if (ScalarTy->isHalfTy())
1350 return Subtarget.hasStdExtZfh();
1351 if (ScalarTy->isFloatTy())
1352 return Subtarget.hasStdExtF();
1353 if (ScalarTy->isDoubleTy())
1354 return Subtarget.hasStdExtD();
1355
1356 return false;
1357 }
1358
useRVVForFixedLengthVectorVT(MVT VT,const RISCVSubtarget & Subtarget)1359 static bool useRVVForFixedLengthVectorVT(MVT VT,
1360 const RISCVSubtarget &Subtarget) {
1361 assert(VT.isFixedLengthVector() && "Expected a fixed length vector type!");
1362 if (!Subtarget.useRVVForFixedLengthVectors())
1363 return false;
1364
1365 // We only support a set of vector types with a consistent maximum fixed size
1366 // across all supported vector element types to avoid legalization issues.
1367 // Therefore -- since the largest is v1024i8/v512i16/etc -- the largest
1368 // fixed-length vector type we support is 1024 bytes.
1369 if (VT.getFixedSizeInBits() > 1024 * 8)
1370 return false;
1371
1372 unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
1373
1374 MVT EltVT = VT.getVectorElementType();
1375
1376 // Don't use RVV for vectors we cannot scalarize if required.
1377 switch (EltVT.SimpleTy) {
1378 // i1 is supported but has different rules.
1379 default:
1380 return false;
1381 case MVT::i1:
1382 // Masks can only use a single register.
1383 if (VT.getVectorNumElements() > MinVLen)
1384 return false;
1385 MinVLen /= 8;
1386 break;
1387 case MVT::i8:
1388 case MVT::i16:
1389 case MVT::i32:
1390 case MVT::i64:
1391 break;
1392 case MVT::f16:
1393 if (!Subtarget.hasStdExtZfh())
1394 return false;
1395 break;
1396 case MVT::f32:
1397 if (!Subtarget.hasStdExtF())
1398 return false;
1399 break;
1400 case MVT::f64:
1401 if (!Subtarget.hasStdExtD())
1402 return false;
1403 break;
1404 }
1405
1406 // Reject elements larger than ELEN.
1407 if (EltVT.getSizeInBits() > Subtarget.getMaxELENForFixedLengthVectors())
1408 return false;
1409
  unsigned LMul = divideCeil(VT.getSizeInBits(), MinVLen);
  // Don't use RVV for types that don't fit.
  if (LMul > Subtarget.getMaxLMULForFixedLengthVectors())
    return false;

  // TODO: Perhaps an artificial restriction, but worth having whilst getting
  // the base fixed length RVV support in place.
  if (!VT.isPow2VectorType())
    return false;

  return true;
}

bool RISCVTargetLowering::useRVVForFixedLengthVectorVT(MVT VT) const {
  return ::useRVVForFixedLengthVectorVT(VT, Subtarget);
}

// Return the largest legal scalable vector type that matches VT's element
// type.
static MVT getContainerForFixedLengthVector(const TargetLowering &TLI, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  // This may be called before legal types are set up.
  assert(((VT.isFixedLengthVector() && TLI.isTypeLegal(VT)) ||
          useRVVForFixedLengthVectorVT(VT, Subtarget)) &&
         "Expected legal fixed length vector!");

  unsigned MinVLen = Subtarget.getMinRVVVectorSizeInBits();
  unsigned MaxELen = Subtarget.getMaxELENForFixedLengthVectors();

  MVT EltVT = VT.getVectorElementType();
  switch (EltVT.SimpleTy) {
  default:
    llvm_unreachable("unexpected element type for RVV container");
  case MVT::i1:
  case MVT::i8:
  case MVT::i16:
  case MVT::i32:
  case MVT::i64:
  case MVT::f16:
  case MVT::f32:
  case MVT::f64: {
    // We prefer to use LMUL=1 for VLEN sized types. Use fractional LMULs for
    // narrower types. The smallest fractional LMUL we support is 8/ELEN.
    // Within each fractional LMUL we support SEW between 8 and LMUL*ELEN.
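    // For example, assuming MinVLen=128: v4i32 (a full 128-bit register's
    // worth of data) maps to the LMUL=1 container nxv2i32, while v2i32 maps
    // to the fractional-LMUL container nxv1i32.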
    unsigned NumElts =
        (VT.getVectorNumElements() * RISCV::RVVBitsPerBlock) / MinVLen;
    NumElts = std::max(NumElts, RISCV::RVVBitsPerBlock / MaxELen);
    assert(isPowerOf2_32(NumElts) && "Expected power of 2 NumElts");
    return MVT::getScalableVectorVT(EltVT, NumElts);
  }
  }
}

static MVT getContainerForFixedLengthVector(SelectionDAG &DAG, MVT VT,
                                            const RISCVSubtarget &Subtarget) {
  return getContainerForFixedLengthVector(DAG.getTargetLoweringInfo(), VT,
                                          Subtarget);
}

MVT RISCVTargetLowering::getContainerForFixedLengthVector(MVT VT) const {
  return ::getContainerForFixedLengthVector(*this, VT, getSubtarget());
}

// Grow V to consume an entire RVV register.
static SDValue convertToScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                       const RISCVSubtarget &Subtarget) {
  assert(VT.isScalableVector() &&
         "Expected to convert into a scalable vector!");
  assert(V.getValueType().isFixedLengthVector() &&
         "Expected a fixed length vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, DAG.getUNDEF(VT), V, Zero);
}

// Shrink V so it's just big enough to maintain a VT's worth of data.
static SDValue convertFromScalableVector(EVT VT, SDValue V, SelectionDAG &DAG,
                                         const RISCVSubtarget &Subtarget) {
  assert(VT.isFixedLengthVector() &&
         "Expected to convert into a fixed length vector!");
  assert(V.getValueType().isScalableVector() &&
         "Expected a scalable vector operand!");
  SDLoc DL(V);
  SDValue Zero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
  return DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, V, Zero);
}

// Gets the two common "VL" operands: an all-ones mask and the vector length.
// VecVT is a vector type, either fixed-length or scalable, and ContainerVT is
// the vector type that it is contained in.
static std::pair<SDValue, SDValue>
getDefaultVLOps(MVT VecVT, MVT ContainerVT, SDLoc DL, SelectionDAG &DAG,
                const RISCVSubtarget &Subtarget) {
  assert(ContainerVT.isScalableVector() && "Expecting scalable container type");
  MVT XLenVT = Subtarget.getXLenVT();
  SDValue VL = VecVT.isFixedLengthVector()
                   ? DAG.getConstant(VecVT.getVectorNumElements(), DL, XLenVT)
                   : DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, XLenVT);
  MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
  SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
  return {Mask, VL};
}

// As above but assuming the given type is a scalable vector type.
static std::pair<SDValue, SDValue>
getDefaultScalableVLOps(MVT VecVT, SDLoc DL, SelectionDAG &DAG,
                        const RISCVSubtarget &Subtarget) {
  assert(VecVT.isScalableVector() && "Expecting a scalable vector");
  return getDefaultVLOps(VecVT, VecVT, DL, DAG, Subtarget);
}

// The state of RVV BUILD_VECTOR and VECTOR_SHUFFLE lowering is that very few
// of either is (currently) supported. This can get us into an infinite loop
// where we try to lower a BUILD_VECTOR as a VECTOR_SHUFFLE as a BUILD_VECTOR
// as a ..., etc.
// Until either (or both) of these can reliably lower any node, reporting that
// we don't want to expand BUILD_VECTORs via VECTOR_SHUFFLEs at least breaks
// the infinite loop. Note that this lowers BUILD_VECTOR through the stack,
// which is not desirable.
bool RISCVTargetLowering::shouldExpandBuildVectorWithShuffles(
    EVT VT, unsigned DefinedValues) const {
  return false;
}

bool RISCVTargetLowering::isShuffleMaskLegal(ArrayRef<int> M, EVT VT) const {
  // Only splats are currently supported.
  if (ShuffleVectorSDNode::isSplatMask(M.data(), VT))
    return true;

  return false;
}

static SDValue lowerFP_TO_INT_SAT(SDValue Op, SelectionDAG &DAG) {
  // RISCV FP-to-int conversions saturate to the destination register size, but
  // don't produce 0 for NaN. We can use a conversion instruction and fix the
  // NaN case with a compare and a select.
  SDValue Src = Op.getOperand(0);

  EVT DstVT = Op.getValueType();
  EVT SatVT = cast<VTSDNode>(Op.getOperand(1))->getVT();

  bool IsSigned = Op.getOpcode() == ISD::FP_TO_SINT_SAT;
  unsigned Opc;
  if (SatVT == DstVT)
    Opc = IsSigned ? RISCVISD::FCVT_X_RTZ : RISCVISD::FCVT_XU_RTZ;
  else if (DstVT == MVT::i64 && SatVT == MVT::i32)
    Opc = IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
  else
    return SDValue();
  // FIXME: Support other SatVTs by clamping before or after the conversion.

  SDLoc DL(Op);
  SDValue FpToInt = DAG.getNode(Opc, DL, DstVT, Src);

  SDValue ZeroInt = DAG.getConstant(0, DL, DstVT);
  return DAG.getSelectCC(DL, Src, Src, ZeroInt, FpToInt, ISD::CondCode::SETUO);
}

static SDValue lowerSPLAT_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  unsigned Opc =
      VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
  SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, Op.getOperand(0), VL);
  return convertFromScalableVector(VT, Splat, DAG, Subtarget);
}

struct VIDSequence {
  int64_t StepNumerator;
  unsigned StepDenominator;
  int64_t Addend;
};

// Try to match an arithmetic-sequence BUILD_VECTOR [X,X+S,X+2*S,...,X+(N-1)*S]
// to the (non-zero) step S and start value X. This can then be lowered as the
// RVV sequence (VID * S) + X, for example.
// The step S is represented as an integer numerator divided by a positive
// denominator. Note that the implementation currently only identifies
// sequences in which either the numerator is +/- 1 or the denominator is 1. It
// cannot detect 2/3, for example.
// Note that this method will also match potentially unappealing index
// sequences, like <i32 0, i32 50939494>, however it is left to the caller to
// determine whether this is worth generating code for.
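// For example, <2,4,6,8> is matched as {StepNumerator=2, StepDenominator=1,
// Addend=2}, and <0,0,1,1,2,2> as {StepNumerator=1, StepDenominator=2,
// Addend=0}.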
static Optional<VIDSequence> isSimpleVIDSequence(SDValue Op) {
  unsigned NumElts = Op.getNumOperands();
  assert(Op.getOpcode() == ISD::BUILD_VECTOR && "Unexpected BUILD_VECTOR");
  if (!Op.getValueType().isInteger())
    return None;

  Optional<unsigned> SeqStepDenom;
  Optional<int64_t> SeqStepNum, SeqAddend;
  Optional<std::pair<uint64_t, unsigned>> PrevElt;
  unsigned EltSizeInBits = Op.getValueType().getScalarSizeInBits();
  for (unsigned Idx = 0; Idx < NumElts; Idx++) {
    // Assume undef elements match the sequence; we just have to be careful
    // when interpolating across them.
    if (Op.getOperand(Idx).isUndef())
      continue;
    // The BUILD_VECTOR must be all constants.
    if (!isa<ConstantSDNode>(Op.getOperand(Idx)))
      return None;

    uint64_t Val = Op.getConstantOperandVal(Idx) &
                   maskTrailingOnes<uint64_t>(EltSizeInBits);

    if (PrevElt) {
      // Calculate the step since the last non-undef element, and ensure
      // it's consistent across the entire sequence.
      unsigned IdxDiff = Idx - PrevElt->second;
      int64_t ValDiff = SignExtend64(Val - PrevElt->first, EltSizeInBits);

      // A zero value difference means that we're somewhere in the middle
      // of a fractional step, e.g. <0,0,0*,0,1,1,1,1>. Wait until we notice a
      // step change before evaluating the sequence.
      if (ValDiff != 0) {
        int64_t Remainder = ValDiff % IdxDiff;
        // Normalize the step if it's greater than 1.
        if (Remainder != ValDiff) {
          // The difference must cleanly divide the element span.
          if (Remainder != 0)
            return None;
          ValDiff /= IdxDiff;
          IdxDiff = 1;
        }

        if (!SeqStepNum)
          SeqStepNum = ValDiff;
        else if (ValDiff != SeqStepNum)
          return None;

        if (!SeqStepDenom)
          SeqStepDenom = IdxDiff;
        else if (IdxDiff != *SeqStepDenom)
          return None;
      }
    }

    // Record and/or check any addend.
    if (SeqStepNum && SeqStepDenom) {
      uint64_t ExpectedVal =
          (int64_t)(Idx * (uint64_t)*SeqStepNum) / *SeqStepDenom;
      int64_t Addend = SignExtend64(Val - ExpectedVal, EltSizeInBits);
      if (!SeqAddend)
        SeqAddend = Addend;
      else if (SeqAddend != Addend)
        return None;
    }

    // Record this non-undef element for later.
    if (!PrevElt || PrevElt->first != Val)
      PrevElt = std::make_pair(Val, Idx);
  }
  // We need to have logged both a step and an addend for this to count as
  // a legal index sequence.
  if (!SeqStepNum || !SeqStepDenom || !SeqAddend)
    return None;

  return VIDSequence{*SeqStepNum, *SeqStepDenom, *SeqAddend};
}

static SDValue lowerBUILD_VECTOR(SDValue Op, SelectionDAG &DAG,
                                 const RISCVSubtarget &Subtarget) {
  MVT VT = Op.getSimpleValueType();
  assert(VT.isFixedLengthVector() && "Unexpected vector!");

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDLoc DL(Op);
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  MVT XLenVT = Subtarget.getXLenVT();
  unsigned NumElts = Op.getNumOperands();

  if (VT.getVectorElementType() == MVT::i1) {
    if (ISD::isBuildVectorAllZeros(Op.getNode())) {
      SDValue VMClr = DAG.getNode(RISCVISD::VMCLR_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMClr, DAG, Subtarget);
    }

    if (ISD::isBuildVectorAllOnes(Op.getNode())) {
      SDValue VMSet = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
      return convertFromScalableVector(VT, VMSet, DAG, Subtarget);
    }

    // Lower constant mask BUILD_VECTORs via an integer vector type, in
    // scalar integer chunks whose bit-width depends on the number of mask
    // bits and XLEN.
    // First, determine the most appropriate scalar integer type to use. This
    // is at most XLenVT, but may be shrunk to a smaller vector element type
    // according to the size of the final vector - use i8 chunks rather than
    // XLenVT if we're producing a v8i1. This results in more consistent
    // codegen across RV32 and RV64.
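    // For example, v8i1 <1,0,1,1,0,0,1,0> is packed LSB-first into the
    // single i8 chunk 0b01001101 (0x4D), built as a v1i8 vector and then
    // bitcast back to v8i1.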
    unsigned NumViaIntegerBits =
        std::min(std::max(NumElts, 8u), Subtarget.getXLen());
    if (ISD::isBuildVectorOfConstantSDNodes(Op.getNode())) {
      // If we have to use more than one INSERT_VECTOR_ELT then this
      // optimization is likely to increase code size; avoid performing it in
      // such a case. We can use a load from a constant pool in this case.
      if (DAG.shouldOptForSize() && NumElts > NumViaIntegerBits)
        return SDValue();
      // Now we can create our integer vector type. Note that it may be larger
      // than the resulting mask type: v4i1 would use v1i8 as its integer type.
      MVT IntegerViaVecVT =
          MVT::getVectorVT(MVT::getIntegerVT(NumViaIntegerBits),
                           divideCeil(NumElts, NumViaIntegerBits));

      uint64_t Bits = 0;
      unsigned BitPos = 0, IntegerEltIdx = 0;
      SDValue Vec = DAG.getUNDEF(IntegerViaVecVT);

      for (unsigned I = 0; I < NumElts; I++, BitPos++) {
        // Once we accumulate enough bits to fill our scalar type, insert into
        // our vector and clear our accumulated data.
        if (I != 0 && I % NumViaIntegerBits == 0) {
          if (NumViaIntegerBits <= 32)
            Bits = SignExtend64(Bits, 32);
          SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
          Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec,
                            Elt, DAG.getConstant(IntegerEltIdx, DL, XLenVT));
          Bits = 0;
          BitPos = 0;
          IntegerEltIdx++;
        }
        SDValue V = Op.getOperand(I);
        bool BitValue = !V.isUndef() && cast<ConstantSDNode>(V)->getZExtValue();
        Bits |= ((uint64_t)BitValue << BitPos);
      }

      // Insert the (remaining) scalar value into position in our integer
      // vector type.
      if (NumViaIntegerBits <= 32)
        Bits = SignExtend64(Bits, 32);
      SDValue Elt = DAG.getConstant(Bits, DL, XLenVT);
      Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, IntegerViaVecVT, Vec, Elt,
                        DAG.getConstant(IntegerEltIdx, DL, XLenVT));

      if (NumElts < NumViaIntegerBits) {
        // If we're producing a smaller vector than our minimum legal integer
        // type, bitcast to the equivalent (known-legal) mask type, and extract
        // our final mask.
        assert(IntegerViaVecVT == MVT::v1i8 && "Unexpected mask vector type");
        Vec = DAG.getBitcast(MVT::v8i1, Vec);
        Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, VT, Vec,
                          DAG.getConstant(0, DL, XLenVT));
      } else {
        // Else we must have produced an integer type with the same size as the
        // mask type; bitcast for the final result.
        assert(VT.getSizeInBits() == IntegerViaVecVT.getSizeInBits());
        Vec = DAG.getBitcast(VT, Vec);
      }

      return Vec;
    }

    // A BUILD_VECTOR can be lowered as a SETCC. For each fixed-length mask
    // vector type, we have a legal equivalently-sized i8 type, so we can use
    // that.
    MVT WideVecVT = VT.changeVectorElementType(MVT::i8);
    SDValue VecZero = DAG.getConstant(0, DL, WideVecVT);

    SDValue WideVec;
    if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
      // For a splat, perform a scalar truncate before creating the wider
      // vector.
      assert(Splat.getValueType() == XLenVT &&
             "Unexpected type for i1 splat value");
      Splat = DAG.getNode(ISD::AND, DL, XLenVT, Splat,
                          DAG.getConstant(1, DL, XLenVT));
      WideVec = DAG.getSplatBuildVector(WideVecVT, DL, Splat);
    } else {
      SmallVector<SDValue, 8> Ops(Op->op_values());
      WideVec = DAG.getBuildVector(WideVecVT, DL, Ops);
      SDValue VecOne = DAG.getConstant(1, DL, WideVecVT);
      WideVec = DAG.getNode(ISD::AND, DL, WideVecVT, WideVec, VecOne);
    }

    return DAG.getSetCC(DL, VT, WideVec, VecZero, ISD::SETNE);
  }

  if (SDValue Splat = cast<BuildVectorSDNode>(Op)->getSplatValue()) {
    unsigned Opc = VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL
                                        : RISCVISD::VMV_V_X_VL;
    Splat = DAG.getNode(Opc, DL, ContainerVT, Splat, VL);
    return convertFromScalableVector(VT, Splat, DAG, Subtarget);
  }

  // Try and match index sequences, which we can lower to the vid instruction
  // with optional modifications. An all-undef vector is matched by
  // getSplatValue, above.
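  // For example, <2,4,6,8> matches with a step numerator of 2, denominator
  // of 1, and addend of 2, and is lowered below as splat(2) + (vid << 1).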
  if (auto SimpleVID = isSimpleVIDSequence(Op)) {
    int64_t StepNumerator = SimpleVID->StepNumerator;
    unsigned StepDenominator = SimpleVID->StepDenominator;
    int64_t Addend = SimpleVID->Addend;
    // Only emit VIDs with suitably-small steps/addends. We use imm5 as a
    // threshold since it's the immediate value many RVV instructions accept.
    if (isInt<5>(StepNumerator) && isPowerOf2_32(StepDenominator) &&
        isInt<5>(Addend)) {
      SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, ContainerVT, Mask, VL);
      // Convert right out of the scalable type so we can use standard ISD
      // nodes for the rest of the computation. If we used scalable types with
      // these, we'd lose the fixed-length vector info and generate worse
      // vsetvli code.
      VID = convertFromScalableVector(VT, VID, DAG, Subtarget);
      assert(StepNumerator != 0 && "Invalid step");
      bool Negate = false;
      if (StepNumerator != 1) {
        int64_t SplatStepVal = StepNumerator;
        unsigned Opcode = ISD::MUL;
        if (isPowerOf2_64(std::abs(StepNumerator))) {
          Negate = StepNumerator < 0;
          Opcode = ISD::SHL;
          SplatStepVal = Log2_64(std::abs(StepNumerator));
        }
        SDValue SplatStep = DAG.getSplatVector(
            VT, DL, DAG.getConstant(SplatStepVal, DL, XLenVT));
        VID = DAG.getNode(Opcode, DL, VT, VID, SplatStep);
      }
      if (StepDenominator != 1) {
        SDValue SplatStep = DAG.getSplatVector(
            VT, DL, DAG.getConstant(Log2_64(StepDenominator), DL, XLenVT));
        VID = DAG.getNode(ISD::SRL, DL, VT, VID, SplatStep);
      }
      if (Addend != 0 || Negate) {
        SDValue SplatAddend =
            DAG.getSplatVector(VT, DL, DAG.getConstant(Addend, DL, XLenVT));
        VID = DAG.getNode(Negate ? ISD::SUB : ISD::ADD, DL, VT, SplatAddend,
                          VID);
      }
      return VID;
    }
  }

  // Attempt to detect "hidden" splats, which only reveal themselves as splats
  // when re-interpreted as a vector with a larger element type. For example,
  //   v4i16 = build_vector i16 0, i16 1, i16 0, i16 1
  // could instead be splatted as
  //   v2i32 = build_vector i32 0x00010000, i32 0x00010000
  // TODO: This optimization could also work on non-constant splats, but it
  // would require bit-manipulation instructions to construct the splat value.
  SmallVector<SDValue> Sequence;
  unsigned EltBitSize = VT.getScalarSizeInBits();
  const auto *BV = cast<BuildVectorSDNode>(Op);
  if (VT.isInteger() && EltBitSize < 64 &&
      ISD::isBuildVectorOfConstantSDNodes(Op.getNode()) &&
      BV->getRepeatedSequence(Sequence) &&
      (Sequence.size() * EltBitSize) <= 64) {
    unsigned SeqLen = Sequence.size();
    MVT ViaIntVT = MVT::getIntegerVT(EltBitSize * SeqLen);
    MVT ViaVecVT = MVT::getVectorVT(ViaIntVT, NumElts / SeqLen);
    assert((ViaIntVT == MVT::i16 || ViaIntVT == MVT::i32 ||
            ViaIntVT == MVT::i64) &&
           "Unexpected sequence type");

    unsigned EltIdx = 0;
    uint64_t EltMask = maskTrailingOnes<uint64_t>(EltBitSize);
    uint64_t SplatValue = 0;
    // Construct the amalgamated value which can be splatted as this larger
    // vector type.
    for (const auto &SeqV : Sequence) {
      if (!SeqV.isUndef())
        SplatValue |= ((cast<ConstantSDNode>(SeqV)->getZExtValue() & EltMask)
                       << (EltIdx * EltBitSize));
      EltIdx++;
    }

    // On RV64, sign-extend from 32 to 64 bits where possible in order to
    // achieve better constant materialization.
    if (Subtarget.is64Bit() && ViaIntVT == MVT::i32)
      SplatValue = SignExtend64(SplatValue, 32);

    // Since we can't introduce illegal i64 types at this stage, we can only
    // perform an i64 splat on RV32 if it is its own sign-extended value. That
    // way we can use RVV instructions to splat.
    assert((ViaIntVT.bitsLE(XLenVT) ||
            (!Subtarget.is64Bit() && ViaIntVT == MVT::i64)) &&
           "Unexpected bitcast sequence");
    if (ViaIntVT.bitsLE(XLenVT) || isInt<32>(SplatValue)) {
      SDValue ViaVL =
          DAG.getConstant(ViaVecVT.getVectorNumElements(), DL, XLenVT);
      MVT ViaContainerVT =
          getContainerForFixedLengthVector(DAG, ViaVecVT, Subtarget);
      SDValue Splat =
          DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ViaContainerVT,
                      DAG.getConstant(SplatValue, DL, XLenVT), ViaVL);
      Splat = convertFromScalableVector(ViaVecVT, Splat, DAG, Subtarget);
      return DAG.getBitcast(VT, Splat);
    }
  }

  // Try and optimize BUILD_VECTORs with "dominant values" - these are values
  // which constitute a large proportion of the elements. In such cases we can
  // splat a vector with the dominant element and make up the shortfall with
  // INSERT_VECTOR_ELTs.
  // Note that this includes vectors of 2 elements by association. The
  // upper-most element is the "dominant" one, allowing us to use a splat to
  // "insert" the upper element, and an insert of the lower element at position
  // 0, which improves codegen.
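  // For example, v4i32 <2,2,7,2> is lowered as a splat of 2 followed by a
  // single INSERT_VECTOR_ELT of 7 at index 2.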
  SDValue DominantValue;
  unsigned MostCommonCount = 0;
  DenseMap<SDValue, unsigned> ValueCounts;
  unsigned NumUndefElts =
      count_if(Op->op_values(), [](const SDValue &V) { return V.isUndef(); });

  // Track the number of scalar loads we know we'd be inserting, estimated as
  // any non-zero floating-point constant. Other kinds of element are either
  // already in registers or are materialized on demand. The threshold at which
  // a vector load is more desirable than several scalar materialization and
  // vector-insertion instructions is not known.
  unsigned NumScalarLoads = 0;

  for (SDValue V : Op->op_values()) {
    if (V.isUndef())
      continue;

    ValueCounts.insert(std::make_pair(V, 0));
    unsigned &Count = ValueCounts[V];

    if (auto *CFP = dyn_cast<ConstantFPSDNode>(V))
      NumScalarLoads += !CFP->isExactlyValue(+0.0);

    // Is this value dominant? In case of a tie, prefer the highest element as
    // it's cheaper to insert near the beginning of a vector than it is at the
    // end.
    if (++Count >= MostCommonCount) {
      DominantValue = V;
      MostCommonCount = Count;
    }
  }

  assert(DominantValue && "Not expecting an all-undef BUILD_VECTOR");
  unsigned NumDefElts = NumElts - NumUndefElts;
  unsigned DominantValueCountThreshold = NumDefElts <= 2 ? 0 : NumDefElts - 2;

  // Don't perform this optimization when optimizing for size, since
  // materializing elements and inserting them tends to cause code bloat.
  if (!DAG.shouldOptForSize() && NumScalarLoads < NumElts &&
      ((MostCommonCount > DominantValueCountThreshold) ||
       (ValueCounts.size() <= Log2_32(NumDefElts)))) {
    // Start by splatting the most common element.
    SDValue Vec = DAG.getSplatBuildVector(VT, DL, DominantValue);

    DenseSet<SDValue> Processed{DominantValue};
    MVT SelMaskTy = VT.changeVectorElementType(MVT::i1);
    for (const auto &OpIdx : enumerate(Op->ops())) {
      const SDValue &V = OpIdx.value();
      if (V.isUndef() || !Processed.insert(V).second)
        continue;
      if (ValueCounts[V] == 1) {
        Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, VT, Vec, V,
                          DAG.getConstant(OpIdx.index(), DL, XLenVT));
      } else {
        // Blend in all instances of this value using a VSELECT, using a
        // mask where each bit signals whether that element is the one
        // we're after.
        SmallVector<SDValue> Ops;
        transform(Op->op_values(), std::back_inserter(Ops), [&](SDValue V1) {
          return DAG.getConstant(V == V1, DL, XLenVT);
        });
        Vec = DAG.getNode(ISD::VSELECT, DL, VT,
                          DAG.getBuildVector(SelMaskTy, DL, Ops),
                          DAG.getSplatBuildVector(VT, DL, V), Vec);
      }
    }

    return Vec;
  }

  return SDValue();
}

static SDValue splatPartsI64WithVL(const SDLoc &DL, MVT VT, SDValue Lo,
                                   SDValue Hi, SDValue VL, SelectionDAG &DAG) {
  if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
    int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
    int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
    // If Hi constant is all the same sign bit as Lo, lower this as a custom
    // node in order to try and match RVV vector/scalar instructions.
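    // For example, an i64 splat of -1 has Lo = Hi = 0xFFFFFFFF, so a single
    // vmv.v.x of the sign-extended Lo recreates the full 64-bit value.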
    if ((LoC >> 31) == HiC)
      return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Lo, VL);
  }

  // Fall back to a stack store and stride x0 vector load.
  return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VT, Lo, Hi, VL);
}

// Called by type legalization to handle splat of i64 on RV32.
// FIXME: We can optimize this when the type has sign or zero bits in one
// of the halves.
static SDValue splatSplitI64WithVL(const SDLoc &DL, MVT VT, SDValue Scalar,
                                   SDValue VL, SelectionDAG &DAG) {
  assert(Scalar.getValueType() == MVT::i64 && "Unexpected VT!");
  SDValue Lo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                           DAG.getConstant(0, DL, MVT::i32));
  SDValue Hi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
                           DAG.getConstant(1, DL, MVT::i32));
  return splatPartsI64WithVL(DL, VT, Lo, Hi, VL, DAG);
}

// This function lowers a splat of a scalar operand Splat with the vector
// length VL. It ensures the final sequence is type legal, which is useful when
// lowering a splat after type legalization.
static SDValue lowerScalarSplat(SDValue Scalar, SDValue VL, MVT VT, SDLoc DL,
                                SelectionDAG &DAG,
                                const RISCVSubtarget &Subtarget) {
  if (VT.isFloatingPoint())
    return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, VT, Scalar, VL);

  MVT XLenVT = Subtarget.getXLenVT();

  // Simplest case is that the operand needs to be promoted to XLenVT.
  if (Scalar.getValueType().bitsLE(XLenVT)) {
    // If the operand is a constant, sign extend to increase our chances
    // of being able to use a .vi instruction. ANY_EXTEND would become a
    // zero extend and the simm5 check in isel would fail.
    // FIXME: Should we ignore the upper bits in isel instead?
    unsigned ExtOpc =
        isa<ConstantSDNode>(Scalar) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
    Scalar = DAG.getNode(ExtOpc, DL, XLenVT, Scalar);
    return DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, Scalar, VL);
  }

  assert(XLenVT == MVT::i32 && Scalar.getValueType() == MVT::i64 &&
         "Unexpected scalar for splat lowering!");

  // Otherwise use the more complicated splatting algorithm.
  return splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
}

static SDValue lowerVECTOR_SHUFFLE(SDValue Op, SelectionDAG &DAG,
                                   const RISCVSubtarget &Subtarget) {
  SDValue V1 = Op.getOperand(0);
  SDValue V2 = Op.getOperand(1);
  SDLoc DL(Op);
  MVT XLenVT = Subtarget.getXLenVT();
  MVT VT = Op.getSimpleValueType();
  unsigned NumElts = VT.getVectorNumElements();
  ShuffleVectorSDNode *SVN = cast<ShuffleVectorSDNode>(Op.getNode());

  MVT ContainerVT = getContainerForFixedLengthVector(DAG, VT, Subtarget);

  SDValue TrueMask, VL;
  std::tie(TrueMask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

  if (SVN->isSplat()) {
    const int Lane = SVN->getSplatIndex();
    if (Lane >= 0) {
      MVT SVT = VT.getVectorElementType();

      // Turn splatted vector load into a strided load with an X0 stride.
      SDValue V = V1;
      // Peek through CONCAT_VECTORS as VectorCombine can concat a vector
      // with undef.
      // FIXME: Peek through INSERT_SUBVECTOR, EXTRACT_SUBVECTOR, bitcasts?
      int Offset = Lane;
      if (V.getOpcode() == ISD::CONCAT_VECTORS) {
        int OpElements =
            V.getOperand(0).getSimpleValueType().getVectorNumElements();
        V = V.getOperand(Offset / OpElements);
        Offset %= OpElements;
      }

      // We need to ensure the load isn't atomic or volatile.
      if (ISD::isNormalLoad(V.getNode()) && cast<LoadSDNode>(V)->isSimple()) {
        auto *Ld = cast<LoadSDNode>(V);
        Offset *= SVT.getStoreSize();
        SDValue NewAddr = DAG.getMemBasePlusOffset(
            Ld->getBasePtr(), TypeSize::Fixed(Offset), DL);

        // If this is SEW=64 on RV32, use a strided load with a stride of x0.
        if (SVT.isInteger() && SVT.bitsGT(XLenVT)) {
          SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
          SDValue IntID =
              DAG.getTargetConstant(Intrinsic::riscv_vlse, DL, XLenVT);
          SDValue Ops[] = {Ld->getChain(), IntID, NewAddr,
                           DAG.getRegister(RISCV::X0, XLenVT), VL};
          SDValue NewLoad = DAG.getMemIntrinsicNode(
              ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, SVT,
              DAG.getMachineFunction().getMachineMemOperand(
                  Ld->getMemOperand(), Offset, SVT.getStoreSize()));
          DAG.makeEquivalentMemoryOrdering(Ld, NewLoad);
          return convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
        }

        // Otherwise use a scalar load and splat. This will give the best
        // opportunity to fold a splat into the operation. ISel can turn it
        // into the x0 strided load if we aren't able to fold away the select.
        if (SVT.isFloatingPoint())
          V = DAG.getLoad(SVT, DL, Ld->getChain(), NewAddr,
                          Ld->getPointerInfo().getWithOffset(Offset),
                          Ld->getOriginalAlign(),
                          Ld->getMemOperand()->getFlags());
        else
          V = DAG.getExtLoad(ISD::SEXTLOAD, DL, XLenVT, Ld->getChain(), NewAddr,
                             Ld->getPointerInfo().getWithOffset(Offset), SVT,
                             Ld->getOriginalAlign(),
                             Ld->getMemOperand()->getFlags());
        DAG.makeEquivalentMemoryOrdering(Ld, V);

        unsigned Opc =
            VT.isFloatingPoint() ? RISCVISD::VFMV_V_F_VL : RISCVISD::VMV_V_X_VL;
        SDValue Splat = DAG.getNode(Opc, DL, ContainerVT, V, VL);
        return convertFromScalableVector(VT, Splat, DAG, Subtarget);
      }

      V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
      assert(Lane < (int)NumElts && "Unexpected lane!");
      SDValue Gather =
          DAG.getNode(RISCVISD::VRGATHER_VX_VL, DL, ContainerVT, V1,
                      DAG.getConstant(Lane, DL, XLenVT), TrueMask, VL);
      return convertFromScalableVector(VT, Gather, DAG, Subtarget);
    }
  }

  // Detect shuffles which can be re-expressed as vector selects; these are
  // shuffles in which each element in the destination is taken from an element
  // at the corresponding index in either source vector.
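  // For example, with 4 elements the mask <0, 5, 2, 7> takes each result
  // element from index i of either V1 or V2, so the shuffle can become a
  // vselect between the two sources.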
  bool IsSelect = all_of(enumerate(SVN->getMask()), [&](const auto &MaskIdx) {
    int MaskIndex = MaskIdx.value();
    return MaskIndex < 0 || MaskIdx.index() == (unsigned)MaskIndex % NumElts;
  });

  assert(!V1.isUndef() && "Unexpected shuffle canonicalization");

  SmallVector<SDValue> MaskVals;
  // As a backup, shuffles can be lowered via a vrgather instruction, possibly
  // merged with a second vrgather.
  SmallVector<SDValue> GatherIndicesLHS, GatherIndicesRHS;

  // By default we preserve the original operand order, and use a mask to
  // select LHS as true and RHS as false. However, since RVV vector selects may
  // feature splats but only on the LHS, we may choose to invert our mask and
  // instead select between RHS and LHS.
  bool SwapOps = DAG.isSplatValue(V2) && !DAG.isSplatValue(V1);
  bool InvertMask = IsSelect == SwapOps;

  // Keep track of which non-undef indices are used by each LHS/RHS shuffle
  // half.
  DenseMap<int, unsigned> LHSIndexCounts, RHSIndexCounts;

  // Now construct the mask that will be used by the vselect or blended
  // vrgather operation. For vrgathers, construct the appropriate indices into
  // each vector.
  for (int MaskIndex : SVN->getMask()) {
    bool SelectMaskVal = (MaskIndex < (int)NumElts) ^ InvertMask;
    MaskVals.push_back(DAG.getConstant(SelectMaskVal, DL, XLenVT));
    if (!IsSelect) {
      bool IsLHSOrUndefIndex = MaskIndex < (int)NumElts;
      GatherIndicesLHS.push_back(IsLHSOrUndefIndex && MaskIndex >= 0
                                     ? DAG.getConstant(MaskIndex, DL, XLenVT)
                                     : DAG.getUNDEF(XLenVT));
      GatherIndicesRHS.push_back(
          IsLHSOrUndefIndex ? DAG.getUNDEF(XLenVT)
                            : DAG.getConstant(MaskIndex - NumElts, DL, XLenVT));
      if (IsLHSOrUndefIndex && MaskIndex >= 0)
        ++LHSIndexCounts[MaskIndex];
      if (!IsLHSOrUndefIndex)
        ++RHSIndexCounts[MaskIndex - NumElts];
    }
  }

  if (SwapOps) {
    std::swap(V1, V2);
    std::swap(GatherIndicesLHS, GatherIndicesRHS);
  }

  assert(MaskVals.size() == NumElts && "Unexpected select-like shuffle");
  MVT MaskVT = MVT::getVectorVT(MVT::i1, NumElts);
  SDValue SelectMask = DAG.getBuildVector(MaskVT, DL, MaskVals);

  if (IsSelect)
    return DAG.getNode(ISD::VSELECT, DL, VT, SelectMask, V1, V2);

  if (VT.getScalarSizeInBits() == 8 && VT.getVectorNumElements() > 256) {
    // On such a large vector we're unable to use i8 as the index type.
    // FIXME: We could promote the index to i16 and use vrgatherei16, but that
    // may involve vector splitting if we're already at LMUL=8, or our
    // user-supplied maximum fixed-length LMUL.
    return SDValue();
  }

  unsigned GatherVXOpc = RISCVISD::VRGATHER_VX_VL;
  unsigned GatherVVOpc = RISCVISD::VRGATHER_VV_VL;
  MVT IndexVT = VT.changeTypeToInteger();
  // Since we can't introduce illegal index types at this stage, use i16 and
  // vrgatherei16 if the corresponding index type for plain vrgather is greater
  // than XLenVT.
  if (IndexVT.getScalarType().bitsGT(XLenVT)) {
    GatherVVOpc = RISCVISD::VRGATHEREI16_VV_VL;
    IndexVT = IndexVT.changeVectorElementType(MVT::i16);
  }

  MVT IndexContainerVT =
      ContainerVT.changeVectorElementType(IndexVT.getScalarType());

  SDValue Gather;
  // TODO: This doesn't trigger for i64 vectors on RV32, since there we
  // encounter a bitcasted BUILD_VECTOR with low/high i32 values.
  if (SDValue SplatValue = DAG.getSplatValue(V1, /*LegalTypes*/ true)) {
    Gather = lowerScalarSplat(SplatValue, VL, ContainerVT, DL, DAG, Subtarget);
  } else {
    V1 = convertToScalableVector(ContainerVT, V1, DAG, Subtarget);
    // If only one index is used, we can use a "splat" vrgather.
    // TODO: We can splat the most-common index and fix-up any stragglers, if
    // that's beneficial.
    if (LHSIndexCounts.size() == 1) {
      int SplatIndex = LHSIndexCounts.begin()->getFirst();
      Gather =
          DAG.getNode(GatherVXOpc, DL, ContainerVT, V1,
                      DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
    } else {
      SDValue LHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesLHS);
      LHSIndices =
          convertToScalableVector(IndexContainerVT, LHSIndices, DAG, Subtarget);

      Gather = DAG.getNode(GatherVVOpc, DL, ContainerVT, V1, LHSIndices,
                           TrueMask, VL);
    }
  }

  // If a second vector operand is used by this shuffle, blend it in with an
  // additional vrgather.
  if (!V2.isUndef()) {
    V2 = convertToScalableVector(ContainerVT, V2, DAG, Subtarget);
    // If only one index is used, we can use a "splat" vrgather.
    // TODO: We can splat the most-common index and fix-up any stragglers, if
    // that's beneficial.
    if (RHSIndexCounts.size() == 1) {
      int SplatIndex = RHSIndexCounts.begin()->getFirst();
      V2 = DAG.getNode(GatherVXOpc, DL, ContainerVT, V2,
                       DAG.getConstant(SplatIndex, DL, XLenVT), TrueMask, VL);
    } else {
      SDValue RHSIndices = DAG.getBuildVector(IndexVT, DL, GatherIndicesRHS);
      RHSIndices =
          convertToScalableVector(IndexContainerVT, RHSIndices, DAG, Subtarget);
      V2 = DAG.getNode(GatherVVOpc, DL, ContainerVT, V2, RHSIndices, TrueMask,
                       VL);
    }

    MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
    SelectMask =
        convertToScalableVector(MaskContainerVT, SelectMask, DAG, Subtarget);

    Gather = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, SelectMask, V2,
                         Gather, VL);
  }

  return convertFromScalableVector(VT, Gather, DAG, Subtarget);
}

static SDValue getRVVFPExtendOrRound(SDValue Op, MVT VT, MVT ContainerVT,
                                     SDLoc DL, SelectionDAG &DAG,
                                     const RISCVSubtarget &Subtarget) {
  if (VT.isScalableVector())
    return DAG.getFPExtendOrRound(Op, DL, VT);
  assert(VT.isFixedLengthVector() &&
         "Unexpected value type for RVV FP extend/round lowering");
  SDValue Mask, VL;
  std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
  unsigned RVVOpc = ContainerVT.bitsGT(Op.getSimpleValueType())
                        ? RISCVISD::FP_EXTEND_VL
                        : RISCVISD::FP_ROUND_VL;
  return DAG.getNode(RVVOpc, DL, ContainerVT, Op, Mask, VL);
}

// While RVV has alignment restrictions, we should always be able to load as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::LOAD via a correctly-aligned type. If
// the load is already correctly-aligned, it returns SDValue().
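// For example, a v4i32 load with alignment 1 can be re-expressed as a v16i8
// load of the same bytes followed by a bitcast back to v4i32.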
SDValue RISCVTargetLowering::expandUnalignedRVVLoad(SDValue Op,
                                                    SelectionDAG &DAG) const {
  auto *Load = cast<LoadSDNode>(Op);
  assert(Load && Load->getMemoryVT().isVector() && "Expected vector load");

  if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                     Load->getMemoryVT(),
                                     *Load->getMemOperand()))
    return SDValue();

  SDLoc DL(Op);
  MVT VT = Op.getSimpleValueType();
  unsigned EltSizeBits = VT.getScalarSizeInBits();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV load type");
  MVT NewVT =
      MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
  assert(NewVT.isValid() &&
         "Expecting equally-sized RVV vector types to be legal");
  SDValue L = DAG.getLoad(NewVT, DL, Load->getChain(), Load->getBasePtr(),
                          Load->getPointerInfo(), Load->getOriginalAlign(),
                          Load->getMemOperand()->getFlags());
  return DAG.getMergeValues({DAG.getBitcast(VT, L), L.getValue(1)}, DL);
}

// While RVV has alignment restrictions, we should always be able to store as a
// legal equivalently-sized byte-typed vector instead. This method is
// responsible for re-expressing an ISD::STORE via a correctly-aligned type. It
// returns SDValue() if the store is already correctly aligned.
SDValue RISCVTargetLowering::expandUnalignedRVVStore(SDValue Op,
                                                     SelectionDAG &DAG) const {
  auto *Store = cast<StoreSDNode>(Op);
  assert(Store && Store->getValue().getValueType().isVector() &&
         "Expected vector store");

  if (allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
                                     Store->getMemoryVT(),
                                     *Store->getMemOperand()))
    return SDValue();

  SDLoc DL(Op);
  SDValue StoredVal = Store->getValue();
  MVT VT = StoredVal.getSimpleValueType();
  unsigned EltSizeBits = VT.getScalarSizeInBits();
  assert((EltSizeBits == 16 || EltSizeBits == 32 || EltSizeBits == 64) &&
         "Unexpected unaligned RVV store type");
  MVT NewVT =
      MVT::getVectorVT(MVT::i8, VT.getVectorElementCount() * (EltSizeBits / 8));
  assert(NewVT.isValid() &&
         "Expecting equally-sized RVV vector types to be legal");
  StoredVal = DAG.getBitcast(NewVT, StoredVal);
  return DAG.getStore(Store->getChain(), DL, StoredVal, Store->getBasePtr(),
                      Store->getPointerInfo(), Store->getOriginalAlign(),
                      Store->getMemOperand()->getFlags());
}

SDValue RISCVTargetLowering::LowerOperation(SDValue Op,
                                            SelectionDAG &DAG) const {
  switch (Op.getOpcode()) {
  default:
    report_fatal_error("unimplemented operand");
  case ISD::GlobalAddress:
    return lowerGlobalAddress(Op, DAG);
  case ISD::BlockAddress:
    return lowerBlockAddress(Op, DAG);
  case ISD::ConstantPool:
    return lowerConstantPool(Op, DAG);
  case ISD::JumpTable:
    return lowerJumpTable(Op, DAG);
  case ISD::GlobalTLSAddress:
    return lowerGlobalTLSAddress(Op, DAG);
  case ISD::SELECT:
    return lowerSELECT(Op, DAG);
  case ISD::BRCOND:
    return lowerBRCOND(Op, DAG);
  case ISD::VASTART:
    return lowerVASTART(Op, DAG);
  case ISD::FRAMEADDR:
    return lowerFRAMEADDR(Op, DAG);
  case ISD::RETURNADDR:
    return lowerRETURNADDR(Op, DAG);
  case ISD::SHL_PARTS:
    return lowerShiftLeftParts(Op, DAG);
  case ISD::SRA_PARTS:
    return lowerShiftRightParts(Op, DAG, true);
  case ISD::SRL_PARTS:
    return lowerShiftRightParts(Op, DAG, false);
  case ISD::BITCAST: {
    SDLoc DL(Op);
    EVT VT = Op.getValueType();
    SDValue Op0 = Op.getOperand(0);
    EVT Op0VT = Op0.getValueType();
    MVT XLenVT = Subtarget.getXLenVT();
    if (VT.isFixedLengthVector()) {
      // We can handle fixed length vector bitcasts with a simple replacement
      // in isel.
      if (Op0VT.isFixedLengthVector())
        return Op;
      // When bitcasting from scalar to fixed-length vector, insert the scalar
      // into a one-element vector of the result type, and perform a vector
      // bitcast.
      if (!Op0VT.isVector()) {
        auto BVT = EVT::getVectorVT(*DAG.getContext(), Op0VT, 1);
        return DAG.getBitcast(VT, DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, BVT,
                                              DAG.getUNDEF(BVT), Op0,
                                              DAG.getConstant(0, DL, XLenVT)));
      }
      return SDValue();
    }
    // Custom-legalize bitcasts from fixed-length vector types to scalar types
    // thus: bitcast the vector to a one-element vector type whose element type
    // is the same as the result type, and extract the first element.
    if (!VT.isVector() && Op0VT.isFixedLengthVector()) {
      LLVMContext &Context = *DAG.getContext();
      SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
      return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
                         DAG.getConstant(0, DL, XLenVT));
    }
    if (VT == MVT::f16 && Op0VT == MVT::i16 && Subtarget.hasStdExtZfh()) {
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Op0);
      SDValue FPConv = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, NewOp0);
      return FPConv;
    }
    if (VT == MVT::f32 && Op0VT == MVT::i32 && Subtarget.is64Bit() &&
        Subtarget.hasStdExtF()) {
      SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, Op0);
      SDValue FPConv =
          DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, NewOp0);
      return FPConv;
    }
    return SDValue();
  }
  case ISD::INTRINSIC_WO_CHAIN:
    return LowerINTRINSIC_WO_CHAIN(Op, DAG);
  case ISD::INTRINSIC_W_CHAIN:
    return LowerINTRINSIC_W_CHAIN(Op, DAG);
  case ISD::INTRINSIC_VOID:
    return LowerINTRINSIC_VOID(Op, DAG);
  case ISD::BSWAP:
  case ISD::BITREVERSE: {
    // Convert BSWAP/BITREVERSE to GREVI to enable GREVI combining.
    assert(Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    // Start with the maximum immediate value which is the bitwidth - 1.
    unsigned Imm = VT.getSizeInBits() - 1;
    // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
    if (Op.getOpcode() == ISD::BSWAP)
      Imm &= ~0x7U;
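    // For example, for an i32 operation this gives imm 31 for BITREVERSE and
    // imm 24 for BSWAP (reversing at byte granularity only).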
    return DAG.getNode(RISCVISD::GREV, DL, VT, Op.getOperand(0),
                       DAG.getConstant(Imm, DL, VT));
  }
  case ISD::FSHL:
  case ISD::FSHR: {
    MVT VT = Op.getSimpleValueType();
    assert(VT == Subtarget.getXLenVT() && "Unexpected custom legalization");
    SDLoc DL(Op);
    if (Op.getOperand(2).getOpcode() == ISD::Constant)
      return Op;
    // FSL/FSR take a log2(XLen)+1 bit shift amount but XLenVT FSHL/FSHR only
    // use log2(XLen) bits. Mask the shift amount accordingly.
    unsigned ShAmtWidth = Subtarget.getXLen() - 1;
    SDValue ShAmt = DAG.getNode(ISD::AND, DL, VT, Op.getOperand(2),
                                DAG.getConstant(ShAmtWidth, DL, VT));
    unsigned Opc = Op.getOpcode() == ISD::FSHL ? RISCVISD::FSL : RISCVISD::FSR;
    return DAG.getNode(Opc, DL, VT, Op.getOperand(0), Op.getOperand(1), ShAmt);
  }
  case ISD::TRUNCATE: {
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    // Only custom-lower vector truncates.
    if (!VT.isVector())
      return Op;

    // Truncates to mask types are handled differently.
    if (VT.getVectorElementType() == MVT::i1)
      return lowerVectorMaskTrunc(Op, DAG);

    // RVV only has truncates which operate from SEW*2->SEW, so lower arbitrary
    // truncates as a series of "RISCVISD::TRUNCATE_VECTOR_VL" nodes which
    // truncate by one power of two at a time.
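    // For example, truncating v4i64 to v4i8 emits three such nodes:
    // 64 -> 32 -> 16 -> 8 bits per element.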
    MVT DstEltVT = VT.getVectorElementType();

    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();
    MVT SrcEltVT = SrcVT.getVectorElementType();

    assert(DstEltVT.bitsLT(SrcEltVT) &&
           isPowerOf2_64(DstEltVT.getSizeInBits()) &&
           isPowerOf2_64(SrcEltVT.getSizeInBits()) &&
           "Unexpected vector truncate lowering");

    MVT ContainerVT = SrcVT;
    if (SrcVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(SrcVT);
      Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
    }

    SDValue Result = Src;
    SDValue Mask, VL;
    std::tie(Mask, VL) =
        getDefaultVLOps(SrcVT, ContainerVT, DL, DAG, Subtarget);
    LLVMContext &Context = *DAG.getContext();
    const ElementCount Count = ContainerVT.getVectorElementCount();
    do {
      SrcEltVT = MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2);
      EVT ResultVT = EVT::getVectorVT(Context, SrcEltVT, Count);
      Result = DAG.getNode(RISCVISD::TRUNCATE_VECTOR_VL, DL, ResultVT, Result,
                           Mask, VL);
    } while (SrcEltVT != DstEltVT);

    if (SrcVT.isFixedLengthVector())
      Result = convertFromScalableVector(VT, Result, DAG, Subtarget);

    return Result;
  }
  case ISD::ANY_EXTEND:
  case ISD::ZERO_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ 1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VZEXT_VL);
  case ISD::SIGN_EXTEND:
    if (Op.getOperand(0).getValueType().isVector() &&
        Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
      return lowerVectorMaskExt(Op, DAG, /*ExtVal*/ -1);
    return lowerFixedLengthVectorExtendToRVV(Op, DAG, RISCVISD::VSEXT_VL);
  case ISD::SPLAT_VECTOR_PARTS:
    return lowerSPLAT_VECTOR_PARTS(Op, DAG);
  case ISD::INSERT_VECTOR_ELT:
    return lowerINSERT_VECTOR_ELT(Op, DAG);
  case ISD::EXTRACT_VECTOR_ELT:
    return lowerEXTRACT_VECTOR_ELT(Op, DAG);
  case ISD::VSCALE: {
    MVT VT = Op.getSimpleValueType();
    SDLoc DL(Op);
    SDValue VLENB = DAG.getNode(RISCVISD::READ_VLENB, DL, VT);
    // We define our scalable vector types for lmul=1 to use a 64-bit known
    // minimum size, e.g. <vscale x 2 x i32>. VLENB is in bytes so we calculate
    // vscale as VLENB / 8.
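    // For example, vscale * 16 is (VLENB >> 3) * 16, which the constant
    // handling below folds to a single VLENB << 1.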
    assert(RISCV::RVVBitsPerBlock == 64 && "Unexpected bits per block!");
    if (isa<ConstantSDNode>(Op.getOperand(0))) {
      // We assume VLENB is a multiple of 8. We manually choose the best shift
      // here because SimplifyDemandedBits isn't always able to simplify it.
      uint64_t Val = Op.getConstantOperandVal(0);
      if (isPowerOf2_64(Val)) {
        uint64_t Log2 = Log2_64(Val);
        if (Log2 < 3)
          return DAG.getNode(ISD::SRL, DL, VT, VLENB,
                             DAG.getConstant(3 - Log2, DL, VT));
        if (Log2 > 3)
          return DAG.getNode(ISD::SHL, DL, VT, VLENB,
                             DAG.getConstant(Log2 - 3, DL, VT));
        return VLENB;
      }
      // If the multiplier is a multiple of 8, scale it down to avoid needing
      // to shift the VLENB value.
      if ((Val % 8) == 0)
        return DAG.getNode(ISD::MUL, DL, VT, VLENB,
                           DAG.getConstant(Val / 8, DL, VT));
    }

    SDValue VScale = DAG.getNode(ISD::SRL, DL, VT, VLENB,
                                 DAG.getConstant(3, DL, VT));
    return DAG.getNode(ISD::MUL, DL, VT, VScale, Op.getOperand(0));
  }
  case ISD::FP_EXTEND: {
    // RVV can only do fp_extend to types double the size of the source. We
    // custom-lower f16->f64 extensions to two hops of ISD::FP_EXTEND, going
    // via f32.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Prepare any fixed-length vector operands.
    MVT ContainerVT = VT;
    if (SrcVT.isFixedLengthVector()) {
      ContainerVT = getContainerForFixedLengthVector(VT);
      MVT SrcContainerVT =
          ContainerVT.changeVectorElementType(SrcVT.getVectorElementType());
      Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    }

    if (!VT.isVector() || VT.getVectorElementType() != MVT::f64 ||
        SrcVT.getVectorElementType() != MVT::f16) {
      // For scalable vectors, we only need to close the gap between
      // vXf16->vXf64.
      if (!VT.isFixedLengthVector())
        return Op;
      // For fixed-length vectors, lower the FP_EXTEND to a custom "VL" version.
      Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
      return convertFromScalableVector(VT, Src, DAG, Subtarget);
    }

    MVT InterVT = VT.changeVectorElementType(MVT::f32);
    MVT InterContainerVT = ContainerVT.changeVectorElementType(MVT::f32);
    SDValue IntermediateExtend = getRVVFPExtendOrRound(
        Src, InterVT, InterContainerVT, DL, DAG, Subtarget);

    SDValue Extend = getRVVFPExtendOrRound(IntermediateExtend, VT, ContainerVT,
                                           DL, DAG, Subtarget);
    if (VT.isFixedLengthVector())
      return convertFromScalableVector(VT, Extend, DAG, Subtarget);
    return Extend;
  }
  case ISD::FP_ROUND: {
    // RVV can only do fp_round to types half the size of the source. We
    // custom-lower f64->f16 rounds via RVV's round-to-odd float
    // conversion instruction.
    SDLoc DL(Op);
    MVT VT = Op.getSimpleValueType();
    SDValue Src = Op.getOperand(0);
    MVT SrcVT = Src.getSimpleValueType();

    // Prepare any fixed-length vector operands.
    MVT ContainerVT = VT;
    if (VT.isFixedLengthVector()) {
      MVT SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
      ContainerVT =
          SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
      Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
    }

    if (!VT.isVector() || VT.getVectorElementType() != MVT::f16 ||
        SrcVT.getVectorElementType() != MVT::f64) {
      // For scalable vectors, we only need to close the gap between
      // vXf64<->vXf16.
      if (!VT.isFixedLengthVector())
        return Op;
      // For fixed-length vectors, lower the FP_ROUND to a custom "VL" version.
      Src = getRVVFPExtendOrRound(Src, VT, ContainerVT, DL, DAG, Subtarget);
      return convertFromScalableVector(VT, Src, DAG, Subtarget);
    }

    SDValue Mask, VL;
    std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);

    MVT InterVT = ContainerVT.changeVectorElementType(MVT::f32);
    SDValue IntermediateRound =
        DAG.getNode(RISCVISD::VFNCVT_ROD_VL, DL, InterVT, Src, Mask, VL);
    SDValue Round = getRVVFPExtendOrRound(IntermediateRound, VT, ContainerVT,
                                          DL, DAG, Subtarget);

    if (VT.isFixedLengthVector())
      return convertFromScalableVector(VT, Round, DAG, Subtarget);
    return Round;
  }
  case ISD::FP_TO_SINT:
  case ISD::FP_TO_UINT:
  case ISD::SINT_TO_FP:
  case ISD::UINT_TO_FP: {
    // RVV can only do fp<->int conversions to types half/double the size of
    // the source. We custom-lower any conversions that do two hops into
    // sequences.
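    // For example, v4i8 -> v4f32 is sign/zero-extended to v4i32 and then
    // converted, while v4f32 -> v4i8 is converted to v4i16 and then
    // truncated.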
2640 MVT VT = Op.getSimpleValueType();
2641 if (!VT.isVector())
2642 return Op;
2643 SDLoc DL(Op);
2644 SDValue Src = Op.getOperand(0);
2645 MVT EltVT = VT.getVectorElementType();
2646 MVT SrcVT = Src.getSimpleValueType();
2647 MVT SrcEltVT = SrcVT.getVectorElementType();
2648 unsigned EltSize = EltVT.getSizeInBits();
2649 unsigned SrcEltSize = SrcEltVT.getSizeInBits();
2650 assert(isPowerOf2_32(EltSize) && isPowerOf2_32(SrcEltSize) &&
2651 "Unexpected vector element types");
2652
2653 bool IsInt2FP = SrcEltVT.isInteger();
2654 // Widening conversions
2655 if (EltSize > SrcEltSize && (EltSize / SrcEltSize >= 4)) {
2656 if (IsInt2FP) {
2657 // Do a regular integer sign/zero extension then convert to float.
2658 MVT IVecVT = MVT::getVectorVT(MVT::getIntegerVT(EltVT.getSizeInBits()),
2659 VT.getVectorElementCount());
2660 unsigned ExtOpcode = Op.getOpcode() == ISD::UINT_TO_FP
2661 ? ISD::ZERO_EXTEND
2662 : ISD::SIGN_EXTEND;
2663 SDValue Ext = DAG.getNode(ExtOpcode, DL, IVecVT, Src);
2664 return DAG.getNode(Op.getOpcode(), DL, VT, Ext);
2665 }
2666 // FP2Int
2667 assert(SrcEltVT == MVT::f16 && "Unexpected FP_TO_[US]INT lowering");
2668 // Do one doubling fp_extend then complete the operation by converting
2669 // to int.
2670 MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2671 SDValue FExt = DAG.getFPExtendOrRound(Src, DL, InterimFVT);
2672 return DAG.getNode(Op.getOpcode(), DL, VT, FExt);
2673 }
2674
2675 // Narrowing conversions
2676 if (SrcEltSize > EltSize && (SrcEltSize / EltSize >= 4)) {
2677 if (IsInt2FP) {
2678 // One narrowing int_to_fp, then an fp_round.
2679 assert(EltVT == MVT::f16 && "Unexpected [US]_TO_FP lowering");
2680 MVT InterimFVT = MVT::getVectorVT(MVT::f32, VT.getVectorElementCount());
2681 SDValue Int2FP = DAG.getNode(Op.getOpcode(), DL, InterimFVT, Src);
2682 return DAG.getFPExtendOrRound(Int2FP, DL, VT);
2683 }
2684 // FP2Int
2685 // One narrowing fp_to_int, then truncate the integer. If the float isn't
2686 // representable by the integer, the result is poison.
2687 MVT IVecVT =
2688 MVT::getVectorVT(MVT::getIntegerVT(SrcEltVT.getSizeInBits() / 2),
2689 VT.getVectorElementCount());
2690 SDValue FP2Int = DAG.getNode(Op.getOpcode(), DL, IVecVT, Src);
2691 return DAG.getNode(ISD::TRUNCATE, DL, VT, FP2Int);
2692 }
2693
2694 // Scalable vectors can exit here. Patterns will handle equally-sized
2695 // conversions halving/doubling ones.
2696 if (!VT.isFixedLengthVector())
2697 return Op;
2698
2699 // For fixed-length vectors we lower to a custom "VL" node.
2700 unsigned RVVOpc = 0;
2701 switch (Op.getOpcode()) {
2702 default:
2703 llvm_unreachable("Impossible opcode");
2704 case ISD::FP_TO_SINT:
2705 RVVOpc = RISCVISD::FP_TO_SINT_VL;
2706 break;
2707 case ISD::FP_TO_UINT:
2708 RVVOpc = RISCVISD::FP_TO_UINT_VL;
2709 break;
2710 case ISD::SINT_TO_FP:
2711 RVVOpc = RISCVISD::SINT_TO_FP_VL;
2712 break;
2713 case ISD::UINT_TO_FP:
2714 RVVOpc = RISCVISD::UINT_TO_FP_VL;
2715 break;
2716 }
2717
2718 MVT ContainerVT, SrcContainerVT;
2719 // Derive the reference container type from the larger vector type.
2720 if (SrcEltSize > EltSize) {
2721 SrcContainerVT = getContainerForFixedLengthVector(SrcVT);
2722 ContainerVT =
2723 SrcContainerVT.changeVectorElementType(VT.getVectorElementType());
2724 } else {
2725 ContainerVT = getContainerForFixedLengthVector(VT);
2726 SrcContainerVT = ContainerVT.changeVectorElementType(SrcEltVT);
2727 }
2728
2729 SDValue Mask, VL;
2730 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
2731
2732 Src = convertToScalableVector(SrcContainerVT, Src, DAG, Subtarget);
2733 Src = DAG.getNode(RVVOpc, DL, ContainerVT, Src, Mask, VL);
2734 return convertFromScalableVector(VT, Src, DAG, Subtarget);
2735 }
2736 case ISD::FP_TO_SINT_SAT:
2737 case ISD::FP_TO_UINT_SAT:
2738 return lowerFP_TO_INT_SAT(Op, DAG);
2739 case ISD::VECREDUCE_ADD:
2740 case ISD::VECREDUCE_UMAX:
2741 case ISD::VECREDUCE_SMAX:
2742 case ISD::VECREDUCE_UMIN:
2743 case ISD::VECREDUCE_SMIN:
2744 return lowerVECREDUCE(Op, DAG);
2745 case ISD::VECREDUCE_AND:
2746 case ISD::VECREDUCE_OR:
2747 case ISD::VECREDUCE_XOR:
2748 if (Op.getOperand(0).getValueType().getVectorElementType() == MVT::i1)
2749 return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ false);
2750 return lowerVECREDUCE(Op, DAG);
2751 case ISD::VECREDUCE_FADD:
2752 case ISD::VECREDUCE_SEQ_FADD:
2753 case ISD::VECREDUCE_FMIN:
2754 case ISD::VECREDUCE_FMAX:
2755 return lowerFPVECREDUCE(Op, DAG);
2756 case ISD::VP_REDUCE_ADD:
2757 case ISD::VP_REDUCE_UMAX:
2758 case ISD::VP_REDUCE_SMAX:
2759 case ISD::VP_REDUCE_UMIN:
2760 case ISD::VP_REDUCE_SMIN:
2761 case ISD::VP_REDUCE_FADD:
2762 case ISD::VP_REDUCE_SEQ_FADD:
2763 case ISD::VP_REDUCE_FMIN:
2764 case ISD::VP_REDUCE_FMAX:
2765 return lowerVPREDUCE(Op, DAG);
2766 case ISD::VP_REDUCE_AND:
2767 case ISD::VP_REDUCE_OR:
2768 case ISD::VP_REDUCE_XOR:
2769 if (Op.getOperand(1).getValueType().getVectorElementType() == MVT::i1)
2770 return lowerVectorMaskVecReduction(Op, DAG, /*IsVP*/ true);
2771 return lowerVPREDUCE(Op, DAG);
2772 case ISD::INSERT_SUBVECTOR:
2773 return lowerINSERT_SUBVECTOR(Op, DAG);
2774 case ISD::EXTRACT_SUBVECTOR:
2775 return lowerEXTRACT_SUBVECTOR(Op, DAG);
2776 case ISD::STEP_VECTOR:
2777 return lowerSTEP_VECTOR(Op, DAG);
2778 case ISD::VECTOR_REVERSE:
2779 return lowerVECTOR_REVERSE(Op, DAG);
2780 case ISD::BUILD_VECTOR:
2781 return lowerBUILD_VECTOR(Op, DAG, Subtarget);
2782 case ISD::SPLAT_VECTOR:
2783 if (Op.getValueType().getVectorElementType() == MVT::i1)
2784 return lowerVectorMaskSplat(Op, DAG);
2785 return lowerSPLAT_VECTOR(Op, DAG, Subtarget);
2786 case ISD::VECTOR_SHUFFLE:
2787 return lowerVECTOR_SHUFFLE(Op, DAG, Subtarget);
2788 case ISD::CONCAT_VECTORS: {
2789 // Split CONCAT_VECTORS into a series of INSERT_SUBVECTOR nodes. This is
2790 // better than going through the stack, as the default expansion does.
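    // For example, (concat_vectors v8i32:a, v8i32:b) becomes
    //   (insert_subvector (insert_subvector undef, a, 0), b, 8).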
2791 SDLoc DL(Op);
2792 MVT VT = Op.getSimpleValueType();
2793 unsigned NumOpElts =
2794 Op.getOperand(0).getSimpleValueType().getVectorMinNumElements();
2795 SDValue Vec = DAG.getUNDEF(VT);
2796 for (const auto &OpIdx : enumerate(Op->ops()))
2797 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT, Vec, OpIdx.value(),
2798 DAG.getIntPtrConstant(OpIdx.index() * NumOpElts, DL));
2799 return Vec;
2800 }
2801 case ISD::LOAD:
2802 if (auto V = expandUnalignedRVVLoad(Op, DAG))
2803 return V;
2804 if (Op.getValueType().isFixedLengthVector())
2805 return lowerFixedLengthVectorLoadToRVV(Op, DAG);
2806 return Op;
2807 case ISD::STORE:
2808 if (auto V = expandUnalignedRVVStore(Op, DAG))
2809 return V;
2810 if (Op.getOperand(1).getValueType().isFixedLengthVector())
2811 return lowerFixedLengthVectorStoreToRVV(Op, DAG);
2812 return Op;
2813 case ISD::MLOAD:
2814 case ISD::VP_LOAD:
2815 return lowerMaskedLoad(Op, DAG);
2816 case ISD::MSTORE:
2817 case ISD::VP_STORE:
2818 return lowerMaskedStore(Op, DAG);
2819 case ISD::SETCC:
2820 return lowerFixedLengthVectorSetccToRVV(Op, DAG);
2821 case ISD::ADD:
2822 return lowerToScalableOp(Op, DAG, RISCVISD::ADD_VL);
2823 case ISD::SUB:
2824 return lowerToScalableOp(Op, DAG, RISCVISD::SUB_VL);
2825 case ISD::MUL:
2826 return lowerToScalableOp(Op, DAG, RISCVISD::MUL_VL);
2827 case ISD::MULHS:
2828 return lowerToScalableOp(Op, DAG, RISCVISD::MULHS_VL);
2829 case ISD::MULHU:
2830 return lowerToScalableOp(Op, DAG, RISCVISD::MULHU_VL);
2831 case ISD::AND:
2832 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMAND_VL,
2833 RISCVISD::AND_VL);
2834 case ISD::OR:
2835 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMOR_VL,
2836 RISCVISD::OR_VL);
2837 case ISD::XOR:
2838 return lowerFixedLengthVectorLogicOpToRVV(Op, DAG, RISCVISD::VMXOR_VL,
2839 RISCVISD::XOR_VL);
2840 case ISD::SDIV:
2841 return lowerToScalableOp(Op, DAG, RISCVISD::SDIV_VL);
2842 case ISD::SREM:
2843 return lowerToScalableOp(Op, DAG, RISCVISD::SREM_VL);
2844 case ISD::UDIV:
2845 return lowerToScalableOp(Op, DAG, RISCVISD::UDIV_VL);
2846 case ISD::UREM:
2847 return lowerToScalableOp(Op, DAG, RISCVISD::UREM_VL);
2848 case ISD::SHL:
2849 case ISD::SRA:
2850 case ISD::SRL:
2851 if (Op.getSimpleValueType().isFixedLengthVector())
2852 return lowerFixedLengthVectorShiftToRVV(Op, DAG);
2853 // This can be called for an i32 shift amount that needs to be promoted.
2854 assert(Op.getOperand(1).getValueType() == MVT::i32 && Subtarget.is64Bit() &&
2855 "Unexpected custom legalisation");
2856 return SDValue();
2857 case ISD::SADDSAT:
2858 return lowerToScalableOp(Op, DAG, RISCVISD::SADDSAT_VL);
2859 case ISD::UADDSAT:
2860 return lowerToScalableOp(Op, DAG, RISCVISD::UADDSAT_VL);
2861 case ISD::SSUBSAT:
2862 return lowerToScalableOp(Op, DAG, RISCVISD::SSUBSAT_VL);
2863 case ISD::USUBSAT:
2864 return lowerToScalableOp(Op, DAG, RISCVISD::USUBSAT_VL);
2865 case ISD::FADD:
2866 return lowerToScalableOp(Op, DAG, RISCVISD::FADD_VL);
2867 case ISD::FSUB:
2868 return lowerToScalableOp(Op, DAG, RISCVISD::FSUB_VL);
2869 case ISD::FMUL:
2870 return lowerToScalableOp(Op, DAG, RISCVISD::FMUL_VL);
2871 case ISD::FDIV:
2872 return lowerToScalableOp(Op, DAG, RISCVISD::FDIV_VL);
2873 case ISD::FNEG:
2874 return lowerToScalableOp(Op, DAG, RISCVISD::FNEG_VL);
2875 case ISD::FABS:
2876 return lowerToScalableOp(Op, DAG, RISCVISD::FABS_VL);
2877 case ISD::FSQRT:
2878 return lowerToScalableOp(Op, DAG, RISCVISD::FSQRT_VL);
2879 case ISD::FMA:
2880 return lowerToScalableOp(Op, DAG, RISCVISD::FMA_VL);
2881 case ISD::SMIN:
2882 return lowerToScalableOp(Op, DAG, RISCVISD::SMIN_VL);
2883 case ISD::SMAX:
2884 return lowerToScalableOp(Op, DAG, RISCVISD::SMAX_VL);
2885 case ISD::UMIN:
2886 return lowerToScalableOp(Op, DAG, RISCVISD::UMIN_VL);
2887 case ISD::UMAX:
2888 return lowerToScalableOp(Op, DAG, RISCVISD::UMAX_VL);
2889 case ISD::FMINNUM:
2890 return lowerToScalableOp(Op, DAG, RISCVISD::FMINNUM_VL);
2891 case ISD::FMAXNUM:
2892 return lowerToScalableOp(Op, DAG, RISCVISD::FMAXNUM_VL);
2893 case ISD::ABS:
2894 return lowerABS(Op, DAG);
2895 case ISD::VSELECT:
2896 return lowerFixedLengthVectorSelectToRVV(Op, DAG);
2897 case ISD::FCOPYSIGN:
2898 return lowerFixedLengthVectorFCOPYSIGNToRVV(Op, DAG);
2899 case ISD::MGATHER:
2900 case ISD::VP_GATHER:
2901 return lowerMaskedGather(Op, DAG);
2902 case ISD::MSCATTER:
2903 case ISD::VP_SCATTER:
2904 return lowerMaskedScatter(Op, DAG);
2905 case ISD::FLT_ROUNDS_:
2906 return lowerGET_ROUNDING(Op, DAG);
2907 case ISD::SET_ROUNDING:
2908 return lowerSET_ROUNDING(Op, DAG);
2909 case ISD::VP_ADD:
2910 return lowerVPOp(Op, DAG, RISCVISD::ADD_VL);
2911 case ISD::VP_SUB:
2912 return lowerVPOp(Op, DAG, RISCVISD::SUB_VL);
2913 case ISD::VP_MUL:
2914 return lowerVPOp(Op, DAG, RISCVISD::MUL_VL);
2915 case ISD::VP_SDIV:
2916 return lowerVPOp(Op, DAG, RISCVISD::SDIV_VL);
2917 case ISD::VP_UDIV:
2918 return lowerVPOp(Op, DAG, RISCVISD::UDIV_VL);
2919 case ISD::VP_SREM:
2920 return lowerVPOp(Op, DAG, RISCVISD::SREM_VL);
2921 case ISD::VP_UREM:
2922 return lowerVPOp(Op, DAG, RISCVISD::UREM_VL);
2923 case ISD::VP_AND:
2924 return lowerVPOp(Op, DAG, RISCVISD::AND_VL);
2925 case ISD::VP_OR:
2926 return lowerVPOp(Op, DAG, RISCVISD::OR_VL);
2927 case ISD::VP_XOR:
2928 return lowerVPOp(Op, DAG, RISCVISD::XOR_VL);
2929 case ISD::VP_ASHR:
2930 return lowerVPOp(Op, DAG, RISCVISD::SRA_VL);
2931 case ISD::VP_LSHR:
2932 return lowerVPOp(Op, DAG, RISCVISD::SRL_VL);
2933 case ISD::VP_SHL:
2934 return lowerVPOp(Op, DAG, RISCVISD::SHL_VL);
2935 case ISD::VP_FADD:
2936 return lowerVPOp(Op, DAG, RISCVISD::FADD_VL);
2937 case ISD::VP_FSUB:
2938 return lowerVPOp(Op, DAG, RISCVISD::FSUB_VL);
2939 case ISD::VP_FMUL:
2940 return lowerVPOp(Op, DAG, RISCVISD::FMUL_VL);
2941 case ISD::VP_FDIV:
2942 return lowerVPOp(Op, DAG, RISCVISD::FDIV_VL);
2943 }
2944 }
2945
getTargetNode(GlobalAddressSDNode * N,SDLoc DL,EVT Ty,SelectionDAG & DAG,unsigned Flags)2946 static SDValue getTargetNode(GlobalAddressSDNode *N, SDLoc DL, EVT Ty,
2947 SelectionDAG &DAG, unsigned Flags) {
2948 return DAG.getTargetGlobalAddress(N->getGlobal(), DL, Ty, 0, Flags);
2949 }
2950
getTargetNode(BlockAddressSDNode * N,SDLoc DL,EVT Ty,SelectionDAG & DAG,unsigned Flags)2951 static SDValue getTargetNode(BlockAddressSDNode *N, SDLoc DL, EVT Ty,
2952 SelectionDAG &DAG, unsigned Flags) {
2953 return DAG.getTargetBlockAddress(N->getBlockAddress(), Ty, N->getOffset(),
2954 Flags);
2955 }
2956
getTargetNode(ConstantPoolSDNode * N,SDLoc DL,EVT Ty,SelectionDAG & DAG,unsigned Flags)2957 static SDValue getTargetNode(ConstantPoolSDNode *N, SDLoc DL, EVT Ty,
2958 SelectionDAG &DAG, unsigned Flags) {
2959 return DAG.getTargetConstantPool(N->getConstVal(), Ty, N->getAlign(),
2960 N->getOffset(), Flags);
2961 }
2962
getTargetNode(JumpTableSDNode * N,SDLoc DL,EVT Ty,SelectionDAG & DAG,unsigned Flags)2963 static SDValue getTargetNode(JumpTableSDNode *N, SDLoc DL, EVT Ty,
2964 SelectionDAG &DAG, unsigned Flags) {
2965 return DAG.getTargetJumpTable(N->getIndex(), Ty, Flags);
2966 }
2967
2968 template <class NodeTy>
getAddr(NodeTy * N,SelectionDAG & DAG,bool IsLocal) const2969 SDValue RISCVTargetLowering::getAddr(NodeTy *N, SelectionDAG &DAG,
2970 bool IsLocal) const {
2971 SDLoc DL(N);
2972 EVT Ty = getPointerTy(DAG.getDataLayout());
2973
2974 if (isPositionIndependent()) {
2975 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
2976 if (IsLocal)
2977 // Use PC-relative addressing to access the symbol. This generates the
2978 // pattern (PseudoLLA sym), which expands to (addi (auipc %pcrel_hi(sym))
2979 // %pcrel_lo(auipc)).
2980 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
2981
2982 // Use PC-relative addressing to access the GOT for this symbol, then load
2983 // the address from the GOT. This generates the pattern (PseudoLA sym),
2984 // which expands to (ld (addi (auipc %got_pcrel_hi(sym)) %pcrel_lo(auipc))).
2985 return SDValue(DAG.getMachineNode(RISCV::PseudoLA, DL, Ty, Addr), 0);
2986 }
2987
2988 switch (getTargetMachine().getCodeModel()) {
2989 default:
2990 report_fatal_error("Unsupported code model for lowering");
2991 case CodeModel::Small: {
2992 // Generate a sequence for accessing addresses within the first 2 GiB of
2993 // address space. This generates the pattern (addi (lui %hi(sym)) %lo(sym)).
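    // i.e.:
    //   lui  a0, %hi(sym)
    //   addi a0, a0, %lo(sym)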
2994 SDValue AddrHi = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_HI);
2995 SDValue AddrLo = getTargetNode(N, DL, Ty, DAG, RISCVII::MO_LO);
2996 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
2997 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNHi, AddrLo), 0);
2998 }
2999 case CodeModel::Medium: {
3000     // Generate a sequence for accessing addresses within any 2 GiB range of
3001 // the address space. This generates the pattern (PseudoLLA sym), which
3002 // expands to (addi (auipc %pcrel_hi(sym)) %pcrel_lo(auipc)).
3003 SDValue Addr = getTargetNode(N, DL, Ty, DAG, 0);
3004 return SDValue(DAG.getMachineNode(RISCV::PseudoLLA, DL, Ty, Addr), 0);
3005 }
3006 }
3007 }
3008
lowerGlobalAddress(SDValue Op,SelectionDAG & DAG) const3009 SDValue RISCVTargetLowering::lowerGlobalAddress(SDValue Op,
3010 SelectionDAG &DAG) const {
3011 SDLoc DL(Op);
3012 EVT Ty = Op.getValueType();
3013 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3014 int64_t Offset = N->getOffset();
3015 MVT XLenVT = Subtarget.getXLenVT();
3016
3017 const GlobalValue *GV = N->getGlobal();
3018 bool IsLocal = getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV);
3019 SDValue Addr = getAddr(N, DAG, IsLocal);
3020
3021 // In order to maximise the opportunity for common subexpression elimination,
3022 // emit a separate ADD node for the global address offset instead of folding
3023   // it into the global address node. Later peephole optimisations may choose
3024   // to fold it back in when profitable.
3025 if (Offset != 0)
3026 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3027 DAG.getConstant(Offset, DL, XLenVT));
3028 return Addr;
3029 }
3030
lowerBlockAddress(SDValue Op,SelectionDAG & DAG) const3031 SDValue RISCVTargetLowering::lowerBlockAddress(SDValue Op,
3032 SelectionDAG &DAG) const {
3033 BlockAddressSDNode *N = cast<BlockAddressSDNode>(Op);
3034
3035 return getAddr(N, DAG);
3036 }
3037
lowerConstantPool(SDValue Op,SelectionDAG & DAG) const3038 SDValue RISCVTargetLowering::lowerConstantPool(SDValue Op,
3039 SelectionDAG &DAG) const {
3040 ConstantPoolSDNode *N = cast<ConstantPoolSDNode>(Op);
3041
3042 return getAddr(N, DAG);
3043 }
3044
lowerJumpTable(SDValue Op,SelectionDAG & DAG) const3045 SDValue RISCVTargetLowering::lowerJumpTable(SDValue Op,
3046 SelectionDAG &DAG) const {
3047 JumpTableSDNode *N = cast<JumpTableSDNode>(Op);
3048
3049 return getAddr(N, DAG);
3050 }
3051
getStaticTLSAddr(GlobalAddressSDNode * N,SelectionDAG & DAG,bool UseGOT) const3052 SDValue RISCVTargetLowering::getStaticTLSAddr(GlobalAddressSDNode *N,
3053 SelectionDAG &DAG,
3054 bool UseGOT) const {
3055 SDLoc DL(N);
3056 EVT Ty = getPointerTy(DAG.getDataLayout());
3057 const GlobalValue *GV = N->getGlobal();
3058 MVT XLenVT = Subtarget.getXLenVT();
3059
3060 if (UseGOT) {
3061 // Use PC-relative addressing to access the GOT for this TLS symbol, then
3062 // load the address from the GOT and add the thread pointer. This generates
3063 // the pattern (PseudoLA_TLS_IE sym), which expands to
3064 // (ld (auipc %tls_ie_pcrel_hi(sym)) %pcrel_lo(auipc)).
3065 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3066 SDValue Load =
3067 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_IE, DL, Ty, Addr), 0);
3068
3069 // Add the thread pointer.
3070 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3071 return DAG.getNode(ISD::ADD, DL, Ty, Load, TPReg);
3072 }
3073
3074 // Generate a sequence for accessing the address relative to the thread
3075 // pointer, with the appropriate adjustment for the thread pointer offset.
3076 // This generates the pattern
3077 // (add (add_tprel (lui %tprel_hi(sym)) tp %tprel_add(sym)) %tprel_lo(sym))
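  // i.e.:
  //   lui  a0, %tprel_hi(sym)
  //   add  a0, a0, tp, %tprel_add(sym)
  //   addi a0, a0, %tprel_lo(sym)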
3078 SDValue AddrHi =
3079 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_HI);
3080 SDValue AddrAdd =
3081 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_ADD);
3082 SDValue AddrLo =
3083 DAG.getTargetGlobalAddress(GV, DL, Ty, 0, RISCVII::MO_TPREL_LO);
3084
3085 SDValue MNHi = SDValue(DAG.getMachineNode(RISCV::LUI, DL, Ty, AddrHi), 0);
3086 SDValue TPReg = DAG.getRegister(RISCV::X4, XLenVT);
3087 SDValue MNAdd = SDValue(
3088 DAG.getMachineNode(RISCV::PseudoAddTPRel, DL, Ty, MNHi, TPReg, AddrAdd),
3089 0);
3090 return SDValue(DAG.getMachineNode(RISCV::ADDI, DL, Ty, MNAdd, AddrLo), 0);
3091 }
3092
getDynamicTLSAddr(GlobalAddressSDNode * N,SelectionDAG & DAG) const3093 SDValue RISCVTargetLowering::getDynamicTLSAddr(GlobalAddressSDNode *N,
3094 SelectionDAG &DAG) const {
3095 SDLoc DL(N);
3096 EVT Ty = getPointerTy(DAG.getDataLayout());
3097 IntegerType *CallTy = Type::getIntNTy(*DAG.getContext(), Ty.getSizeInBits());
3098 const GlobalValue *GV = N->getGlobal();
3099
3100 // Use a PC-relative addressing mode to access the global dynamic GOT address.
3101 // This generates the pattern (PseudoLA_TLS_GD sym), which expands to
3102 // (addi (auipc %tls_gd_pcrel_hi(sym)) %pcrel_lo(auipc)).
3103 SDValue Addr = DAG.getTargetGlobalAddress(GV, DL, Ty, 0, 0);
3104 SDValue Load =
3105 SDValue(DAG.getMachineNode(RISCV::PseudoLA_TLS_GD, DL, Ty, Addr), 0);
3106
3107   // Prepare the argument list for the call.
3108 ArgListTy Args;
3109 ArgListEntry Entry;
3110 Entry.Node = Load;
3111 Entry.Ty = CallTy;
3112 Args.push_back(Entry);
3113
3114   // Set up the call to __tls_get_addr.
3115 TargetLowering::CallLoweringInfo CLI(DAG);
3116 CLI.setDebugLoc(DL)
3117 .setChain(DAG.getEntryNode())
3118 .setLibCallee(CallingConv::C, CallTy,
3119 DAG.getExternalSymbol("__tls_get_addr", Ty),
3120 std::move(Args));
3121
3122 return LowerCallTo(CLI).first;
3123 }
3124
lowerGlobalTLSAddress(SDValue Op,SelectionDAG & DAG) const3125 SDValue RISCVTargetLowering::lowerGlobalTLSAddress(SDValue Op,
3126 SelectionDAG &DAG) const {
3127 SDLoc DL(Op);
3128 EVT Ty = Op.getValueType();
3129 GlobalAddressSDNode *N = cast<GlobalAddressSDNode>(Op);
3130 int64_t Offset = N->getOffset();
3131 MVT XLenVT = Subtarget.getXLenVT();
3132
3133 TLSModel::Model Model = getTargetMachine().getTLSModel(N->getGlobal());
3134
3135 if (DAG.getMachineFunction().getFunction().getCallingConv() ==
3136 CallingConv::GHC)
3137 report_fatal_error("In GHC calling convention TLS is not supported");
3138
3139 SDValue Addr;
3140 switch (Model) {
3141 case TLSModel::LocalExec:
3142 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/false);
3143 break;
3144 case TLSModel::InitialExec:
3145 Addr = getStaticTLSAddr(N, DAG, /*UseGOT=*/true);
3146 break;
3147 case TLSModel::LocalDynamic:
3148 case TLSModel::GeneralDynamic:
3149 Addr = getDynamicTLSAddr(N, DAG);
3150 break;
3151 }
3152
3153 // In order to maximise the opportunity for common subexpression elimination,
3154 // emit a separate ADD node for the global address offset instead of folding
3155   // it into the global address node. Later peephole optimisations may choose
3156   // to fold it back in when profitable.
3157 if (Offset != 0)
3158 return DAG.getNode(ISD::ADD, DL, Ty, Addr,
3159 DAG.getConstant(Offset, DL, XLenVT));
3160 return Addr;
3161 }
3162
lowerSELECT(SDValue Op,SelectionDAG & DAG) const3163 SDValue RISCVTargetLowering::lowerSELECT(SDValue Op, SelectionDAG &DAG) const {
3164 SDValue CondV = Op.getOperand(0);
3165 SDValue TrueV = Op.getOperand(1);
3166 SDValue FalseV = Op.getOperand(2);
3167 SDLoc DL(Op);
3168 MVT VT = Op.getSimpleValueType();
3169 MVT XLenVT = Subtarget.getXLenVT();
3170
3171 // Lower vector SELECTs to VSELECTs by splatting the condition.
3172 if (VT.isVector()) {
3173 MVT SplatCondVT = VT.changeVectorElementType(MVT::i1);
3174 SDValue CondSplat = VT.isScalableVector()
3175 ? DAG.getSplatVector(SplatCondVT, DL, CondV)
3176 : DAG.getSplatBuildVector(SplatCondVT, DL, CondV);
3177 return DAG.getNode(ISD::VSELECT, DL, VT, CondSplat, TrueV, FalseV);
3178 }
3179
3180 // If the result type is XLenVT and CondV is the output of a SETCC node
3181 // which also operated on XLenVT inputs, then merge the SETCC node into the
3182 // lowered RISCVISD::SELECT_CC to take advantage of the integer
3183 // compare+branch instructions. i.e.:
3184 // (select (setcc lhs, rhs, cc), truev, falsev)
3185 // -> (riscvisd::select_cc lhs, rhs, cc, truev, falsev)
3186 if (VT == XLenVT && CondV.getOpcode() == ISD::SETCC &&
3187 CondV.getOperand(0).getSimpleValueType() == XLenVT) {
3188 SDValue LHS = CondV.getOperand(0);
3189 SDValue RHS = CondV.getOperand(1);
3190 const auto *CC = cast<CondCodeSDNode>(CondV.getOperand(2));
3191 ISD::CondCode CCVal = CC->get();
3192
3193     // Special case for a select of 2 constants that have a difference of 1.
3194 // Normally this is done by DAGCombine, but if the select is introduced by
3195 // type legalization or op legalization, we miss it. Restricting to SETLT
3196 // case for now because that is what signed saturating add/sub need.
3197 // FIXME: We don't need the condition to be SETLT or even a SETCC,
3198 // but we would probably want to swap the true/false values if the condition
3199 // is SETGE/SETLE to avoid an XORI.
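    // e.g. (select %c, 6, 5) becomes (add %c, 5), and (select %c, 4, 5)
    // becomes (sub 5, %c), since %c is known to be either 0 or 1.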
3200 if (isa<ConstantSDNode>(TrueV) && isa<ConstantSDNode>(FalseV) &&
3201 CCVal == ISD::SETLT) {
3202 const APInt &TrueVal = cast<ConstantSDNode>(TrueV)->getAPIntValue();
3203 const APInt &FalseVal = cast<ConstantSDNode>(FalseV)->getAPIntValue();
3204 if (TrueVal - 1 == FalseVal)
3205 return DAG.getNode(ISD::ADD, DL, Op.getValueType(), CondV, FalseV);
3206 if (TrueVal + 1 == FalseVal)
3207 return DAG.getNode(ISD::SUB, DL, Op.getValueType(), FalseV, CondV);
3208 }
3209
3210 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3211
3212 SDValue TargetCC = DAG.getCondCode(CCVal);
3213 SDValue Ops[] = {LHS, RHS, TargetCC, TrueV, FalseV};
3214 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3215 }
3216
3217 // Otherwise:
3218 // (select condv, truev, falsev)
3219 // -> (riscvisd::select_cc condv, zero, setne, truev, falsev)
3220 SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3221 SDValue SetNE = DAG.getCondCode(ISD::SETNE);
3222
3223 SDValue Ops[] = {CondV, Zero, SetNE, TrueV, FalseV};
3224
3225 return DAG.getNode(RISCVISD::SELECT_CC, DL, Op.getValueType(), Ops);
3226 }
3227
lowerBRCOND(SDValue Op,SelectionDAG & DAG) const3228 SDValue RISCVTargetLowering::lowerBRCOND(SDValue Op, SelectionDAG &DAG) const {
3229 SDValue CondV = Op.getOperand(1);
3230 SDLoc DL(Op);
3231 MVT XLenVT = Subtarget.getXLenVT();
3232
3233 if (CondV.getOpcode() == ISD::SETCC &&
3234 CondV.getOperand(0).getValueType() == XLenVT) {
3235 SDValue LHS = CondV.getOperand(0);
3236 SDValue RHS = CondV.getOperand(1);
3237 ISD::CondCode CCVal = cast<CondCodeSDNode>(CondV.getOperand(2))->get();
3238
3239 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
3240
3241 SDValue TargetCC = DAG.getCondCode(CCVal);
3242 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3243 LHS, RHS, TargetCC, Op.getOperand(2));
3244 }
3245
3246 return DAG.getNode(RISCVISD::BR_CC, DL, Op.getValueType(), Op.getOperand(0),
3247 CondV, DAG.getConstant(0, DL, XLenVT),
3248 DAG.getCondCode(ISD::SETNE), Op.getOperand(2));
3249 }
3250
lowerVASTART(SDValue Op,SelectionDAG & DAG) const3251 SDValue RISCVTargetLowering::lowerVASTART(SDValue Op, SelectionDAG &DAG) const {
3252 MachineFunction &MF = DAG.getMachineFunction();
3253 RISCVMachineFunctionInfo *FuncInfo = MF.getInfo<RISCVMachineFunctionInfo>();
3254
3255 SDLoc DL(Op);
3256 SDValue FI = DAG.getFrameIndex(FuncInfo->getVarArgsFrameIndex(),
3257 getPointerTy(MF.getDataLayout()));
3258
3259 // vastart just stores the address of the VarArgsFrameIndex slot into the
3260 // memory location argument.
3261 const Value *SV = cast<SrcValueSDNode>(Op.getOperand(2))->getValue();
3262 return DAG.getStore(Op.getOperand(0), DL, FI, Op.getOperand(1),
3263 MachinePointerInfo(SV));
3264 }
3265
lowerFRAMEADDR(SDValue Op,SelectionDAG & DAG) const3266 SDValue RISCVTargetLowering::lowerFRAMEADDR(SDValue Op,
3267 SelectionDAG &DAG) const {
3268 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3269 MachineFunction &MF = DAG.getMachineFunction();
3270 MachineFrameInfo &MFI = MF.getFrameInfo();
3271 MFI.setFrameAddressIsTaken(true);
3272 Register FrameReg = RI.getFrameRegister(MF);
3273 int XLenInBytes = Subtarget.getXLen() / 8;
3274
3275 EVT VT = Op.getValueType();
3276 SDLoc DL(Op);
3277 SDValue FrameAddr = DAG.getCopyFromReg(DAG.getEntryNode(), DL, FrameReg, VT);
3278 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3279 while (Depth--) {
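    // Each frame is assumed to save the previous frame pointer two XLEN-sized
    // slots below the incoming frame address (next to the return address), so
    // chase that link Depth times.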
3280 int Offset = -(XLenInBytes * 2);
3281 SDValue Ptr = DAG.getNode(ISD::ADD, DL, VT, FrameAddr,
3282 DAG.getIntPtrConstant(Offset, DL));
3283 FrameAddr =
3284 DAG.getLoad(VT, DL, DAG.getEntryNode(), Ptr, MachinePointerInfo());
3285 }
3286 return FrameAddr;
3287 }
3288
lowerRETURNADDR(SDValue Op,SelectionDAG & DAG) const3289 SDValue RISCVTargetLowering::lowerRETURNADDR(SDValue Op,
3290 SelectionDAG &DAG) const {
3291 const RISCVRegisterInfo &RI = *Subtarget.getRegisterInfo();
3292 MachineFunction &MF = DAG.getMachineFunction();
3293 MachineFrameInfo &MFI = MF.getFrameInfo();
3294 MFI.setReturnAddressIsTaken(true);
3295 MVT XLenVT = Subtarget.getXLenVT();
3296 int XLenInBytes = Subtarget.getXLen() / 8;
3297
3298 if (verifyReturnAddressArgumentIsConstant(Op, DAG))
3299 return SDValue();
3300
3301 EVT VT = Op.getValueType();
3302 SDLoc DL(Op);
3303 unsigned Depth = cast<ConstantSDNode>(Op.getOperand(0))->getZExtValue();
3304 if (Depth) {
3305 int Off = -XLenInBytes;
3306 SDValue FrameAddr = lowerFRAMEADDR(Op, DAG);
3307 SDValue Offset = DAG.getConstant(Off, DL, VT);
3308 return DAG.getLoad(VT, DL, DAG.getEntryNode(),
3309 DAG.getNode(ISD::ADD, DL, VT, FrameAddr, Offset),
3310 MachinePointerInfo());
3311 }
3312
3313 // Return the value of the return address register, marking it an implicit
3314 // live-in.
3315 Register Reg = MF.addLiveIn(RI.getRARegister(), getRegClassFor(XLenVT));
3316 return DAG.getCopyFromReg(DAG.getEntryNode(), DL, Reg, XLenVT);
3317 }
3318
lowerShiftLeftParts(SDValue Op,SelectionDAG & DAG) const3319 SDValue RISCVTargetLowering::lowerShiftLeftParts(SDValue Op,
3320 SelectionDAG &DAG) const {
3321 SDLoc DL(Op);
3322 SDValue Lo = Op.getOperand(0);
3323 SDValue Hi = Op.getOperand(1);
3324 SDValue Shamt = Op.getOperand(2);
3325 EVT VT = Lo.getValueType();
3326
3327 // if Shamt-XLEN < 0: // Shamt < XLEN
3328 // Lo = Lo << Shamt
3329 // Hi = (Hi << Shamt) | ((Lo >>u 1) >>u (XLEN-1 - Shamt))
3330 // else:
3331 // Lo = 0
3332 // Hi = Lo << (Shamt-XLEN)
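  //
  // Note that the Lo contribution to Hi is written as
  // ((Lo >>u 1) >>u (XLEN-1 - Shamt)) rather than (Lo >>u (XLEN - Shamt)) so
  // that the shift amount stays in range when Shamt is 0.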
3333
3334 SDValue Zero = DAG.getConstant(0, DL, VT);
3335 SDValue One = DAG.getConstant(1, DL, VT);
3336 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3337 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3338 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3339 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3340
3341 SDValue LoTrue = DAG.getNode(ISD::SHL, DL, VT, Lo, Shamt);
3342 SDValue ShiftRight1Lo = DAG.getNode(ISD::SRL, DL, VT, Lo, One);
3343 SDValue ShiftRightLo =
3344 DAG.getNode(ISD::SRL, DL, VT, ShiftRight1Lo, XLenMinus1Shamt);
3345 SDValue ShiftLeftHi = DAG.getNode(ISD::SHL, DL, VT, Hi, Shamt);
3346 SDValue HiTrue = DAG.getNode(ISD::OR, DL, VT, ShiftLeftHi, ShiftRightLo);
3347 SDValue HiFalse = DAG.getNode(ISD::SHL, DL, VT, Lo, ShamtMinusXLen);
3348
3349 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3350
3351 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, Zero);
3352 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3353
3354 SDValue Parts[2] = {Lo, Hi};
3355 return DAG.getMergeValues(Parts, DL);
3356 }
3357
lowerShiftRightParts(SDValue Op,SelectionDAG & DAG,bool IsSRA) const3358 SDValue RISCVTargetLowering::lowerShiftRightParts(SDValue Op, SelectionDAG &DAG,
3359 bool IsSRA) const {
3360 SDLoc DL(Op);
3361 SDValue Lo = Op.getOperand(0);
3362 SDValue Hi = Op.getOperand(1);
3363 SDValue Shamt = Op.getOperand(2);
3364 EVT VT = Lo.getValueType();
3365
3366 // SRA expansion:
3367 // if Shamt-XLEN < 0: // Shamt < XLEN
3368 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3369 // Hi = Hi >>s Shamt
3370 // else:
3371 // Lo = Hi >>s (Shamt-XLEN);
3372 // Hi = Hi >>s (XLEN-1)
3373 //
3374 // SRL expansion:
3375 // if Shamt-XLEN < 0: // Shamt < XLEN
3376 // Lo = (Lo >>u Shamt) | ((Hi << 1) << (XLEN-1 - Shamt))
3377 // Hi = Hi >>u Shamt
3378 // else:
3379 // Lo = Hi >>u (Shamt-XLEN);
3380 // Hi = 0;
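  //
  // As in lowerShiftLeftParts, the Hi contribution to Lo is split as
  // ((Hi << 1) << (XLEN-1 - Shamt)) to keep the shift amount in range when
  // Shamt is 0.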
3381
3382 unsigned ShiftRightOp = IsSRA ? ISD::SRA : ISD::SRL;
3383
3384 SDValue Zero = DAG.getConstant(0, DL, VT);
3385 SDValue One = DAG.getConstant(1, DL, VT);
3386 SDValue MinusXLen = DAG.getConstant(-(int)Subtarget.getXLen(), DL, VT);
3387 SDValue XLenMinus1 = DAG.getConstant(Subtarget.getXLen() - 1, DL, VT);
3388 SDValue ShamtMinusXLen = DAG.getNode(ISD::ADD, DL, VT, Shamt, MinusXLen);
3389 SDValue XLenMinus1Shamt = DAG.getNode(ISD::SUB, DL, VT, XLenMinus1, Shamt);
3390
3391 SDValue ShiftRightLo = DAG.getNode(ISD::SRL, DL, VT, Lo, Shamt);
3392 SDValue ShiftLeftHi1 = DAG.getNode(ISD::SHL, DL, VT, Hi, One);
3393 SDValue ShiftLeftHi =
3394 DAG.getNode(ISD::SHL, DL, VT, ShiftLeftHi1, XLenMinus1Shamt);
3395 SDValue LoTrue = DAG.getNode(ISD::OR, DL, VT, ShiftRightLo, ShiftLeftHi);
3396 SDValue HiTrue = DAG.getNode(ShiftRightOp, DL, VT, Hi, Shamt);
3397 SDValue LoFalse = DAG.getNode(ShiftRightOp, DL, VT, Hi, ShamtMinusXLen);
3398 SDValue HiFalse =
3399 IsSRA ? DAG.getNode(ISD::SRA, DL, VT, Hi, XLenMinus1) : Zero;
3400
3401 SDValue CC = DAG.getSetCC(DL, VT, ShamtMinusXLen, Zero, ISD::SETLT);
3402
3403 Lo = DAG.getNode(ISD::SELECT, DL, VT, CC, LoTrue, LoFalse);
3404 Hi = DAG.getNode(ISD::SELECT, DL, VT, CC, HiTrue, HiFalse);
3405
3406 SDValue Parts[2] = {Lo, Hi};
3407 return DAG.getMergeValues(Parts, DL);
3408 }
3409
3410 // Lower splats of i1 types to SETCC. For each mask vector type, we have a
3411 // legal equivalently-sized i8 type, so we can use that as a go-between.
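// For a non-constant splat value %x, the result is roughly
//   (setcc (splat_i8 (and %x, 1)), (splat_i8 0), setne).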
lowerVectorMaskSplat(SDValue Op,SelectionDAG & DAG) const3412 SDValue RISCVTargetLowering::lowerVectorMaskSplat(SDValue Op,
3413 SelectionDAG &DAG) const {
3414 SDLoc DL(Op);
3415 MVT VT = Op.getSimpleValueType();
3416 SDValue SplatVal = Op.getOperand(0);
3417 // All-zeros or all-ones splats are handled specially.
3418 if (ISD::isConstantSplatVectorAllOnes(Op.getNode())) {
3419 SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3420 return DAG.getNode(RISCVISD::VMSET_VL, DL, VT, VL);
3421 }
3422 if (ISD::isConstantSplatVectorAllZeros(Op.getNode())) {
3423 SDValue VL = getDefaultScalableVLOps(VT, DL, DAG, Subtarget).second;
3424 return DAG.getNode(RISCVISD::VMCLR_VL, DL, VT, VL);
3425 }
3426 MVT XLenVT = Subtarget.getXLenVT();
3427 assert(SplatVal.getValueType() == XLenVT &&
3428 "Unexpected type for i1 splat value");
3429 MVT InterVT = VT.changeVectorElementType(MVT::i8);
3430 SplatVal = DAG.getNode(ISD::AND, DL, XLenVT, SplatVal,
3431 DAG.getConstant(1, DL, XLenVT));
3432 SDValue LHS = DAG.getSplatVector(InterVT, DL, SplatVal);
3433 SDValue Zero = DAG.getConstant(0, DL, InterVT);
3434 return DAG.getSetCC(DL, VT, LHS, Zero, ISD::SETNE);
3435 }
3436
3437 // Custom-lower a SPLAT_VECTOR_PARTS where XLEN<SEW, as the SEW element type is
3438 // illegal (currently only vXi64 on RV32).
3439 // FIXME: We could also catch non-constant sign-extended i32 values and lower
3440 // them to SPLAT_VECTOR_I64.
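// On RV32, a vXi64 SPLAT_VECTOR reaches here as SPLAT_VECTOR_PARTS with the
// i64 splat value already split into its Lo and Hi halves.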
lowerSPLAT_VECTOR_PARTS(SDValue Op,SelectionDAG & DAG) const3441 SDValue RISCVTargetLowering::lowerSPLAT_VECTOR_PARTS(SDValue Op,
3442 SelectionDAG &DAG) const {
3443 SDLoc DL(Op);
3444 MVT VecVT = Op.getSimpleValueType();
3445 assert(!Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64 &&
3446 "Unexpected SPLAT_VECTOR_PARTS lowering");
3447
3448 assert(Op.getNumOperands() == 2 && "Unexpected number of operands!");
3449 SDValue Lo = Op.getOperand(0);
3450 SDValue Hi = Op.getOperand(1);
3451
3452 if (VecVT.isFixedLengthVector()) {
3453 MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3454 SDLoc DL(Op);
3455 SDValue Mask, VL;
3456 std::tie(Mask, VL) =
3457 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3458
3459 SDValue Res = splatPartsI64WithVL(DL, ContainerVT, Lo, Hi, VL, DAG);
3460 return convertFromScalableVector(VecVT, Res, DAG, Subtarget);
3461 }
3462
3463 if (isa<ConstantSDNode>(Lo) && isa<ConstantSDNode>(Hi)) {
3464 int32_t LoC = cast<ConstantSDNode>(Lo)->getSExtValue();
3465 int32_t HiC = cast<ConstantSDNode>(Hi)->getSExtValue();
3466     // If the Hi constant is the sign bit of Lo replicated, lower this as a custom
3467 // node in order to try and match RVV vector/scalar instructions.
3468 if ((LoC >> 31) == HiC)
3469 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3470 }
3471
3472 // Detect cases where Hi is (SRA Lo, 31) which means Hi is Lo sign extended.
3473 if (Hi.getOpcode() == ISD::SRA && Hi.getOperand(0) == Lo &&
3474 isa<ConstantSDNode>(Hi.getOperand(1)) &&
3475 Hi.getConstantOperandVal(1) == 31)
3476 return DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, Lo);
3477
3478   // Fall back to a stack store plus a stride-x0 vector load. Use X0 as VL.
3479 return DAG.getNode(RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL, DL, VecVT, Lo, Hi,
3480 DAG.getTargetConstant(RISCV::VLMaxSentinel, DL, MVT::i64));
3481 }
3482
3483 // Custom-lower extensions from mask vectors by using a vselect either with 1
3484 // for zero/any-extension or -1 for sign-extension:
3485 // (vXiN = (s|z)ext vXi1:vmask) -> (vXiN = vselect vmask, (-1 or 1), 0)
3486 // Note that any-extension is lowered identically to zero-extension.
lowerVectorMaskExt(SDValue Op,SelectionDAG & DAG,int64_t ExtTrueVal) const3487 SDValue RISCVTargetLowering::lowerVectorMaskExt(SDValue Op, SelectionDAG &DAG,
3488 int64_t ExtTrueVal) const {
3489 SDLoc DL(Op);
3490 MVT VecVT = Op.getSimpleValueType();
3491 SDValue Src = Op.getOperand(0);
3492 // Only custom-lower extensions from mask types
3493 assert(Src.getValueType().isVector() &&
3494 Src.getValueType().getVectorElementType() == MVT::i1);
3495
3496 MVT XLenVT = Subtarget.getXLenVT();
3497 SDValue SplatZero = DAG.getConstant(0, DL, XLenVT);
3498 SDValue SplatTrueVal = DAG.getConstant(ExtTrueVal, DL, XLenVT);
3499
3500 if (VecVT.isScalableVector()) {
3501 // Be careful not to introduce illegal scalar types at this stage, and be
3502     // careful also about splatting constants: on RV32, vXi64 SPLAT_VECTOR is
3503 // illegal and must be expanded. Since we know that the constants are
3504 // sign-extended 32-bit values, we use SPLAT_VECTOR_I64 directly.
3505 bool IsRV32E64 =
3506 !Subtarget.is64Bit() && VecVT.getVectorElementType() == MVT::i64;
3507
3508 if (!IsRV32E64) {
3509 SplatZero = DAG.getSplatVector(VecVT, DL, SplatZero);
3510 SplatTrueVal = DAG.getSplatVector(VecVT, DL, SplatTrueVal);
3511 } else {
3512 SplatZero = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatZero);
3513 SplatTrueVal =
3514 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VecVT, SplatTrueVal);
3515 }
3516
3517 return DAG.getNode(ISD::VSELECT, DL, VecVT, Src, SplatTrueVal, SplatZero);
3518 }
3519
3520 MVT ContainerVT = getContainerForFixedLengthVector(VecVT);
3521 MVT I1ContainerVT =
3522 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3523
3524 SDValue CC = convertToScalableVector(I1ContainerVT, Src, DAG, Subtarget);
3525
3526 SDValue Mask, VL;
3527 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3528
3529 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero, VL);
3530 SplatTrueVal =
3531 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatTrueVal, VL);
3532 SDValue Select = DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC,
3533 SplatTrueVal, SplatZero, VL);
3534
3535 return convertFromScalableVector(VecVT, Select, DAG, Subtarget);
3536 }
3537
lowerFixedLengthVectorExtendToRVV(SDValue Op,SelectionDAG & DAG,unsigned ExtendOpc) const3538 SDValue RISCVTargetLowering::lowerFixedLengthVectorExtendToRVV(
3539 SDValue Op, SelectionDAG &DAG, unsigned ExtendOpc) const {
3540 MVT ExtVT = Op.getSimpleValueType();
3541 // Only custom-lower extensions from fixed-length vector types.
3542 if (!ExtVT.isFixedLengthVector())
3543 return Op;
3544 MVT VT = Op.getOperand(0).getSimpleValueType();
3545 // Grab the canonical container type for the extended type. Infer the smaller
3546 // type from that to ensure the same number of vector elements, as we know
3547 // the LMUL will be sufficient to hold the smaller type.
3548 MVT ContainerExtVT = getContainerForFixedLengthVector(ExtVT);
3549   // Construct the narrower container type manually to keep the same number of
3550   // vector elements between source and dest.
3551 MVT ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
3552 ContainerExtVT.getVectorElementCount());
3553
3554 SDValue Op1 =
3555 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
3556
3557 SDLoc DL(Op);
3558 SDValue Mask, VL;
3559 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
3560
3561 SDValue Ext = DAG.getNode(ExtendOpc, DL, ContainerExtVT, Op1, Mask, VL);
3562
3563 return convertFromScalableVector(ExtVT, Ext, DAG, Subtarget);
3564 }
3565
3566 // Custom-lower truncations from vectors to mask vectors by using a mask and a
3567 // setcc operation:
3568 // (vXi1 = trunc vXiN vec) -> (vXi1 = setcc (and vec, 1), 0, ne)
lowerVectorMaskTrunc(SDValue Op,SelectionDAG & DAG) const3569 SDValue RISCVTargetLowering::lowerVectorMaskTrunc(SDValue Op,
3570 SelectionDAG &DAG) const {
3571 SDLoc DL(Op);
3572 EVT MaskVT = Op.getValueType();
3573 // Only expect to custom-lower truncations to mask types
3574 assert(MaskVT.isVector() && MaskVT.getVectorElementType() == MVT::i1 &&
3575 "Unexpected type for vector mask lowering");
3576 SDValue Src = Op.getOperand(0);
3577 MVT VecVT = Src.getSimpleValueType();
3578
3579 // If this is a fixed vector, we need to convert it to a scalable vector.
3580 MVT ContainerVT = VecVT;
3581 if (VecVT.isFixedLengthVector()) {
3582 ContainerVT = getContainerForFixedLengthVector(VecVT);
3583 Src = convertToScalableVector(ContainerVT, Src, DAG, Subtarget);
3584 }
3585
3586 SDValue SplatOne = DAG.getConstant(1, DL, Subtarget.getXLenVT());
3587 SDValue SplatZero = DAG.getConstant(0, DL, Subtarget.getXLenVT());
3588
3589 SplatOne = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatOne);
3590 SplatZero = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT, SplatZero);
3591
3592 if (VecVT.isScalableVector()) {
3593 SDValue Trunc = DAG.getNode(ISD::AND, DL, VecVT, Src, SplatOne);
3594 return DAG.getSetCC(DL, MaskVT, Trunc, SplatZero, ISD::SETNE);
3595 }
3596
3597 SDValue Mask, VL;
3598 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3599
3600 MVT MaskContainerVT = ContainerVT.changeVectorElementType(MVT::i1);
3601 SDValue Trunc =
3602 DAG.getNode(RISCVISD::AND_VL, DL, ContainerVT, Src, SplatOne, Mask, VL);
3603 Trunc = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskContainerVT, Trunc, SplatZero,
3604 DAG.getCondCode(ISD::SETNE), Mask, VL);
3605 return convertFromScalableVector(MaskVT, Trunc, DAG, Subtarget);
3606 }
3607
3608 // Custom-legalize INSERT_VECTOR_ELT so that the value is inserted into the
3609 // first position of a vector, and that vector is slid up to the insert index.
3610 // By limiting the active vector length to index+1 and merging with the
3611 // original vector (with an undisturbed tail policy for elements >= VL), we
3612 // achieve the desired result of leaving all elements untouched except the one
3613 // at VL-1, which is replaced with the desired value.
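// For example, inserting %x at index 2: %x is placed at element 0 of a
// temporary vector, then (vslideup vec, tmp, 2) with VL=3 writes only
// element 2 of the destination.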
lowerINSERT_VECTOR_ELT(SDValue Op,SelectionDAG & DAG) const3614 SDValue RISCVTargetLowering::lowerINSERT_VECTOR_ELT(SDValue Op,
3615 SelectionDAG &DAG) const {
3616 SDLoc DL(Op);
3617 MVT VecVT = Op.getSimpleValueType();
3618 SDValue Vec = Op.getOperand(0);
3619 SDValue Val = Op.getOperand(1);
3620 SDValue Idx = Op.getOperand(2);
3621
3622 if (VecVT.getVectorElementType() == MVT::i1) {
3623 // FIXME: For now we just promote to an i8 vector and insert into that,
3624 // but this is probably not optimal.
3625 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3626 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3627 Vec = DAG.getNode(ISD::INSERT_VECTOR_ELT, DL, WideVT, Vec, Val, Idx);
3628 return DAG.getNode(ISD::TRUNCATE, DL, VecVT, Vec);
3629 }
3630
3631 MVT ContainerVT = VecVT;
3632 // If the operand is a fixed-length vector, convert to a scalable one.
3633 if (VecVT.isFixedLengthVector()) {
3634 ContainerVT = getContainerForFixedLengthVector(VecVT);
3635 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3636 }
3637
3638 MVT XLenVT = Subtarget.getXLenVT();
3639
3640 SDValue Zero = DAG.getConstant(0, DL, XLenVT);
3641 bool IsLegalInsert = Subtarget.is64Bit() || Val.getValueType() != MVT::i64;
3642   // Even i64-element vectors on RV32 can be lowered without scalar
3643   // legalization if the constant's upper 32 bits match the sign-extension of
3644   // its lower 32 bits.
3645 // TODO: We could also catch sign extensions of a 32-bit value.
3646 if (!IsLegalInsert && isa<ConstantSDNode>(Val)) {
3647 const auto *CVal = cast<ConstantSDNode>(Val);
3648 if (isInt<32>(CVal->getSExtValue())) {
3649 IsLegalInsert = true;
3650 Val = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3651 }
3652 }
3653
3654 SDValue Mask, VL;
3655 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
3656
3657 SDValue ValInVec;
3658
3659 if (IsLegalInsert) {
3660 unsigned Opc =
3661 VecVT.isFloatingPoint() ? RISCVISD::VFMV_S_F_VL : RISCVISD::VMV_S_X_VL;
3662 if (isNullConstant(Idx)) {
3663 Vec = DAG.getNode(Opc, DL, ContainerVT, Vec, Val, VL);
3664 if (!VecVT.isFixedLengthVector())
3665 return Vec;
3666 return convertFromScalableVector(VecVT, Vec, DAG, Subtarget);
3667 }
3668 ValInVec =
3669 DAG.getNode(Opc, DL, ContainerVT, DAG.getUNDEF(ContainerVT), Val, VL);
3670 } else {
3671 // On RV32, i64-element vectors must be specially handled to place the
3672 // value at element 0, by using two vslide1up instructions in sequence on
3673 // the i32 split lo/hi value. Use an equivalently-sized i32 vector for
3674 // this.
3675 SDValue One = DAG.getConstant(1, DL, XLenVT);
3676 SDValue ValLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, Zero);
3677 SDValue ValHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Val, One);
3678 MVT I32ContainerVT =
3679 MVT::getVectorVT(MVT::i32, ContainerVT.getVectorElementCount() * 2);
3680 SDValue I32Mask =
3681 getDefaultScalableVLOps(I32ContainerVT, DL, DAG, Subtarget).first;
3682 // Limit the active VL to two.
3683 SDValue InsertI64VL = DAG.getConstant(2, DL, XLenVT);
3684     // Note: We can't pass an UNDEF to the first VSLIDE1UP_VL since an untied
3685 // undef doesn't obey the earlyclobber constraint. Just splat a zero value.
3686 ValInVec = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, I32ContainerVT, Zero,
3687 InsertI64VL);
3688 // First slide in the hi value, then the lo in underneath it.
3689 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3690 ValHi, I32Mask, InsertI64VL);
3691 ValInVec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32ContainerVT, ValInVec,
3692 ValLo, I32Mask, InsertI64VL);
3693 // Bitcast back to the right container type.
3694 ValInVec = DAG.getBitcast(ContainerVT, ValInVec);
3695 }
3696
3697 // Now that the value is in a vector, slide it into position.
3698 SDValue InsertVL =
3699 DAG.getNode(ISD::ADD, DL, XLenVT, Idx, DAG.getConstant(1, DL, XLenVT));
3700 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
3701 ValInVec, Idx, Mask, InsertVL);
3702 if (!VecVT.isFixedLengthVector())
3703 return Slideup;
3704 return convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
3705 }
3706
3707 // Custom-lower EXTRACT_VECTOR_ELT operations to slide the vector down, then
3708 // extract the first element: (extractelt (slidedown vec, idx), 0). For integer
3709 // types this is done using VMV_X_S to allow us to glean information about the
3710 // sign bits of the result.
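// For example, extracting element 3 of an integer vector becomes roughly
//   (vmv_x_s (vslidedown vec, 3)), with the slidedown performed at VL=1.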
lowerEXTRACT_VECTOR_ELT(SDValue Op,SelectionDAG & DAG) const3711 SDValue RISCVTargetLowering::lowerEXTRACT_VECTOR_ELT(SDValue Op,
3712 SelectionDAG &DAG) const {
3713 SDLoc DL(Op);
3714 SDValue Idx = Op.getOperand(1);
3715 SDValue Vec = Op.getOperand(0);
3716 EVT EltVT = Op.getValueType();
3717 MVT VecVT = Vec.getSimpleValueType();
3718 MVT XLenVT = Subtarget.getXLenVT();
3719
3720 if (VecVT.getVectorElementType() == MVT::i1) {
3721 // FIXME: For now we just promote to an i8 vector and extract from that,
3722 // but this is probably not optimal.
3723 MVT WideVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorElementCount());
3724 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, WideVT, Vec);
3725 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec, Idx);
3726 }
3727
3728 // If this is a fixed vector, we need to convert it to a scalable vector.
3729 MVT ContainerVT = VecVT;
3730 if (VecVT.isFixedLengthVector()) {
3731 ContainerVT = getContainerForFixedLengthVector(VecVT);
3732 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
3733 }
3734
3735 // If the index is 0, the vector is already in the right position.
3736 if (!isNullConstant(Idx)) {
3737 // Use a VL of 1 to avoid processing more elements than we need.
3738 SDValue VL = DAG.getConstant(1, DL, XLenVT);
3739 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
3740 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3741 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
3742 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
3743 }
3744
3745 if (!EltVT.isInteger()) {
3746 // Floating-point extracts are handled in TableGen.
3747 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, EltVT, Vec,
3748 DAG.getConstant(0, DL, XLenVT));
3749 }
3750
3751 SDValue Elt0 = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
3752 return DAG.getNode(ISD::TRUNCATE, DL, EltVT, Elt0);
3753 }
3754
3755 // Some RVV intrinsics may claim that they want an integer operand to be
3756 // promoted or expanded.
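// For example, the i32 scalar operand of a .vx intrinsic on RV64 is promoted
// to i64 (XLenVT) here, while an i64 scalar on RV32 is materialised as a
// splat built from its two i32 halves.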
lowerVectorIntrinsicSplats(SDValue Op,SelectionDAG & DAG,const RISCVSubtarget & Subtarget)3757 static SDValue lowerVectorIntrinsicSplats(SDValue Op, SelectionDAG &DAG,
3758 const RISCVSubtarget &Subtarget) {
3759 assert((Op.getOpcode() == ISD::INTRINSIC_WO_CHAIN ||
3760 Op.getOpcode() == ISD::INTRINSIC_W_CHAIN) &&
3761 "Unexpected opcode");
3762
3763 if (!Subtarget.hasStdExtV())
3764 return SDValue();
3765
3766 bool HasChain = Op.getOpcode() == ISD::INTRINSIC_W_CHAIN;
3767 unsigned IntNo = Op.getConstantOperandVal(HasChain ? 1 : 0);
3768 SDLoc DL(Op);
3769
3770 const RISCVVIntrinsicsTable::RISCVVIntrinsicInfo *II =
3771 RISCVVIntrinsicsTable::getRISCVVIntrinsicInfo(IntNo);
3772 if (!II || !II->SplatOperand)
3773 return SDValue();
3774
3775 unsigned SplatOp = II->SplatOperand + HasChain;
3776 assert(SplatOp < Op.getNumOperands());
3777
3778 SmallVector<SDValue, 8> Operands(Op->op_begin(), Op->op_end());
3779 SDValue &ScalarOp = Operands[SplatOp];
3780 MVT OpVT = ScalarOp.getSimpleValueType();
3781 MVT XLenVT = Subtarget.getXLenVT();
3782
3783   // If this isn't a scalar, or its type is XLenVT, we're done.
3784 if (!OpVT.isScalarInteger() || OpVT == XLenVT)
3785 return SDValue();
3786
3787 // Simplest case is that the operand needs to be promoted to XLenVT.
3788 if (OpVT.bitsLT(XLenVT)) {
3789 // If the operand is a constant, sign extend to increase our chances
3790 // of being able to use a .vi instruction. ANY_EXTEND would become a
3791     // zero extend and the simm5 check in isel would fail.
3792 // FIXME: Should we ignore the upper bits in isel instead?
3793 unsigned ExtOpc =
3794 isa<ConstantSDNode>(ScalarOp) ? ISD::SIGN_EXTEND : ISD::ANY_EXTEND;
3795 ScalarOp = DAG.getNode(ExtOpc, DL, XLenVT, ScalarOp);
3796 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3797 }
3798
3799 // Use the previous operand to get the vXi64 VT. The result might be a mask
3800 // VT for compares. Using the previous operand assumes that the previous
3801 // operand will never have a smaller element size than a scalar operand and
3802 // that a widening operation never uses SEW=64.
3803   // NOTE: If this trips the assert below, we can probably just find the
3804 // element count from any operand or result and use it to construct the VT.
3805 assert(II->SplatOperand > 1 && "Unexpected splat operand!");
3806 MVT VT = Op.getOperand(SplatOp - 1).getSimpleValueType();
3807
3808 // The more complex case is when the scalar is larger than XLenVT.
3809 assert(XLenVT == MVT::i32 && OpVT == MVT::i64 &&
3810 VT.getVectorElementType() == MVT::i64 && "Unexpected VTs!");
3811
3812 // If this is a sign-extended 32-bit constant, we can truncate it and rely
3813 // on the instruction to sign-extend since SEW>XLEN.
3814 if (auto *CVal = dyn_cast<ConstantSDNode>(ScalarOp)) {
3815 if (isInt<32>(CVal->getSExtValue())) {
3816 ScalarOp = DAG.getConstant(CVal->getSExtValue(), DL, MVT::i32);
3817 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3818 }
3819 }
3820
3821 // We need to convert the scalar to a splat vector.
3822 // FIXME: Can we implicitly truncate the scalar if it is known to
3823 // be sign extended?
3824 // VL should be the last operand.
3825 SDValue VL = Op.getOperand(Op.getNumOperands() - 1);
3826 assert(VL.getValueType() == XLenVT);
3827 ScalarOp = splatSplitI64WithVL(DL, VT, ScalarOp, VL, DAG);
3828 return DAG.getNode(Op->getOpcode(), DL, Op->getVTList(), Operands);
3829 }
3830
LowerINTRINSIC_WO_CHAIN(SDValue Op,SelectionDAG & DAG) const3831 SDValue RISCVTargetLowering::LowerINTRINSIC_WO_CHAIN(SDValue Op,
3832 SelectionDAG &DAG) const {
3833 unsigned IntNo = Op.getConstantOperandVal(0);
3834 SDLoc DL(Op);
3835 MVT XLenVT = Subtarget.getXLenVT();
3836
3837 switch (IntNo) {
3838 default:
3839 break; // Don't custom lower most intrinsics.
3840 case Intrinsic::thread_pointer: {
3841 EVT PtrVT = getPointerTy(DAG.getDataLayout());
3842 return DAG.getRegister(RISCV::X4, PtrVT);
3843 }
3844 case Intrinsic::riscv_orc_b:
3845 // Lower to the GORCI encoding for orc.b.
3846 return DAG.getNode(RISCVISD::GORC, DL, XLenVT, Op.getOperand(1),
3847 DAG.getConstant(7, DL, XLenVT));
3848 case Intrinsic::riscv_grev:
3849 case Intrinsic::riscv_gorc: {
3850 unsigned Opc =
3851 IntNo == Intrinsic::riscv_grev ? RISCVISD::GREV : RISCVISD::GORC;
3852 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3853 }
3854 case Intrinsic::riscv_shfl:
3855 case Intrinsic::riscv_unshfl: {
3856 unsigned Opc =
3857 IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
3858 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3859 }
3860 case Intrinsic::riscv_bcompress:
3861 case Intrinsic::riscv_bdecompress: {
3862 unsigned Opc = IntNo == Intrinsic::riscv_bcompress ? RISCVISD::BCOMPRESS
3863 : RISCVISD::BDECOMPRESS;
3864 return DAG.getNode(Opc, DL, XLenVT, Op.getOperand(1), Op.getOperand(2));
3865 }
3866 case Intrinsic::riscv_vmv_x_s:
3867 assert(Op.getValueType() == XLenVT && "Unexpected VT!");
3868 return DAG.getNode(RISCVISD::VMV_X_S, DL, Op.getValueType(),
3869 Op.getOperand(1));
3870 case Intrinsic::riscv_vmv_v_x:
3871 return lowerScalarSplat(Op.getOperand(1), Op.getOperand(2),
3872 Op.getSimpleValueType(), DL, DAG, Subtarget);
3873 case Intrinsic::riscv_vfmv_v_f:
3874 return DAG.getNode(RISCVISD::VFMV_V_F_VL, DL, Op.getValueType(),
3875 Op.getOperand(1), Op.getOperand(2));
3876 case Intrinsic::riscv_vmv_s_x: {
3877 SDValue Scalar = Op.getOperand(2);
3878
3879 if (Scalar.getValueType().bitsLE(XLenVT)) {
3880 Scalar = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, Scalar);
3881 return DAG.getNode(RISCVISD::VMV_S_X_VL, DL, Op.getValueType(),
3882 Op.getOperand(1), Scalar, Op.getOperand(3));
3883 }
3884
3885 assert(Scalar.getValueType() == MVT::i64 && "Unexpected scalar VT!");
3886
3887 // This is an i64 value that lives in two scalar registers. We have to
3888     // insert this in a convoluted way. First we build a vXi64 splat containing
3889     // the two values that we assemble using some bit math. Next we'll use
3890 // vid.v and vmseq to build a mask with bit 0 set. Then we'll use that mask
3891 // to merge element 0 from our splat into the source vector.
3892 // FIXME: This is probably not the best way to do this, but it is
3893 // consistent with INSERT_VECTOR_ELT lowering so it is a good starting
3894 // point.
3895 // sw lo, (a0)
3896 // sw hi, 4(a0)
3897 // vlse vX, (a0)
3898 //
3899 // vid.v vVid
3900 // vmseq.vx mMask, vVid, 0
3901 // vmerge.vvm vDest, vSrc, vVal, mMask
3902 MVT VT = Op.getSimpleValueType();
3903 SDValue Vec = Op.getOperand(1);
3904 SDValue VL = Op.getOperand(3);
3905
3906 SDValue SplattedVal = splatSplitI64WithVL(DL, VT, Scalar, VL, DAG);
3907 SDValue SplattedIdx = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
3908 DAG.getConstant(0, DL, MVT::i32), VL);
3909
3910 MVT MaskVT = MVT::getVectorVT(MVT::i1, VT.getVectorElementCount());
3911 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
3912 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
3913 SDValue SelectCond =
3914 DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, VID, SplattedIdx,
3915 DAG.getCondCode(ISD::SETEQ), Mask, VL);
3916 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, SelectCond, SplattedVal,
3917 Vec, VL);
3918 }
3919 case Intrinsic::riscv_vslide1up:
3920 case Intrinsic::riscv_vslide1down:
3921 case Intrinsic::riscv_vslide1up_mask:
3922 case Intrinsic::riscv_vslide1down_mask: {
3923     // We need to special-case these when the scalar is larger than XLen.
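    // On RV32 an i64 scalar doesn't fit in a single GPR, so reinterpret the
    // vector as SEW=32 (doubling VL) and slide the two 32-bit halves in one
    // at a time.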
3924 unsigned NumOps = Op.getNumOperands();
3925 bool IsMasked = NumOps == 7;
3926 unsigned OpOffset = IsMasked ? 1 : 0;
3927 SDValue Scalar = Op.getOperand(2 + OpOffset);
3928 if (Scalar.getValueType().bitsLE(XLenVT))
3929 break;
3930
3931 // Splatting a sign extended constant is fine.
3932 if (auto *CVal = dyn_cast<ConstantSDNode>(Scalar))
3933 if (isInt<32>(CVal->getSExtValue()))
3934 break;
3935
3936 MVT VT = Op.getSimpleValueType();
3937 assert(VT.getVectorElementType() == MVT::i64 &&
3938 Scalar.getValueType() == MVT::i64 && "Unexpected VTs");
3939
3940 // Convert the vector source to the equivalent nxvXi32 vector.
3941 MVT I32VT = MVT::getVectorVT(MVT::i32, VT.getVectorElementCount() * 2);
3942 SDValue Vec = DAG.getBitcast(I32VT, Op.getOperand(1 + OpOffset));
3943
3944 SDValue ScalarLo = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3945 DAG.getConstant(0, DL, XLenVT));
3946 SDValue ScalarHi = DAG.getNode(ISD::EXTRACT_ELEMENT, DL, MVT::i32, Scalar,
3947 DAG.getConstant(1, DL, XLenVT));
3948
3949 // Double the VL since we halved SEW.
3950 SDValue VL = Op.getOperand(NumOps - (1 + OpOffset));
3951 SDValue I32VL =
3952 DAG.getNode(ISD::SHL, DL, XLenVT, VL, DAG.getConstant(1, DL, XLenVT));
3953
3954 MVT I32MaskVT = MVT::getVectorVT(MVT::i1, I32VT.getVectorElementCount());
3955 SDValue I32Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, I32MaskVT, VL);
3956
3957 // Shift the two scalar parts in using SEW=32 slide1up/slide1down
3958 // instructions.
3959 if (IntNo == Intrinsic::riscv_vslide1up ||
3960 IntNo == Intrinsic::riscv_vslide1up_mask) {
3961 Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarHi,
3962 I32Mask, I32VL);
3963 Vec = DAG.getNode(RISCVISD::VSLIDE1UP_VL, DL, I32VT, Vec, ScalarLo,
3964 I32Mask, I32VL);
3965 } else {
3966 Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarLo,
3967 I32Mask, I32VL);
3968 Vec = DAG.getNode(RISCVISD::VSLIDE1DOWN_VL, DL, I32VT, Vec, ScalarHi,
3969 I32Mask, I32VL);
3970 }
3971
3972 // Convert back to nxvXi64.
3973 Vec = DAG.getBitcast(VT, Vec);
3974
3975 if (!IsMasked)
3976 return Vec;
3977
3978 // Apply mask after the operation.
3979 SDValue Mask = Op.getOperand(NumOps - 3);
3980 SDValue MaskedOff = Op.getOperand(1);
3981 return DAG.getNode(RISCVISD::VSELECT_VL, DL, VT, Mask, Vec, MaskedOff, VL);
3982 }
3983 }
3984
3985 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
3986 }
3987
LowerINTRINSIC_W_CHAIN(SDValue Op,SelectionDAG & DAG) const3988 SDValue RISCVTargetLowering::LowerINTRINSIC_W_CHAIN(SDValue Op,
3989 SelectionDAG &DAG) const {
3990 unsigned IntNo = Op.getConstantOperandVal(1);
3991 switch (IntNo) {
3992 default:
3993 break;
3994 case Intrinsic::riscv_masked_strided_load: {
3995 SDLoc DL(Op);
3996 MVT XLenVT = Subtarget.getXLenVT();
3997
3998 // If the mask is known to be all ones, optimize to an unmasked intrinsic;
3999 // the selection of the masked intrinsics doesn't do this for us.
4000 SDValue Mask = Op.getOperand(5);
4001 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4002
4003 MVT VT = Op->getSimpleValueType(0);
4004 MVT ContainerVT = getContainerForFixedLengthVector(VT);
4005
4006 SDValue PassThru = Op.getOperand(2);
4007 if (!IsUnmasked) {
4008 MVT MaskVT =
4009 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4010 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4011 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4012 }
4013
4014 SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4015
4016 SDValue IntID = DAG.getTargetConstant(
4017 IsUnmasked ? Intrinsic::riscv_vlse : Intrinsic::riscv_vlse_mask, DL,
4018 XLenVT);
4019
4020 auto *Load = cast<MemIntrinsicSDNode>(Op);
4021 SmallVector<SDValue, 8> Ops{Load->getChain(), IntID};
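    // Build the operand list expected by the riscv_vlse[_mask] intrinsic:
    //   (chain, id[, passthru], ptr, stride[, mask], vl[, policy]).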
4022 if (!IsUnmasked)
4023 Ops.push_back(PassThru);
4024 Ops.push_back(Op.getOperand(3)); // Ptr
4025 Ops.push_back(Op.getOperand(4)); // Stride
4026 if (!IsUnmasked)
4027 Ops.push_back(Mask);
4028 Ops.push_back(VL);
4029 if (!IsUnmasked) {
4030 SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4031 Ops.push_back(Policy);
4032 }
4033
4034 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4035 SDValue Result =
4036 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops,
4037 Load->getMemoryVT(), Load->getMemOperand());
4038 SDValue Chain = Result.getValue(1);
4039 Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4040 return DAG.getMergeValues({Result, Chain}, DL);
4041 }
4042 }
4043
4044 return lowerVectorIntrinsicSplats(Op, DAG, Subtarget);
4045 }
4046
LowerINTRINSIC_VOID(SDValue Op,SelectionDAG & DAG) const4047 SDValue RISCVTargetLowering::LowerINTRINSIC_VOID(SDValue Op,
4048 SelectionDAG &DAG) const {
4049 unsigned IntNo = Op.getConstantOperandVal(1);
4050 switch (IntNo) {
4051 default:
4052 break;
4053 case Intrinsic::riscv_masked_strided_store: {
4054 SDLoc DL(Op);
4055 MVT XLenVT = Subtarget.getXLenVT();
4056
4057 // If the mask is known to be all ones, optimize to an unmasked intrinsic;
4058 // the selection of the masked intrinsics doesn't do this for us.
4059 SDValue Mask = Op.getOperand(5);
4060 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4061
4062 SDValue Val = Op.getOperand(2);
4063 MVT VT = Val.getSimpleValueType();
4064 MVT ContainerVT = getContainerForFixedLengthVector(VT);
4065
4066 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4067 if (!IsUnmasked) {
4068 MVT MaskVT =
4069 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4070 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4071 }
4072
4073 SDValue VL = DAG.getConstant(VT.getVectorNumElements(), DL, XLenVT);
4074
4075 SDValue IntID = DAG.getTargetConstant(
4076 IsUnmasked ? Intrinsic::riscv_vsse : Intrinsic::riscv_vsse_mask, DL,
4077 XLenVT);
4078
4079 auto *Store = cast<MemIntrinsicSDNode>(Op);
4080 SmallVector<SDValue, 8> Ops{Store->getChain(), IntID};
4081 Ops.push_back(Val);
4082 Ops.push_back(Op.getOperand(3)); // Ptr
4083 Ops.push_back(Op.getOperand(4)); // Stride
4084 if (!IsUnmasked)
4085 Ops.push_back(Mask);
4086 Ops.push_back(VL);
4087
4088 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL, Store->getVTList(),
4089 Ops, Store->getMemoryVT(),
4090 Store->getMemOperand());
4091 }
4092 }
4093
4094 return SDValue();
4095 }
4096
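// A worked illustration of getLMUL1VT below (ours, not from the original
// source), assuming RVVBitsPerBlock == 64: for VT = nxv8i32, an LMUL=4 type
// at SEW=32, the LMUL=1 type holds 64/32 = 2 elements per block, so this
// returns nxv2i32.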
4097 static MVT getLMUL1VT(MVT VT) {
4098 assert(VT.getVectorElementType().getSizeInBits() <= 64 &&
4099 "Unexpected vector MVT");
4100 return MVT::getScalableVectorVT(
4101 VT.getVectorElementType(),
4102 RISCV::RVVBitsPerBlock / VT.getVectorElementType().getSizeInBits());
4103 }
4104
4105 static unsigned getRVVReductionOp(unsigned ISDOpcode) {
4106 switch (ISDOpcode) {
4107 default:
4108 llvm_unreachable("Unhandled reduction");
4109 case ISD::VECREDUCE_ADD:
4110 return RISCVISD::VECREDUCE_ADD_VL;
4111 case ISD::VECREDUCE_UMAX:
4112 return RISCVISD::VECREDUCE_UMAX_VL;
4113 case ISD::VECREDUCE_SMAX:
4114 return RISCVISD::VECREDUCE_SMAX_VL;
4115 case ISD::VECREDUCE_UMIN:
4116 return RISCVISD::VECREDUCE_UMIN_VL;
4117 case ISD::VECREDUCE_SMIN:
4118 return RISCVISD::VECREDUCE_SMIN_VL;
4119 case ISD::VECREDUCE_AND:
4120 return RISCVISD::VECREDUCE_AND_VL;
4121 case ISD::VECREDUCE_OR:
4122 return RISCVISD::VECREDUCE_OR_VL;
4123 case ISD::VECREDUCE_XOR:
4124 return RISCVISD::VECREDUCE_XOR_VL;
4125 }
4126 }
4127
4128 SDValue RISCVTargetLowering::lowerVectorMaskVecReduction(SDValue Op,
4129 SelectionDAG &DAG,
4130 bool IsVP) const {
4131 SDLoc DL(Op);
4132 SDValue Vec = Op.getOperand(IsVP ? 1 : 0);
4133 MVT VecVT = Vec.getSimpleValueType();
4134 assert((Op.getOpcode() == ISD::VECREDUCE_AND ||
4135 Op.getOpcode() == ISD::VECREDUCE_OR ||
4136 Op.getOpcode() == ISD::VECREDUCE_XOR ||
4137 Op.getOpcode() == ISD::VP_REDUCE_AND ||
4138 Op.getOpcode() == ISD::VP_REDUCE_OR ||
4139 Op.getOpcode() == ISD::VP_REDUCE_XOR) &&
4140 "Unexpected reduction lowering");
4141
4142 MVT XLenVT = Subtarget.getXLenVT();
4143 assert(Op.getValueType() == XLenVT &&
4144 "Expected reduction output to be legalized to XLenVT");
4145
4146 MVT ContainerVT = VecVT;
4147 if (VecVT.isFixedLengthVector()) {
4148 ContainerVT = getContainerForFixedLengthVector(VecVT);
4149 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4150 }
4151
4152 SDValue Mask, VL;
4153 if (IsVP) {
4154 Mask = Op.getOperand(2);
4155 VL = Op.getOperand(3);
4156 } else {
4157 std::tie(Mask, VL) =
4158 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4159 }
4160
4161 unsigned BaseOpc;
4162 ISD::CondCode CC;
4163 SDValue Zero = DAG.getConstant(0, DL, XLenVT);
4164
4165 switch (Op.getOpcode()) {
4166 default:
4167 llvm_unreachable("Unhandled reduction");
4168 case ISD::VECREDUCE_AND:
4169 case ISD::VP_REDUCE_AND: {
4170 // vpopc ~x == 0
4171 SDValue TrueMask = DAG.getNode(RISCVISD::VMSET_VL, DL, ContainerVT, VL);
4172 Vec = DAG.getNode(RISCVISD::VMXOR_VL, DL, ContainerVT, Vec, TrueMask, VL);
4173 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
4174 CC = ISD::SETEQ;
4175 BaseOpc = ISD::AND;
4176 break;
4177 }
4178 case ISD::VECREDUCE_OR:
4179 case ISD::VP_REDUCE_OR:
4180 // vpopc x != 0
4181 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
4182 CC = ISD::SETNE;
4183 BaseOpc = ISD::OR;
4184 break;
4185 case ISD::VECREDUCE_XOR:
4186 case ISD::VP_REDUCE_XOR: {
4187 // ((vpopc x) & 1) != 0
4188 SDValue One = DAG.getConstant(1, DL, XLenVT);
4189 Vec = DAG.getNode(RISCVISD::VPOPC_VL, DL, XLenVT, Vec, Mask, VL);
4190 Vec = DAG.getNode(ISD::AND, DL, XLenVT, Vec, One);
4191 CC = ISD::SETNE;
4192 BaseOpc = ISD::XOR;
4193 break;
4194 }
4195 }
4196
4197 SDValue SetCC = DAG.getSetCC(DL, XLenVT, Vec, Zero, CC);
4198
4199 if (!IsVP)
4200 return SetCC;
4201
4202 // Now include the start value in the operation.
4203 // Note that we must return the start value when no elements are operated
4204 // upon. The vpopc instructions we've emitted in each case above will return
4205 // 0 when no lanes are active, so we've already received the neutral value:
4206 // AND gives us (0 == 0) -> 1 and OR/XOR give us (0 != 0) -> 0. Therefore we
4207 // can simply include the start value.
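// As a sanity check (our illustration): for VP_REDUCE_AND with EVL == 0,
// vpopc returns 0, the SETEQ yields (0 == 0) -> 1, and (1 AND start) == start;
// for VP_REDUCE_OR/XOR the SETNE yields (0 != 0) -> 0, and (0 OR/XOR start)
// == start. Either way the start value is returned, as required.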
4208 return DAG.getNode(BaseOpc, DL, XLenVT, SetCC, Op.getOperand(0));
4209 }
4210
4211 SDValue RISCVTargetLowering::lowerVECREDUCE(SDValue Op,
4212 SelectionDAG &DAG) const {
4213 SDLoc DL(Op);
4214 SDValue Vec = Op.getOperand(0);
4215 EVT VecEVT = Vec.getValueType();
4216
4217 unsigned BaseOpc = ISD::getVecReduceBaseOpcode(Op.getOpcode());
4218
4219 // Due to the ordering of type legalization we may have a vector type that
4220 // needs to be split. Do that manually so we can get down to a legal type.
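// For instance (illustrative, subtarget-dependent): if v64i32 is split, a
// VECREDUCE_ADD of v64i32 becomes an ADD of the two v32i32 halves, and the
// loop repeats on v32i32 until the type action is no longer TypeSplitVector.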
4221 while (getTypeAction(*DAG.getContext(), VecEVT) ==
4222 TargetLowering::TypeSplitVector) {
4223 SDValue Lo, Hi;
4224 std::tie(Lo, Hi) = DAG.SplitVector(Vec, DL);
4225 VecEVT = Lo.getValueType();
4226 Vec = DAG.getNode(BaseOpc, DL, VecEVT, Lo, Hi);
4227 }
4228
4229 // TODO: The type may need to be widened rather than split. Or widened before
4230 // it can be split.
4231 if (!isTypeLegal(VecEVT))
4232 return SDValue();
4233
4234 MVT VecVT = VecEVT.getSimpleVT();
4235 MVT VecEltVT = VecVT.getVectorElementType();
4236 unsigned RVVOpcode = getRVVReductionOp(Op.getOpcode());
4237
4238 MVT ContainerVT = VecVT;
4239 if (VecVT.isFixedLengthVector()) {
4240 ContainerVT = getContainerForFixedLengthVector(VecVT);
4241 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4242 }
4243
4244 MVT M1VT = getLMUL1VT(ContainerVT);
4245
4246 SDValue Mask, VL;
4247 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4248
4249 // FIXME: This is a VLMAX splat which might be too large and can prevent
4250 // vsetvli removal.
4251 SDValue NeutralElem =
4252 DAG.getNeutralElement(BaseOpc, DL, VecEltVT, SDNodeFlags());
4253 SDValue IdentitySplat = DAG.getSplatVector(M1VT, DL, NeutralElem);
4254 SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT), Vec,
4255 IdentitySplat, Mask, VL);
4256 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4257 DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4258 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4259 }
4260
4261 // Given a reduction op, this function returns the matching reduction opcode,
4262 // the vector SDValue and the scalar SDValue required to lower this to a
4263 // RISCVISD node.
4264 static std::tuple<unsigned, SDValue, SDValue>
4265 getRVVFPReductionOpAndOperands(SDValue Op, SelectionDAG &DAG, EVT EltVT) {
4266 SDLoc DL(Op);
4267 auto Flags = Op->getFlags();
4268 unsigned Opcode = Op.getOpcode();
4269 unsigned BaseOpcode = ISD::getVecReduceBaseOpcode(Opcode);
4270 switch (Opcode) {
4271 default:
4272 llvm_unreachable("Unhandled reduction");
4273 case ISD::VECREDUCE_FADD:
4274 return std::make_tuple(RISCVISD::VECREDUCE_FADD_VL, Op.getOperand(0),
4275 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4276 case ISD::VECREDUCE_SEQ_FADD:
4277 return std::make_tuple(RISCVISD::VECREDUCE_SEQ_FADD_VL, Op.getOperand(1),
4278 Op.getOperand(0));
4279 case ISD::VECREDUCE_FMIN:
4280 return std::make_tuple(RISCVISD::VECREDUCE_FMIN_VL, Op.getOperand(0),
4281 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4282 case ISD::VECREDUCE_FMAX:
4283 return std::make_tuple(RISCVISD::VECREDUCE_FMAX_VL, Op.getOperand(0),
4284 DAG.getNeutralElement(BaseOpcode, DL, EltVT, Flags));
4285 }
4286 }
4287
4288 SDValue RISCVTargetLowering::lowerFPVECREDUCE(SDValue Op,
4289 SelectionDAG &DAG) const {
4290 SDLoc DL(Op);
4291 MVT VecEltVT = Op.getSimpleValueType();
4292
4293 unsigned RVVOpcode;
4294 SDValue VectorVal, ScalarVal;
4295 std::tie(RVVOpcode, VectorVal, ScalarVal) =
4296 getRVVFPReductionOpAndOperands(Op, DAG, VecEltVT);
4297 MVT VecVT = VectorVal.getSimpleValueType();
4298
4299 MVT ContainerVT = VecVT;
4300 if (VecVT.isFixedLengthVector()) {
4301 ContainerVT = getContainerForFixedLengthVector(VecVT);
4302 VectorVal = convertToScalableVector(ContainerVT, VectorVal, DAG, Subtarget);
4303 }
4304
4305 MVT M1VT = getLMUL1VT(VectorVal.getSimpleValueType());
4306
4307 SDValue Mask, VL;
4308 std::tie(Mask, VL) = getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget);
4309
4310 // FIXME: This is a VLMAX splat which might be too large and can prevent
4311 // vsetvli removal.
4312 SDValue ScalarSplat = DAG.getSplatVector(M1VT, DL, ScalarVal);
4313 SDValue Reduction = DAG.getNode(RVVOpcode, DL, M1VT, DAG.getUNDEF(M1VT),
4314 VectorVal, ScalarSplat, Mask, VL);
4315 return DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VecEltVT, Reduction,
4316 DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4317 }
4318
4319 static unsigned getRVVVPReductionOp(unsigned ISDOpcode) {
4320 switch (ISDOpcode) {
4321 default:
4322 llvm_unreachable("Unhandled reduction");
4323 case ISD::VP_REDUCE_ADD:
4324 return RISCVISD::VECREDUCE_ADD_VL;
4325 case ISD::VP_REDUCE_UMAX:
4326 return RISCVISD::VECREDUCE_UMAX_VL;
4327 case ISD::VP_REDUCE_SMAX:
4328 return RISCVISD::VECREDUCE_SMAX_VL;
4329 case ISD::VP_REDUCE_UMIN:
4330 return RISCVISD::VECREDUCE_UMIN_VL;
4331 case ISD::VP_REDUCE_SMIN:
4332 return RISCVISD::VECREDUCE_SMIN_VL;
4333 case ISD::VP_REDUCE_AND:
4334 return RISCVISD::VECREDUCE_AND_VL;
4335 case ISD::VP_REDUCE_OR:
4336 return RISCVISD::VECREDUCE_OR_VL;
4337 case ISD::VP_REDUCE_XOR:
4338 return RISCVISD::VECREDUCE_XOR_VL;
4339 case ISD::VP_REDUCE_FADD:
4340 return RISCVISD::VECREDUCE_FADD_VL;
4341 case ISD::VP_REDUCE_SEQ_FADD:
4342 return RISCVISD::VECREDUCE_SEQ_FADD_VL;
4343 case ISD::VP_REDUCE_FMAX:
4344 return RISCVISD::VECREDUCE_FMAX_VL;
4345 case ISD::VP_REDUCE_FMIN:
4346 return RISCVISD::VECREDUCE_FMIN_VL;
4347 }
4348 }
4349
4350 SDValue RISCVTargetLowering::lowerVPREDUCE(SDValue Op,
4351 SelectionDAG &DAG) const {
4352 SDLoc DL(Op);
4353 SDValue Vec = Op.getOperand(1);
4354 EVT VecEVT = Vec.getValueType();
4355
4356 // TODO: The type may need to be widened rather than split. Or widened before
4357 // it can be split.
4358 if (!isTypeLegal(VecEVT))
4359 return SDValue();
4360
4361 MVT VecVT = VecEVT.getSimpleVT();
4362 MVT VecEltVT = VecVT.getVectorElementType();
4363 unsigned RVVOpcode = getRVVVPReductionOp(Op.getOpcode());
4364
4365 MVT ContainerVT = VecVT;
4366 if (VecVT.isFixedLengthVector()) {
4367 ContainerVT = getContainerForFixedLengthVector(VecVT);
4368 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4369 }
4370
4371 SDValue VL = Op.getOperand(3);
4372 SDValue Mask = Op.getOperand(2);
4373
4374 MVT M1VT = getLMUL1VT(ContainerVT);
4375 MVT XLenVT = Subtarget.getXLenVT();
4376 MVT ResVT = !VecVT.isInteger() || VecEltVT.bitsGE(XLenVT) ? VecEltVT : XLenVT;
4377
4378 // FIXME: This is a VLMAX splat which might be too large and can prevent
4379 // vsetvli removal.
4380 SDValue StartSplat = DAG.getSplatVector(M1VT, DL, Op.getOperand(0));
4381 SDValue Reduction =
4382 DAG.getNode(RVVOpcode, DL, M1VT, StartSplat, Vec, StartSplat, Mask, VL);
4383 SDValue Elt0 = DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, ResVT, Reduction,
4384 DAG.getConstant(0, DL, Subtarget.getXLenVT()));
4385 if (!VecVT.isInteger())
4386 return Elt0;
4387 return DAG.getSExtOrTrunc(Elt0, DL, Op.getValueType());
4388 }
4389
4390 SDValue RISCVTargetLowering::lowerINSERT_SUBVECTOR(SDValue Op,
4391 SelectionDAG &DAG) const {
4392 SDValue Vec = Op.getOperand(0);
4393 SDValue SubVec = Op.getOperand(1);
4394 MVT VecVT = Vec.getSimpleValueType();
4395 MVT SubVecVT = SubVec.getSimpleValueType();
4396
4397 SDLoc DL(Op);
4398 MVT XLenVT = Subtarget.getXLenVT();
4399 unsigned OrigIdx = Op.getConstantOperandVal(2);
4400 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4401
4402 // We don't have the ability to slide mask vectors up indexed by their i1
4403 // elements; the smallest we can do is i8. Often we are able to bitcast to
4404 // equivalent i8 vectors. Note that when inserting a fixed-length vector
4405 // into a scalable one, we might not necessarily have enough scalable
4406 // elements to safely divide by 8: nxv1i1 = insert nxv1i1, v4i1 is valid.
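// For instance (our illustration): inserting v8i1 at index 8 into v32i1 can
// be re-expressed as inserting v1i8 at index 1 into v4i8, since both element
// counts and the index are divisible by 8.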
4407 if (SubVecVT.getVectorElementType() == MVT::i1 &&
4408 (OrigIdx != 0 || !Vec.isUndef())) {
4409 if (VecVT.getVectorMinNumElements() >= 8 &&
4410 SubVecVT.getVectorMinNumElements() >= 8) {
4411 assert(OrigIdx % 8 == 0 && "Invalid index");
4412 assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4413 SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4414 "Unexpected mask vector lowering");
4415 OrigIdx /= 8;
4416 SubVecVT =
4417 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4418 SubVecVT.isScalableVector());
4419 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4420 VecVT.isScalableVector());
4421 Vec = DAG.getBitcast(VecVT, Vec);
4422 SubVec = DAG.getBitcast(SubVecVT, SubVec);
4423 } else {
4424 // We can't slide this mask vector up indexed by its i1 elements.
4425 // This poses a problem when we wish to insert a scalable vector which
4426 // can't be re-expressed as a larger type. Just choose the slow path and
4427 // extend to a larger type, then truncate back down.
4428 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4429 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4430 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4431 SubVec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtSubVecVT, SubVec);
4432 Vec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ExtVecVT, Vec, SubVec,
4433 Op.getOperand(2));
4434 SDValue SplatZero = DAG.getConstant(0, DL, ExtVecVT);
4435 return DAG.getSetCC(DL, VecVT, Vec, SplatZero, ISD::SETNE);
4436 }
4437 }
4438
4439 // If the subvector is a fixed-length type, we cannot use subregister
4440 // manipulation to simplify the codegen; we don't know which register of an
4441 // LMUL group contains the specific subvector, as we only know the minimum
4442 // register size. Therefore we must slide the vector group up the full
4443 // amount.
4444 if (SubVecVT.isFixedLengthVector()) {
4445 if (OrigIdx == 0 && Vec.isUndef())
4446 return Op;
4447 MVT ContainerVT = VecVT;
4448 if (VecVT.isFixedLengthVector()) {
4449 ContainerVT = getContainerForFixedLengthVector(VecVT);
4450 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4451 }
4452 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, ContainerVT,
4453 DAG.getUNDEF(ContainerVT), SubVec,
4454 DAG.getConstant(0, DL, XLenVT));
4455 SDValue Mask =
4456 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4457 // Set the vector length to only the number of elements we care about. Note
4458 // that for slideup this includes the offset.
4459 SDValue VL =
4460 DAG.getConstant(OrigIdx + SubVecVT.getVectorNumElements(), DL, XLenVT);
4461 SDValue SlideupAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4462 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, ContainerVT, Vec,
4463 SubVec, SlideupAmt, Mask, VL);
4464 if (VecVT.isFixedLengthVector())
4465 Slideup = convertFromScalableVector(VecVT, Slideup, DAG, Subtarget);
4466 return DAG.getBitcast(Op.getValueType(), Slideup);
4467 }
4468
4469 unsigned SubRegIdx, RemIdx;
4470 std::tie(SubRegIdx, RemIdx) =
4471 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4472 VecVT, SubVecVT, OrigIdx, TRI);
4473
4474 RISCVII::VLMUL SubVecLMUL = RISCVTargetLowering::getLMUL(SubVecVT);
4475 bool IsSubVecPartReg = SubVecLMUL == RISCVII::VLMUL::LMUL_F2 ||
4476 SubVecLMUL == RISCVII::VLMUL::LMUL_F4 ||
4477 SubVecLMUL == RISCVII::VLMUL::LMUL_F8;
4478
4479 // 1. If the Idx has been completely eliminated and this subvector's size is
4480 // a vector register or a multiple thereof, or the surrounding elements are
4481 // undef, then this is a subvector insert which naturally aligns to a vector
4482 // register. These can easily be handled using subregister manipulation.
4483 // 2. If the subvector is smaller than a vector register, then the insertion
4484 // must preserve the undisturbed elements of the register. We do this by
4485 // lowering to an EXTRACT_SUBVECTOR grabbing the nearest LMUL=1 vector type
4486 // (which resolves to a subregister copy), performing a VSLIDEUP to place the
4487 // subvector within the vector register, and an INSERT_SUBVECTOR of that
4488 // LMUL=1 type back into the larger vector (resolving to another subregister
4489 // operation). See below for how our VSLIDEUP works. We go via an LMUL=1
4490 // type to avoid allocating a large register group to hold our subvector.
4491 if (RemIdx == 0 && (!IsSubVecPartReg || Vec.isUndef()))
4492 return Op;
4493
4494 // VSLIDEUP works by leaving elements 0 <= i < OFFSET undisturbed, elements
4495 // OFFSET <= i < VL set to the "subvector", and VL <= i < VLMAX set to the
4496 // tail policy (in our case undisturbed). This means we can set up a
4497 // subvector insertion where OFFSET is the insertion offset, and the VL is
4498 // the OFFSET plus the size of the subvector.
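// A rough illustration (ours, not from the original source): inserting
// nxv1i32 into nxv4i32 at index 1 leaves RemIdx = 1 within the first LMUL=1
// register (nxv2i32), so we slide up by 1*vscale with VL = (1 + 1)*vscale,
// leaving element 0 of that register undisturbed.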
4499 MVT InterSubVT = VecVT;
4500 SDValue AlignedExtract = Vec;
4501 unsigned AlignedIdx = OrigIdx - RemIdx;
4502 if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4503 InterSubVT = getLMUL1VT(VecVT);
4504 // Extract a subvector equal to the nearest full vector register type. This
4505 // should resolve to an EXTRACT_SUBREG instruction.
4506 AlignedExtract = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4507 DAG.getConstant(AlignedIdx, DL, XLenVT));
4508 }
4509
4510 SDValue SlideupAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4511 // For scalable vectors this must be further multiplied by vscale.
4512 SlideupAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlideupAmt);
4513
4514 SDValue Mask, VL;
4515 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4516
4517 // Construct the vector length corresponding to RemIdx + length(SubVecVT).
4518 VL = DAG.getConstant(SubVecVT.getVectorMinNumElements(), DL, XLenVT);
4519 VL = DAG.getNode(ISD::VSCALE, DL, XLenVT, VL);
4520 VL = DAG.getNode(ISD::ADD, DL, XLenVT, SlideupAmt, VL);
4521
4522 SubVec = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, InterSubVT,
4523 DAG.getUNDEF(InterSubVT), SubVec,
4524 DAG.getConstant(0, DL, XLenVT));
4525
4526 SDValue Slideup = DAG.getNode(RISCVISD::VSLIDEUP_VL, DL, InterSubVT,
4527 AlignedExtract, SubVec, SlideupAmt, Mask, VL);
4528
4529 // If required, insert this subvector back into the correct vector register.
4530 // This should resolve to an INSERT_SUBREG instruction.
4531 if (VecVT.bitsGT(InterSubVT))
4532 Slideup = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, Vec, Slideup,
4533 DAG.getConstant(AlignedIdx, DL, XLenVT));
4534
4535 // We might have bitcast from a mask type: cast back to the original type if
4536 // required.
4537 return DAG.getBitcast(Op.getSimpleValueType(), Slideup);
4538 }
4539
4540 SDValue RISCVTargetLowering::lowerEXTRACT_SUBVECTOR(SDValue Op,
4541 SelectionDAG &DAG) const {
4542 SDValue Vec = Op.getOperand(0);
4543 MVT SubVecVT = Op.getSimpleValueType();
4544 MVT VecVT = Vec.getSimpleValueType();
4545
4546 SDLoc DL(Op);
4547 MVT XLenVT = Subtarget.getXLenVT();
4548 unsigned OrigIdx = Op.getConstantOperandVal(1);
4549 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
4550
4551 // We don't have the ability to slide mask vectors down indexed by their i1
4552 // elements; the smallest we can do is i8. Often we are able to bitcast to
4553 // equivalent i8 vectors. Note that when extracting a fixed-length vector
4554 // from a scalable one, we might not necessarily have enough scalable
4555 // elements to safely divide by 8: v8i1 = extract nxv1i1 is valid.
4556 if (SubVecVT.getVectorElementType() == MVT::i1 && OrigIdx != 0) {
4557 if (VecVT.getVectorMinNumElements() >= 8 &&
4558 SubVecVT.getVectorMinNumElements() >= 8) {
4559 assert(OrigIdx % 8 == 0 && "Invalid index");
4560 assert(VecVT.getVectorMinNumElements() % 8 == 0 &&
4561 SubVecVT.getVectorMinNumElements() % 8 == 0 &&
4562 "Unexpected mask vector lowering");
4563 OrigIdx /= 8;
4564 SubVecVT =
4565 MVT::getVectorVT(MVT::i8, SubVecVT.getVectorMinNumElements() / 8,
4566 SubVecVT.isScalableVector());
4567 VecVT = MVT::getVectorVT(MVT::i8, VecVT.getVectorMinNumElements() / 8,
4568 VecVT.isScalableVector());
4569 Vec = DAG.getBitcast(VecVT, Vec);
4570 } else {
4571 // We can't slide this mask vector down, indexed by its i1 elements.
4572 // This poses a problem when we wish to extract a scalable vector which
4573 // can't be re-expressed as a larger type. Just choose the slow path and
4574 // extend to a larger type, then truncate back down.
4575 // TODO: We could probably improve this when extracting certain fixed-length
4576 // vectors from fixed-length vectors, where we can extract as i8 and shift
4577 // the correct element right to reach the desired subvector.
4578 MVT ExtVecVT = VecVT.changeVectorElementType(MVT::i8);
4579 MVT ExtSubVecVT = SubVecVT.changeVectorElementType(MVT::i8);
4580 Vec = DAG.getNode(ISD::ZERO_EXTEND, DL, ExtVecVT, Vec);
4581 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, ExtSubVecVT, Vec,
4582 Op.getOperand(1));
4583 SDValue SplatZero = DAG.getConstant(0, DL, ExtSubVecVT);
4584 return DAG.getSetCC(DL, SubVecVT, Vec, SplatZero, ISD::SETNE);
4585 }
4586 }
4587
4588 // If the subvector is a fixed-length type, we cannot use subregister
4589 // manipulation to simplify the codegen; we don't know which register of an
4590 // LMUL group contains the specific subvector, as we only know the minimum
4591 // register size. Therefore we must slide the vector group down the full
4592 // amount.
4593 if (SubVecVT.isFixedLengthVector()) {
4594 // With an index of 0 this is a cast-like subvector, which can be performed
4595 // with subregister operations.
4596 if (OrigIdx == 0)
4597 return Op;
4598 MVT ContainerVT = VecVT;
4599 if (VecVT.isFixedLengthVector()) {
4600 ContainerVT = getContainerForFixedLengthVector(VecVT);
4601 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
4602 }
4603 SDValue Mask =
4604 getDefaultVLOps(VecVT, ContainerVT, DL, DAG, Subtarget).first;
4605 // Set the vector length to only the number of elements we care about. This
4606 // avoids sliding down elements we're going to discard straight away.
4607 SDValue VL = DAG.getConstant(SubVecVT.getVectorNumElements(), DL, XLenVT);
4608 SDValue SlidedownAmt = DAG.getConstant(OrigIdx, DL, XLenVT);
4609 SDValue Slidedown =
4610 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
4611 DAG.getUNDEF(ContainerVT), Vec, SlidedownAmt, Mask, VL);
4612 // Now we can use a cast-like subvector extract to get the result.
4613 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4614 DAG.getConstant(0, DL, XLenVT));
4615 return DAG.getBitcast(Op.getValueType(), Slidedown);
4616 }
4617
4618 unsigned SubRegIdx, RemIdx;
4619 std::tie(SubRegIdx, RemIdx) =
4620 RISCVTargetLowering::decomposeSubvectorInsertExtractToSubRegs(
4621 VecVT, SubVecVT, OrigIdx, TRI);
4622
4623 // If the Idx has been completely eliminated then this is a subvector extract
4624 // which naturally aligns to a vector register. These can easily be handled
4625 // using subregister manipulation.
4626 if (RemIdx == 0)
4627 return Op;
4628
4629 // Else we must shift our vector register directly to extract the subvector.
4630 // Do this using VSLIDEDOWN.
4631
4632 // If the vector type is an LMUL-group type, extract a subvector equal to the
4633 // nearest full vector register type. This should resolve to an
4634 // EXTRACT_SUBREG instruction.
4635 MVT InterSubVT = VecVT;
4636 if (VecVT.bitsGT(getLMUL1VT(VecVT))) {
4637 InterSubVT = getLMUL1VT(VecVT);
4638 Vec = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, InterSubVT, Vec,
4639 DAG.getConstant(OrigIdx - RemIdx, DL, XLenVT));
4640 }
4641
4642 // Slide this vector register down by the desired number of elements in order
4643 // to place the desired subvector starting at element 0.
4644 SDValue SlidedownAmt = DAG.getConstant(RemIdx, DL, XLenVT);
4645 // For scalable vectors this must be further multiplied by vscale.
4646 SlidedownAmt = DAG.getNode(ISD::VSCALE, DL, XLenVT, SlidedownAmt);
4647
4648 SDValue Mask, VL;
4649 std::tie(Mask, VL) = getDefaultScalableVLOps(InterSubVT, DL, DAG, Subtarget);
4650 SDValue Slidedown =
4651 DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, InterSubVT,
4652 DAG.getUNDEF(InterSubVT), Vec, SlidedownAmt, Mask, VL);
4653
4654 // Now the vector is in the right position, extract our final subvector. This
4655 // should resolve to a COPY.
4656 Slidedown = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SubVecVT, Slidedown,
4657 DAG.getConstant(0, DL, XLenVT));
4658
4659 // We might have bitcast from a mask type: cast back to the original type if
4660 // required.
4661 return DAG.getBitcast(Op.getSimpleValueType(), Slidedown);
4662 }
4663
4664 // Lower step_vector to the vid instruction. Any non-identity step value must
4665 // be accounted for by manual expansion.
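// For example (our illustration): step_vector with a step of 4 lowers to
// (shl (vid), 2), producing 0, 4, 8, ..., while a non-power-of-two step such
// as 3 lowers to (mul (vid), (splat 3)), producing 0, 3, 6, ....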
4666 SDValue RISCVTargetLowering::lowerSTEP_VECTOR(SDValue Op,
4667 SelectionDAG &DAG) const {
4668 SDLoc DL(Op);
4669 MVT VT = Op.getSimpleValueType();
4670 MVT XLenVT = Subtarget.getXLenVT();
4671 SDValue Mask, VL;
4672 std::tie(Mask, VL) = getDefaultScalableVLOps(VT, DL, DAG, Subtarget);
4673 SDValue StepVec = DAG.getNode(RISCVISD::VID_VL, DL, VT, Mask, VL);
4674 uint64_t StepValImm = Op.getConstantOperandVal(0);
4675 if (StepValImm != 1) {
4676 if (isPowerOf2_64(StepValImm)) {
4677 SDValue StepVal =
4678 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT,
4679 DAG.getConstant(Log2_64(StepValImm), DL, XLenVT));
4680 StepVec = DAG.getNode(ISD::SHL, DL, VT, StepVec, StepVal);
4681 } else {
4682 SDValue StepVal = lowerScalarSplat(
4683 DAG.getConstant(StepValImm, DL, VT.getVectorElementType()), VL, VT,
4684 DL, DAG, Subtarget);
4685 StepVec = DAG.getNode(ISD::MUL, DL, VT, StepVec, StepVal);
4686 }
4687 }
4688 return StepVec;
4689 }
4690
4691 // Implement vector_reverse using vrgather.vv with indices determined by
4692 // subtracting the id of each element from (VLMAX-1). This will convert
4693 // the indices like so:
4694 // (0, 1,..., VLMAX-2, VLMAX-1) -> (VLMAX-1, VLMAX-2,..., 1, 0).
4695 // TODO: This code assumes VLMAX <= 65536 for LMUL=8 SEW=16.
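// As a worked example (ours, assuming RVVBitsPerBlock == 64): with
// riscv-v-vector-bits-max=512, VecVT = nxv16i8 has MinSize = 128, giving
// MaxVLMAX = (512 / 8) * 128 / 64 = 128 <= 256, so plain vrgather.vv with
// i8 indices still suffices.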
4696 SDValue RISCVTargetLowering::lowerVECTOR_REVERSE(SDValue Op,
4697 SelectionDAG &DAG) const {
4698 SDLoc DL(Op);
4699 MVT VecVT = Op.getSimpleValueType();
4700 unsigned EltSize = VecVT.getScalarSizeInBits();
4701 unsigned MinSize = VecVT.getSizeInBits().getKnownMinValue();
4702
4703 unsigned MaxVLMAX = 0;
4704 unsigned VectorBitsMax = Subtarget.getMaxRVVVectorSizeInBits();
4705 if (VectorBitsMax != 0)
4706 MaxVLMAX = ((VectorBitsMax / EltSize) * MinSize) / RISCV::RVVBitsPerBlock;
4707
4708 unsigned GatherOpc = RISCVISD::VRGATHER_VV_VL;
4709 MVT IntVT = VecVT.changeVectorElementTypeToInteger();
4710
4711 // If this is SEW=8 and VLMAX is unknown or more than 256, we need
4712 // to use vrgatherei16.vv.
4713 // TODO: It's also possible to use vrgatherei16.vv for other types to
4714 // decrease register width for the index calculation.
4715 if ((MaxVLMAX == 0 || MaxVLMAX > 256) && EltSize == 8) {
4716 // If this is LMUL=8, we have to split before we can use vrgatherei16.vv.
4717 // Reverse each half, then reassemble them in reverse order.
4718 // NOTE: It's also possible that after splitting, VLMAX no longer requires
4719 // vrgatherei16.vv.
4720 if (MinSize == (8 * RISCV::RVVBitsPerBlock)) {
4721 SDValue Lo, Hi;
4722 std::tie(Lo, Hi) = DAG.SplitVectorOperand(Op.getNode(), 0);
4723 EVT LoVT, HiVT;
4724 std::tie(LoVT, HiVT) = DAG.GetSplitDestVTs(VecVT);
4725 Lo = DAG.getNode(ISD::VECTOR_REVERSE, DL, LoVT, Lo);
4726 Hi = DAG.getNode(ISD::VECTOR_REVERSE, DL, HiVT, Hi);
4727 // Reassemble the low and high pieces reversed.
4728 // FIXME: This is a CONCAT_VECTORS.
4729 SDValue Res =
4730 DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VecVT, DAG.getUNDEF(VecVT), Hi,
4731 DAG.getIntPtrConstant(0, DL));
4732 return DAG.getNode(
4733 ISD::INSERT_SUBVECTOR, DL, VecVT, Res, Lo,
4734 DAG.getIntPtrConstant(LoVT.getVectorMinNumElements(), DL));
4735 }
4736
4737 // Just promote the int type to i16 which will double the LMUL.
4738 IntVT = MVT::getVectorVT(MVT::i16, VecVT.getVectorElementCount());
4739 GatherOpc = RISCVISD::VRGATHEREI16_VV_VL;
4740 }
4741
4742 MVT XLenVT = Subtarget.getXLenVT();
4743 SDValue Mask, VL;
4744 std::tie(Mask, VL) = getDefaultScalableVLOps(VecVT, DL, DAG, Subtarget);
4745
4746 // Calculate VLMAX-1 for the desired SEW.
4747 unsigned MinElts = VecVT.getVectorMinNumElements();
4748 SDValue VLMax = DAG.getNode(ISD::VSCALE, DL, XLenVT,
4749 DAG.getConstant(MinElts, DL, XLenVT));
4750 SDValue VLMinus1 =
4751 DAG.getNode(ISD::SUB, DL, XLenVT, VLMax, DAG.getConstant(1, DL, XLenVT));
4752
4753 // Splat VLMAX-1 taking care to handle SEW==64 on RV32.
4754 bool IsRV32E64 =
4755 !Subtarget.is64Bit() && IntVT.getVectorElementType() == MVT::i64;
4756 SDValue SplatVL;
4757 if (!IsRV32E64)
4758 SplatVL = DAG.getSplatVector(IntVT, DL, VLMinus1);
4759 else
4760 SplatVL = DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, IntVT, VLMinus1);
4761
4762 SDValue VID = DAG.getNode(RISCVISD::VID_VL, DL, IntVT, Mask, VL);
4763 SDValue Indices =
4764 DAG.getNode(RISCVISD::SUB_VL, DL, IntVT, SplatVL, VID, Mask, VL);
4765
4766 return DAG.getNode(GatherOpc, DL, VecVT, Op.getOperand(0), Indices, Mask, VL);
4767 }
4768
4769 SDValue
4770 RISCVTargetLowering::lowerFixedLengthVectorLoadToRVV(SDValue Op,
4771 SelectionDAG &DAG) const {
4772 SDLoc DL(Op);
4773 auto *Load = cast<LoadSDNode>(Op);
4774
4775 assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4776 Load->getMemoryVT(),
4777 *Load->getMemOperand()) &&
4778 "Expecting a correctly-aligned load");
4779
4780 MVT VT = Op.getSimpleValueType();
4781 MVT ContainerVT = getContainerForFixedLengthVector(VT);
4782
4783 SDValue VL =
4784 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4785
4786 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4787 SDValue NewLoad = DAG.getMemIntrinsicNode(
4788 RISCVISD::VLE_VL, DL, VTs, {Load->getChain(), Load->getBasePtr(), VL},
4789 Load->getMemoryVT(), Load->getMemOperand());
4790
4791 SDValue Result = convertFromScalableVector(VT, NewLoad, DAG, Subtarget);
4792 return DAG.getMergeValues({Result, Load->getChain()}, DL);
4793 }
4794
4795 SDValue
4796 RISCVTargetLowering::lowerFixedLengthVectorStoreToRVV(SDValue Op,
4797 SelectionDAG &DAG) const {
4798 SDLoc DL(Op);
4799 auto *Store = cast<StoreSDNode>(Op);
4800
4801 assert(allowsMemoryAccessForAlignment(*DAG.getContext(), DAG.getDataLayout(),
4802 Store->getMemoryVT(),
4803 *Store->getMemOperand()) &&
4804 "Expecting a correctly-aligned store");
4805
4806 SDValue StoreVal = Store->getValue();
4807 MVT VT = StoreVal.getSimpleValueType();
4808
4809 // If the size is less than a byte, we need to pad with zeros to make a byte.
4810 if (VT.getVectorElementType() == MVT::i1 && VT.getVectorNumElements() < 8) {
4811 VT = MVT::v8i1;
4812 StoreVal = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, VT,
4813 DAG.getConstant(0, DL, VT), StoreVal,
4814 DAG.getIntPtrConstant(0, DL));
4815 }
4816
4817 MVT ContainerVT = getContainerForFixedLengthVector(VT);
4818
4819 SDValue VL =
4820 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4821
4822 SDValue NewValue =
4823 convertToScalableVector(ContainerVT, StoreVal, DAG, Subtarget);
4824 return DAG.getMemIntrinsicNode(
4825 RISCVISD::VSE_VL, DL, DAG.getVTList(MVT::Other),
4826 {Store->getChain(), NewValue, Store->getBasePtr(), VL},
4827 Store->getMemoryVT(), Store->getMemOperand());
4828 }
4829
4830 SDValue RISCVTargetLowering::lowerMaskedLoad(SDValue Op,
4831 SelectionDAG &DAG) const {
4832 SDLoc DL(Op);
4833 MVT VT = Op.getSimpleValueType();
4834
4835 const auto *MemSD = cast<MemSDNode>(Op);
4836 EVT MemVT = MemSD->getMemoryVT();
4837 MachineMemOperand *MMO = MemSD->getMemOperand();
4838 SDValue Chain = MemSD->getChain();
4839 SDValue BasePtr = MemSD->getBasePtr();
4840
4841 SDValue Mask, PassThru, VL;
4842 if (const auto *VPLoad = dyn_cast<VPLoadSDNode>(Op)) {
4843 Mask = VPLoad->getMask();
4844 PassThru = DAG.getUNDEF(VT);
4845 VL = VPLoad->getVectorLength();
4846 } else {
4847 const auto *MLoad = cast<MaskedLoadSDNode>(Op);
4848 Mask = MLoad->getMask();
4849 PassThru = MLoad->getPassThru();
4850 }
4851
4852 MVT XLenVT = Subtarget.getXLenVT();
4853
4854 MVT ContainerVT = VT;
4855 if (VT.isFixedLengthVector()) {
4856 ContainerVT = getContainerForFixedLengthVector(VT);
4857 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4858
4859 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4860 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
4861 }
4862
4863 if (!VL)
4864 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4865
4866 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
4867 SDValue IntID = DAG.getTargetConstant(Intrinsic::riscv_vle_mask, DL, XLenVT);
4868 SDValue Policy = DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT);
4869 SDValue Ops[] = {Chain, IntID, PassThru, BasePtr, Mask, VL, Policy};
4870 SDValue Result =
4871 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
4872 Chain = Result.getValue(1);
4873
4874 if (VT.isFixedLengthVector())
4875 Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
4876
4877 return DAG.getMergeValues({Result, Chain}, DL);
4878 }
4879
4880 SDValue RISCVTargetLowering::lowerMaskedStore(SDValue Op,
4881 SelectionDAG &DAG) const {
4882 SDLoc DL(Op);
4883
4884 const auto *MemSD = cast<MemSDNode>(Op);
4885 EVT MemVT = MemSD->getMemoryVT();
4886 MachineMemOperand *MMO = MemSD->getMemOperand();
4887 SDValue Chain = MemSD->getChain();
4888 SDValue BasePtr = MemSD->getBasePtr();
4889 SDValue Val, Mask, VL;
4890
4891 if (const auto *VPStore = dyn_cast<VPStoreSDNode>(Op)) {
4892 Val = VPStore->getValue();
4893 Mask = VPStore->getMask();
4894 VL = VPStore->getVectorLength();
4895 } else {
4896 const auto *MStore = cast<MaskedStoreSDNode>(Op);
4897 Val = MStore->getValue();
4898 Mask = MStore->getMask();
4899 }
4900
4901 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
4902
4903 MVT VT = Val.getSimpleValueType();
4904 MVT XLenVT = Subtarget.getXLenVT();
4905
4906 MVT ContainerVT = VT;
4907 if (VT.isFixedLengthVector()) {
4908 ContainerVT = getContainerForFixedLengthVector(VT);
4909
4910 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
4911 if (!IsUnmasked) {
4912 MVT MaskVT =
4913 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4914 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
4915 }
4916 }
4917
4918 if (!VL)
4919 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
4920
4921 unsigned IntID =
4922 IsUnmasked ? Intrinsic::riscv_vse : Intrinsic::riscv_vse_mask;
4923 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
4924 Ops.push_back(Val);
4925 Ops.push_back(BasePtr);
4926 if (!IsUnmasked)
4927 Ops.push_back(Mask);
4928 Ops.push_back(VL);
4929
4930 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
4931 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
4932 }
4933
4934 SDValue
4935 RISCVTargetLowering::lowerFixedLengthVectorSetccToRVV(SDValue Op,
4936 SelectionDAG &DAG) const {
4937 MVT InVT = Op.getOperand(0).getSimpleValueType();
4938 MVT ContainerVT = getContainerForFixedLengthVector(InVT);
4939
4940 MVT VT = Op.getSimpleValueType();
4941
4942 SDValue Op1 =
4943 convertToScalableVector(ContainerVT, Op.getOperand(0), DAG, Subtarget);
4944 SDValue Op2 =
4945 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
4946
4947 SDLoc DL(Op);
4948 SDValue VL =
4949 DAG.getConstant(VT.getVectorNumElements(), DL, Subtarget.getXLenVT());
4950
4951 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
4952 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
4953
4954 SDValue Cmp = DAG.getNode(RISCVISD::SETCC_VL, DL, MaskVT, Op1, Op2,
4955 Op.getOperand(2), Mask, VL);
4956
4957 return convertFromScalableVector(VT, Cmp, DAG, Subtarget);
4958 }
4959
4960 SDValue RISCVTargetLowering::lowerFixedLengthVectorLogicOpToRVV(
4961 SDValue Op, SelectionDAG &DAG, unsigned MaskOpc, unsigned VecOpc) const {
4962 MVT VT = Op.getSimpleValueType();
4963
4964 if (VT.getVectorElementType() == MVT::i1)
4965 return lowerToScalableOp(Op, DAG, MaskOpc, /*HasMask*/ false);
4966
4967 return lowerToScalableOp(Op, DAG, VecOpc, /*HasMask*/ true);
4968 }
4969
4970 SDValue
4971 RISCVTargetLowering::lowerFixedLengthVectorShiftToRVV(SDValue Op,
4972 SelectionDAG &DAG) const {
4973 unsigned Opc;
4974 switch (Op.getOpcode()) {
4975 default: llvm_unreachable("Unexpected opcode!");
4976 case ISD::SHL: Opc = RISCVISD::SHL_VL; break;
4977 case ISD::SRA: Opc = RISCVISD::SRA_VL; break;
4978 case ISD::SRL: Opc = RISCVISD::SRL_VL; break;
4979 }
4980
4981 return lowerToScalableOp(Op, DAG, Opc);
4982 }
4983
4984 // Lower vector ABS to smax(X, sub(0, X)).
4985 SDValue RISCVTargetLowering::lowerABS(SDValue Op, SelectionDAG &DAG) const {
4986 SDLoc DL(Op);
4987 MVT VT = Op.getSimpleValueType();
4988 SDValue X = Op.getOperand(0);
4989
4990 assert(VT.isFixedLengthVector() && "Unexpected type");
4991
4992 MVT ContainerVT = getContainerForFixedLengthVector(VT);
4993 X = convertToScalableVector(ContainerVT, X, DAG, Subtarget);
4994
4995 SDValue Mask, VL;
4996 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
4997
4998 SDValue SplatZero =
4999 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5000 DAG.getConstant(0, DL, Subtarget.getXLenVT()));
5001 SDValue NegX =
5002 DAG.getNode(RISCVISD::SUB_VL, DL, ContainerVT, SplatZero, X, Mask, VL);
5003 SDValue Max =
5004 DAG.getNode(RISCVISD::SMAX_VL, DL, ContainerVT, X, NegX, Mask, VL);
5005
5006 return convertFromScalableVector(VT, Max, DAG, Subtarget);
5007 }
5008
5009 SDValue RISCVTargetLowering::lowerFixedLengthVectorFCOPYSIGNToRVV(
5010 SDValue Op, SelectionDAG &DAG) const {
5011 SDLoc DL(Op);
5012 MVT VT = Op.getSimpleValueType();
5013 SDValue Mag = Op.getOperand(0);
5014 SDValue Sign = Op.getOperand(1);
5015 assert(Mag.getValueType() == Sign.getValueType() &&
5016 "Can only handle COPYSIGN with matching types.");
5017
5018 MVT ContainerVT = getContainerForFixedLengthVector(VT);
5019 Mag = convertToScalableVector(ContainerVT, Mag, DAG, Subtarget);
5020 Sign = convertToScalableVector(ContainerVT, Sign, DAG, Subtarget);
5021
5022 SDValue Mask, VL;
5023 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5024
5025 SDValue CopySign =
5026 DAG.getNode(RISCVISD::FCOPYSIGN_VL, DL, ContainerVT, Mag, Sign, Mask, VL);
5027
5028 return convertFromScalableVector(VT, CopySign, DAG, Subtarget);
5029 }
5030
5031 SDValue RISCVTargetLowering::lowerFixedLengthVectorSelectToRVV(
5032 SDValue Op, SelectionDAG &DAG) const {
5033 MVT VT = Op.getSimpleValueType();
5034 MVT ContainerVT = getContainerForFixedLengthVector(VT);
5035
5036 MVT I1ContainerVT =
5037 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5038
5039 SDValue CC =
5040 convertToScalableVector(I1ContainerVT, Op.getOperand(0), DAG, Subtarget);
5041 SDValue Op1 =
5042 convertToScalableVector(ContainerVT, Op.getOperand(1), DAG, Subtarget);
5043 SDValue Op2 =
5044 convertToScalableVector(ContainerVT, Op.getOperand(2), DAG, Subtarget);
5045
5046 SDLoc DL(Op);
5047 SDValue Mask, VL;
5048 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5049
5050 SDValue Select =
5051 DAG.getNode(RISCVISD::VSELECT_VL, DL, ContainerVT, CC, Op1, Op2, VL);
5052
5053 return convertFromScalableVector(VT, Select, DAG, Subtarget);
5054 }
5055
5056 SDValue RISCVTargetLowering::lowerToScalableOp(SDValue Op, SelectionDAG &DAG,
5057 unsigned NewOpc,
5058 bool HasMask) const {
5059 MVT VT = Op.getSimpleValueType();
5060 MVT ContainerVT = getContainerForFixedLengthVector(VT);
5061
5062 // Create list of operands by converting existing ones to scalable types.
5063 SmallVector<SDValue, 6> Ops;
5064 for (const SDValue &V : Op->op_values()) {
5065 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5066
5067 // Pass through non-vector operands.
5068 if (!V.getValueType().isVector()) {
5069 Ops.push_back(V);
5070 continue;
5071 }
5072
5073 // "cast" fixed length vector to a scalable vector.
5074 assert(useRVVForFixedLengthVectorVT(V.getSimpleValueType()) &&
5075 "Only fixed length vectors are supported!");
5076 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5077 }
5078
5079 SDLoc DL(Op);
5080 SDValue Mask, VL;
5081 std::tie(Mask, VL) = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget);
5082 if (HasMask)
5083 Ops.push_back(Mask);
5084 Ops.push_back(VL);
5085
5086 SDValue ScalableRes = DAG.getNode(NewOpc, DL, ContainerVT, Ops);
5087 return convertFromScalableVector(VT, ScalableRes, DAG, Subtarget);
5088 }
5089
5090 // Lower a VP_* ISD node to the corresponding RISCVISD::*_VL node:
5091 // * Operands of each node are assumed to be in the same order.
5092 // * The EVL operand is promoted from i32 to i64 on RV64.
5093 // * Fixed-length vectors are converted to their scalable-vector container
5094 // types.
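// For instance (our illustration): a VP_ADD of v4i32 with EVL %evl becomes a
// RISCVISD::ADD_VL on the container type (nxv2i32 on a subtarget where that
// is the container for v4i32), and the result is converted back to v4i32.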
5095 SDValue RISCVTargetLowering::lowerVPOp(SDValue Op, SelectionDAG &DAG,
5096 unsigned RISCVISDOpc) const {
5097 SDLoc DL(Op);
5098 MVT VT = Op.getSimpleValueType();
5099 SmallVector<SDValue, 4> Ops;
5100
5101 for (const auto &OpIdx : enumerate(Op->ops())) {
5102 SDValue V = OpIdx.value();
5103 assert(!isa<VTSDNode>(V) && "Unexpected VTSDNode node!");
5104 // Pass through operands which aren't fixed-length vectors.
5105 if (!V.getValueType().isFixedLengthVector()) {
5106 Ops.push_back(V);
5107 continue;
5108 }
5109 // "cast" fixed length vector to a scalable vector.
5110 MVT OpVT = V.getSimpleValueType();
5111 MVT ContainerVT = getContainerForFixedLengthVector(OpVT);
5112 assert(useRVVForFixedLengthVectorVT(OpVT) &&
5113 "Only fixed length vectors are supported!");
5114 Ops.push_back(convertToScalableVector(ContainerVT, V, DAG, Subtarget));
5115 }
5116
5117 if (!VT.isFixedLengthVector())
5118 return DAG.getNode(RISCVISDOpc, DL, VT, Ops);
5119
5120 MVT ContainerVT = getContainerForFixedLengthVector(VT);
5121
5122 SDValue VPOp = DAG.getNode(RISCVISDOpc, DL, ContainerVT, Ops);
5123
5124 return convertFromScalableVector(VT, VPOp, DAG, Subtarget);
5125 }
5126
5127 // Custom lower MGATHER/VP_GATHER to a legalized form for RVV. It will then be
5128 // matched to an RVV indexed load. The RVV indexed load instructions only
5129 // support the "unsigned unscaled" addressing mode; indices are implicitly
5130 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5131 // signed or scaled indexing is extended to the XLEN value type and scaled
5132 // accordingly.
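// For instance (our illustration): because the vluxei* form emitted below
// treats each index as a raw byte offset from the base pointer, a gather of
// i32 elements addressed by element index must have had its indices
// multiplied by 4 (and extended to XLenVT) before reaching this point.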
5133 SDValue RISCVTargetLowering::lowerMaskedGather(SDValue Op,
5134 SelectionDAG &DAG) const {
5135 SDLoc DL(Op);
5136 MVT VT = Op.getSimpleValueType();
5137
5138 const auto *MemSD = cast<MemSDNode>(Op.getNode());
5139 EVT MemVT = MemSD->getMemoryVT();
5140 MachineMemOperand *MMO = MemSD->getMemOperand();
5141 SDValue Chain = MemSD->getChain();
5142 SDValue BasePtr = MemSD->getBasePtr();
5143
5144 ISD::LoadExtType LoadExtType;
5145 SDValue Index, Mask, PassThru, VL;
5146
5147 if (auto *VPGN = dyn_cast<VPGatherSDNode>(Op.getNode())) {
5148 Index = VPGN->getIndex();
5149 Mask = VPGN->getMask();
5150 PassThru = DAG.getUNDEF(VT);
5151 VL = VPGN->getVectorLength();
5152 // VP doesn't support extending loads.
5153 LoadExtType = ISD::NON_EXTLOAD;
5154 } else {
5155 // Else it must be an MGATHER.
5156 auto *MGN = cast<MaskedGatherSDNode>(Op.getNode());
5157 Index = MGN->getIndex();
5158 Mask = MGN->getMask();
5159 PassThru = MGN->getPassThru();
5160 LoadExtType = MGN->getExtensionType();
5161 }
5162
5163 MVT IndexVT = Index.getSimpleValueType();
5164 MVT XLenVT = Subtarget.getXLenVT();
5165
5166 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5167 "Unexpected VTs!");
5168 assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5169 // Targets have to explicitly opt-in for extending vector loads.
5170 assert(LoadExtType == ISD::NON_EXTLOAD &&
5171 "Unexpected extending MGATHER/VP_GATHER");
5172 (void)LoadExtType;
5173
5174 // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5175 // the selection of the masked intrinsics doesn't do this for us.
5176 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5177
5178 MVT ContainerVT = VT;
5179 if (VT.isFixedLengthVector()) {
5180 // We need to use the larger of the result and index type to determine the
5181 // scalable type to use so we don't increase LMUL for any operand/result.
5182 if (VT.bitsGE(IndexVT)) {
5183 ContainerVT = getContainerForFixedLengthVector(VT);
5184 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5185 ContainerVT.getVectorElementCount());
5186 } else {
5187 IndexVT = getContainerForFixedLengthVector(IndexVT);
5188 ContainerVT = MVT::getVectorVT(ContainerVT.getVectorElementType(),
5189 IndexVT.getVectorElementCount());
5190 }
5191
5192 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5193
5194 if (!IsUnmasked) {
5195 MVT MaskVT =
5196 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5197 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5198 PassThru = convertToScalableVector(ContainerVT, PassThru, DAG, Subtarget);
5199 }
5200 }
5201
5202 if (!VL)
5203 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5204
5205 unsigned IntID =
5206 IsUnmasked ? Intrinsic::riscv_vluxei : Intrinsic::riscv_vluxei_mask;
5207 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5208 if (!IsUnmasked)
5209 Ops.push_back(PassThru);
5210 Ops.push_back(BasePtr);
5211 Ops.push_back(Index);
5212 if (!IsUnmasked)
5213 Ops.push_back(Mask);
5214 Ops.push_back(VL);
5215 if (!IsUnmasked)
5216 Ops.push_back(DAG.getTargetConstant(RISCVII::TAIL_AGNOSTIC, DL, XLenVT));
5217
5218 SDVTList VTs = DAG.getVTList({ContainerVT, MVT::Other});
5219 SDValue Result =
5220 DAG.getMemIntrinsicNode(ISD::INTRINSIC_W_CHAIN, DL, VTs, Ops, MemVT, MMO);
5221 Chain = Result.getValue(1);
5222
5223 if (VT.isFixedLengthVector())
5224 Result = convertFromScalableVector(VT, Result, DAG, Subtarget);
5225
5226 return DAG.getMergeValues({Result, Chain}, DL);
5227 }
5228
5229 // Custom lower MSCATTER/VP_SCATTER to a legalized form for RVV. It will then be
5230 // matched to an RVV indexed store. The RVV indexed store instructions only
5231 // support the "unsigned unscaled" addressing mode; indices are implicitly
5232 // zero-extended or truncated to XLEN and are treated as byte offsets. Any
5233 // signed or scaled indexing is extended to the XLEN value type and scaled
5234 // accordingly.
5235 SDValue RISCVTargetLowering::lowerMaskedScatter(SDValue Op,
5236 SelectionDAG &DAG) const {
5237 SDLoc DL(Op);
5238 const auto *MemSD = cast<MemSDNode>(Op.getNode());
5239 EVT MemVT = MemSD->getMemoryVT();
5240 MachineMemOperand *MMO = MemSD->getMemOperand();
5241 SDValue Chain = MemSD->getChain();
5242 SDValue BasePtr = MemSD->getBasePtr();
5243
5244 bool IsTruncatingStore = false;
5245 SDValue Index, Mask, Val, VL;
5246
5247 if (auto *VPSN = dyn_cast<VPScatterSDNode>(Op.getNode())) {
5248 Index = VPSN->getIndex();
5249 Mask = VPSN->getMask();
5250 Val = VPSN->getValue();
5251 VL = VPSN->getVectorLength();
5252 // VP doesn't support truncating stores.
5253 IsTruncatingStore = false;
5254 } else {
5255 // Else it must be an MSCATTER.
5256 auto *MSN = cast<MaskedScatterSDNode>(Op.getNode());
5257 Index = MSN->getIndex();
5258 Mask = MSN->getMask();
5259 Val = MSN->getValue();
5260 IsTruncatingStore = MSN->isTruncatingStore();
5261 }
5262
5263 MVT VT = Val.getSimpleValueType();
5264 MVT IndexVT = Index.getSimpleValueType();
5265 MVT XLenVT = Subtarget.getXLenVT();
5266
5267 assert(VT.getVectorElementCount() == IndexVT.getVectorElementCount() &&
5268 "Unexpected VTs!");
5269 assert(BasePtr.getSimpleValueType() == XLenVT && "Unexpected pointer type");
5270 // Targets have to explicitly opt-in for extending vector loads and
5271 // truncating vector stores.
5272 assert(!IsTruncatingStore && "Unexpected truncating MSCATTER/VP_SCATTER");
5273 (void)IsTruncatingStore;
5274
5275 // If the mask is known to be all ones, optimize to an unmasked intrinsic;
5276 // the selection of the masked intrinsics doesn't do this for us.
5277 bool IsUnmasked = ISD::isConstantSplatVectorAllOnes(Mask.getNode());
5278
5279 MVT ContainerVT = VT;
5280 if (VT.isFixedLengthVector()) {
5281 // We need to use the larger of the value and index type to determine the
5282 // scalable type to use so we don't increase LMUL for any operand/result.
5283 if (VT.bitsGE(IndexVT)) {
5284 ContainerVT = getContainerForFixedLengthVector(VT);
5285 IndexVT = MVT::getVectorVT(IndexVT.getVectorElementType(),
5286 ContainerVT.getVectorElementCount());
5287 } else {
5288 IndexVT = getContainerForFixedLengthVector(IndexVT);
5289 ContainerVT = MVT::getVectorVT(VT.getVectorElementType(),
5290 IndexVT.getVectorElementCount());
5291 }
5292
5293 Index = convertToScalableVector(IndexVT, Index, DAG, Subtarget);
5294 Val = convertToScalableVector(ContainerVT, Val, DAG, Subtarget);
5295
5296 if (!IsUnmasked) {
5297 MVT MaskVT =
5298 MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5299 Mask = convertToScalableVector(MaskVT, Mask, DAG, Subtarget);
5300 }
5301 }
5302
5303 if (!VL)
5304 VL = getDefaultVLOps(VT, ContainerVT, DL, DAG, Subtarget).second;
5305
5306 unsigned IntID =
5307 IsUnmasked ? Intrinsic::riscv_vsoxei : Intrinsic::riscv_vsoxei_mask;
5308 SmallVector<SDValue, 8> Ops{Chain, DAG.getTargetConstant(IntID, DL, XLenVT)};
5309 Ops.push_back(Val);
5310 Ops.push_back(BasePtr);
5311 Ops.push_back(Index);
5312 if (!IsUnmasked)
5313 Ops.push_back(Mask);
5314 Ops.push_back(VL);
5315
5316 return DAG.getMemIntrinsicNode(ISD::INTRINSIC_VOID, DL,
5317 DAG.getVTList(MVT::Other), Ops, MemVT, MMO);
5318 }
5319
5320 SDValue RISCVTargetLowering::lowerGET_ROUNDING(SDValue Op,
5321 SelectionDAG &DAG) const {
5322 const MVT XLenVT = Subtarget.getXLenVT();
5323 SDLoc DL(Op);
5324 SDValue Chain = Op->getOperand(0);
5325 SDValue SysRegNo = DAG.getConstant(
5326 RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5327 SDVTList VTs = DAG.getVTList(XLenVT, MVT::Other);
5328 SDValue RM = DAG.getNode(RISCVISD::READ_CSR, DL, VTs, Chain, SysRegNo);
5329
5330 // The encoding used for the rounding mode in RISCV differs from that used
5331 // in FLT_ROUNDS. To convert it, the RISCV rounding mode is used as an index
5332 // into a table, which consists of a sequence of 4-bit fields, each
5333 // representing the corresponding FLT_ROUNDS mode.
5334 static const int Table =
5335 (int(RoundingMode::NearestTiesToEven) << 4 * RISCVFPRndMode::RNE) |
5336 (int(RoundingMode::TowardZero) << 4 * RISCVFPRndMode::RTZ) |
5337 (int(RoundingMode::TowardNegative) << 4 * RISCVFPRndMode::RDN) |
5338 (int(RoundingMode::TowardPositive) << 4 * RISCVFPRndMode::RUP) |
5339 (int(RoundingMode::NearestTiesToAway) << 4 * RISCVFPRndMode::RMM);
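// Worked example (ours): if FRM holds RTZ (1), the shift amount below is
// 1 << 2 = 4, and (Table >> 4) & 7 extracts the second 4-bit field,
// int(RoundingMode::TowardZero) = 0, which is the FLT_ROUNDS encoding for
// round-toward-zero.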
5340
5341 SDValue Shift =
5342 DAG.getNode(ISD::SHL, DL, XLenVT, RM, DAG.getConstant(2, DL, XLenVT));
5343 SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5344 DAG.getConstant(Table, DL, XLenVT), Shift);
5345 SDValue Masked = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5346 DAG.getConstant(7, DL, XLenVT));
5347
5348 return DAG.getMergeValues({Masked, Chain}, DL);
5349 }
5350
5351 SDValue RISCVTargetLowering::lowerSET_ROUNDING(SDValue Op,
5352 SelectionDAG &DAG) const {
5353 const MVT XLenVT = Subtarget.getXLenVT();
5354 SDLoc DL(Op);
5355 SDValue Chain = Op->getOperand(0);
5356 SDValue RMValue = Op->getOperand(1);
5357 SDValue SysRegNo = DAG.getConstant(
5358 RISCVSysReg::lookupSysRegByName("FRM")->Encoding, DL, XLenVT);
5359
5360 // The encoding used for the rounding mode in RISCV differs from that used
5361 // in FLT_ROUNDS. To convert it, the C rounding mode is used as an index into
5362 // a table, which consists of a sequence of 4-bit fields, each representing
5363 // the corresponding RISCV mode.
5364 static const unsigned Table =
5365 (RISCVFPRndMode::RNE << 4 * int(RoundingMode::NearestTiesToEven)) |
5366 (RISCVFPRndMode::RTZ << 4 * int(RoundingMode::TowardZero)) |
5367 (RISCVFPRndMode::RDN << 4 * int(RoundingMode::TowardNegative)) |
5368 (RISCVFPRndMode::RUP << 4 * int(RoundingMode::TowardPositive)) |
5369 (RISCVFPRndMode::RMM << 4 * int(RoundingMode::NearestTiesToAway));
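// Worked example (ours): for RMValue = int(RoundingMode::TowardNegative) = 3,
// the shift amount below is 3 << 2 = 12, and (Table >> 12) & 0x7 yields
// RISCVFPRndMode::RDN (2), which is then written to the FRM register.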
5370
5371 SDValue Shift = DAG.getNode(ISD::SHL, DL, XLenVT, RMValue,
5372 DAG.getConstant(2, DL, XLenVT));
5373 SDValue Shifted = DAG.getNode(ISD::SRL, DL, XLenVT,
5374 DAG.getConstant(Table, DL, XLenVT), Shift);
5375 RMValue = DAG.getNode(ISD::AND, DL, XLenVT, Shifted,
5376 DAG.getConstant(0x7, DL, XLenVT));
5377 return DAG.getNode(RISCVISD::WRITE_CSR, DL, MVT::Other, Chain, SysRegNo,
5378 RMValue);
5379 }
5380
5381 // Returns the opcode of the target-specific SDNode that implements the 32-bit
5382 // form of the given Opcode.
5383 static RISCVISD::NodeType getRISCVWOpcode(unsigned Opcode) {
5384 switch (Opcode) {
5385 default:
5386 llvm_unreachable("Unexpected opcode");
5387 case ISD::SHL:
5388 return RISCVISD::SLLW;
5389 case ISD::SRA:
5390 return RISCVISD::SRAW;
5391 case ISD::SRL:
5392 return RISCVISD::SRLW;
5393 case ISD::SDIV:
5394 return RISCVISD::DIVW;
5395 case ISD::UDIV:
5396 return RISCVISD::DIVUW;
5397 case ISD::UREM:
5398 return RISCVISD::REMUW;
5399 case ISD::ROTL:
5400 return RISCVISD::ROLW;
5401 case ISD::ROTR:
5402 return RISCVISD::RORW;
5403 case RISCVISD::GREV:
5404 return RISCVISD::GREVW;
5405 case RISCVISD::GORC:
5406 return RISCVISD::GORCW;
5407 }
5408 }
5409
5410 // Converts the given i8/i16/i32 operation to a target-specific SelectionDAG
5411 // node. Because i8/i16/i32 isn't a legal type for RV64, these operations
5412 // would otherwise be promoted to i64, making it difficult to select the
5413 // SLLW/DIVUW/.../*W instructions later on, because the fact that the
5414 // operation was originally of type i8/i16/i32 is lost.
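// For example (our illustration): on RV64, (i32 (shl x, y)) is rewritten as
// (i32 (trunc (SLLW (anyext x), (anyext y)))), keeping the fact that only
// the low 32 bits matter visible to instruction selection.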
5415 static SDValue customLegalizeToWOp(SDNode *N, SelectionDAG &DAG,
5416 unsigned ExtOpc = ISD::ANY_EXTEND) {
5417 SDLoc DL(N);
5418 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5419 SDValue NewOp0 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(0));
5420 SDValue NewOp1 = DAG.getNode(ExtOpc, DL, MVT::i64, N->getOperand(1));
5421 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5422 // ReplaceNodeResults requires we maintain the same type for the return value.
5423 return DAG.getNode(ISD::TRUNCATE, DL, N->getValueType(0), NewRes);
5424 }
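
// As a concrete instance of the transform above, an illegal i32 shift on
// RV64 such as
//   (i32 (srl x, y))
// becomes
//   (i32 (trunc (RISCVISD::SRLW (any_extend x), (any_extend y))))
// so that instruction selection can still pick srlw even though the i32
// operation itself is no longer visible after type legalization.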
5425
5426 // Converts the given 32-bit operation to an i64 operation with sign-extension
5427 // semantics to reduce the number of sign-extension instructions.
5428 static SDValue customLegalizeToWOpWithSExt(SDNode *N, SelectionDAG &DAG) {
5429 SDLoc DL(N);
5430 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5431 SDValue NewOp1 = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5432 SDValue NewWOp = DAG.getNode(N->getOpcode(), DL, MVT::i64, NewOp0, NewOp1);
5433 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5434 DAG.getValueType(MVT::i32));
5435 return DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes);
5436 }
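
// For example, an i32 add on RV64 is rewritten by the helper above as
//   (i32 (trunc (sext_inreg (add (any_extend x), (any_extend y)), i32)))
// The explicit sext_inreg models ADDW's sign-extended result, so users that
// need the value sign-extended to i64 can reuse it rather than emitting a
// separate sext.w.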
5437
5438 void RISCVTargetLowering::ReplaceNodeResults(SDNode *N,
5439 SmallVectorImpl<SDValue> &Results,
5440 SelectionDAG &DAG) const {
5441 SDLoc DL(N);
5442 switch (N->getOpcode()) {
5443 default:
5444 llvm_unreachable("Don't know how to custom type legalize this operation!");
5445 case ISD::STRICT_FP_TO_SINT:
5446 case ISD::STRICT_FP_TO_UINT:
5447 case ISD::FP_TO_SINT:
5448 case ISD::FP_TO_UINT: {
5449 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5450 "Unexpected custom legalisation");
5451 bool IsStrict = N->isStrictFPOpcode();
5452 bool IsSigned = N->getOpcode() == ISD::FP_TO_SINT ||
5453 N->getOpcode() == ISD::STRICT_FP_TO_SINT;
5454 SDValue Op0 = IsStrict ? N->getOperand(1) : N->getOperand(0);
5455 if (getTypeAction(*DAG.getContext(), Op0.getValueType()) !=
5456 TargetLowering::TypeSoftenFloat) {
5457 // FIXME: Support strict FP.
5458 if (IsStrict)
5459 return;
5460 if (!isTypeLegal(Op0.getValueType()))
5461 return;
5462 unsigned Opc =
5463 IsSigned ? RISCVISD::FCVT_W_RTZ_RV64 : RISCVISD::FCVT_WU_RTZ_RV64;
5464 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, Op0);
5465 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5466 return;
5467 }
5468 // If the FP type needs to be softened, emit a library call using the 'si'
5469 // version. If we left it to default legalization we'd end up with 'di'. If
5470 // the FP type doesn't need to be softened just let generic type
5471 // legalization promote the result type.
5472 RTLIB::Libcall LC;
5473 if (IsSigned)
5474 LC = RTLIB::getFPTOSINT(Op0.getValueType(), N->getValueType(0));
5475 else
5476 LC = RTLIB::getFPTOUINT(Op0.getValueType(), N->getValueType(0));
5477 MakeLibCallOptions CallOptions;
5478 EVT OpVT = Op0.getValueType();
5479 CallOptions.setTypeListBeforeSoften(OpVT, N->getValueType(0), true);
5480 SDValue Chain = IsStrict ? N->getOperand(0) : SDValue();
5481 SDValue Result;
5482 std::tie(Result, Chain) =
5483 makeLibCall(DAG, LC, N->getValueType(0), Op0, CallOptions, DL, Chain);
5484 Results.push_back(Result);
5485 if (IsStrict)
5486 Results.push_back(Chain);
5487 break;
5488 }
5489 case ISD::READCYCLECOUNTER: {
5490 assert(!Subtarget.is64Bit() &&
5491 "READCYCLECOUNTER only has custom type legalization on riscv32");
5492
5493 SDVTList VTs = DAG.getVTList(MVT::i32, MVT::i32, MVT::Other);
5494 SDValue RCW =
5495 DAG.getNode(RISCVISD::READ_CYCLE_WIDE, DL, VTs, N->getOperand(0));
5496
5497 Results.push_back(
5498 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, RCW, RCW.getValue(1)));
5499 Results.push_back(RCW.getValue(2));
5500 break;
5501 }
5502 case ISD::MUL: {
5503 unsigned Size = N->getSimpleValueType(0).getSizeInBits();
5504 unsigned XLen = Subtarget.getXLen();
5505     // This multiply needs to be expanded; try to use MULHSU+MUL if possible.
5506 if (Size > XLen) {
5507 assert(Size == (XLen * 2) && "Unexpected custom legalisation");
5508 SDValue LHS = N->getOperand(0);
5509 SDValue RHS = N->getOperand(1);
5510 APInt HighMask = APInt::getHighBitsSet(Size, XLen);
5511
5512 bool LHSIsU = DAG.MaskedValueIsZero(LHS, HighMask);
5513 bool RHSIsU = DAG.MaskedValueIsZero(RHS, HighMask);
5514 // We need exactly one side to be unsigned.
5515 if (LHSIsU == RHSIsU)
5516 return;
5517
5518 auto MakeMULPair = [&](SDValue S, SDValue U) {
5519 MVT XLenVT = Subtarget.getXLenVT();
5520 S = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, S);
5521 U = DAG.getNode(ISD::TRUNCATE, DL, XLenVT, U);
5522 SDValue Lo = DAG.getNode(ISD::MUL, DL, XLenVT, S, U);
5523 SDValue Hi = DAG.getNode(RISCVISD::MULHSU, DL, XLenVT, S, U);
5524 return DAG.getNode(ISD::BUILD_PAIR, DL, N->getValueType(0), Lo, Hi);
5525 };
5526
5527 bool LHSIsS = DAG.ComputeNumSignBits(LHS) > XLen;
5528 bool RHSIsS = DAG.ComputeNumSignBits(RHS) > XLen;
5529
5530 // The other operand should be signed, but still prefer MULH when
5531 // possible.
5532 if (RHSIsU && LHSIsS && !RHSIsS)
5533 Results.push_back(MakeMULPair(LHS, RHS));
5534 else if (LHSIsU && RHSIsS && !LHSIsS)
5535 Results.push_back(MakeMULPair(RHS, LHS));
5536
5537 return;
5538 }
5539 LLVM_FALLTHROUGH;
5540 }
5541 case ISD::ADD:
5542 case ISD::SUB:
5543 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5544 "Unexpected custom legalisation");
5545 Results.push_back(customLegalizeToWOpWithSExt(N, DAG));
5546 break;
5547 case ISD::SHL:
5548 case ISD::SRA:
5549 case ISD::SRL:
5550 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5551 "Unexpected custom legalisation");
5552 if (N->getOperand(1).getOpcode() != ISD::Constant) {
5553 Results.push_back(customLegalizeToWOp(N, DAG));
5554 break;
5555 }
5556
5557 // Custom legalize ISD::SHL by placing a SIGN_EXTEND_INREG after. This is
5558 // similar to customLegalizeToWOpWithSExt, but we must zero_extend the
5559 // shift amount.
5560 if (N->getOpcode() == ISD::SHL) {
5561 SDLoc DL(N);
5562 SDValue NewOp0 =
5563 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5564 SDValue NewOp1 =
5565 DAG.getNode(ISD::ZERO_EXTEND, DL, MVT::i64, N->getOperand(1));
5566 SDValue NewWOp = DAG.getNode(ISD::SHL, DL, MVT::i64, NewOp0, NewOp1);
5567 SDValue NewRes = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, NewWOp,
5568 DAG.getValueType(MVT::i32));
5569 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5570 }
5571
5572 break;
5573 case ISD::ROTL:
5574 case ISD::ROTR:
5575 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5576 "Unexpected custom legalisation");
5577 Results.push_back(customLegalizeToWOp(N, DAG));
5578 break;
5579 case ISD::CTTZ:
5580 case ISD::CTTZ_ZERO_UNDEF:
5581 case ISD::CTLZ:
5582 case ISD::CTLZ_ZERO_UNDEF: {
5583 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5584 "Unexpected custom legalisation");
5585
5586 SDValue NewOp0 =
5587 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5588 bool IsCTZ =
5589 N->getOpcode() == ISD::CTTZ || N->getOpcode() == ISD::CTTZ_ZERO_UNDEF;
5590 unsigned Opc = IsCTZ ? RISCVISD::CTZW : RISCVISD::CLZW;
5591 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp0);
5592 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5593 return;
5594 }
5595 case ISD::SDIV:
5596 case ISD::UDIV:
5597 case ISD::UREM: {
5598 MVT VT = N->getSimpleValueType(0);
5599 assert((VT == MVT::i8 || VT == MVT::i16 || VT == MVT::i32) &&
5600 Subtarget.is64Bit() && Subtarget.hasStdExtM() &&
5601 "Unexpected custom legalisation");
5602     // Don't promote division/remainder by a constant since we should expand
5603     // those to a multiply by a magic constant.
5604     // FIXME: What if the expansion is disabled for minsize?
5605 if (N->getOperand(1).getOpcode() == ISD::Constant)
5606 return;
5607
5608 // If the input is i32, use ANY_EXTEND since the W instructions don't read
5609 // the upper 32 bits. For other types we need to sign or zero extend
5610 // based on the opcode.
5611 unsigned ExtOpc = ISD::ANY_EXTEND;
5612 if (VT != MVT::i32)
5613 ExtOpc = N->getOpcode() == ISD::SDIV ? ISD::SIGN_EXTEND
5614 : ISD::ZERO_EXTEND;
5615
5616 Results.push_back(customLegalizeToWOp(N, DAG, ExtOpc));
5617 break;
5618 }
5619 case ISD::UADDO:
5620 case ISD::USUBO: {
5621 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5622 "Unexpected custom legalisation");
5623 bool IsAdd = N->getOpcode() == ISD::UADDO;
5624 // Create an ADDW or SUBW.
5625 SDValue LHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5626 SDValue RHS = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5627 SDValue Res =
5628 DAG.getNode(IsAdd ? ISD::ADD : ISD::SUB, DL, MVT::i64, LHS, RHS);
5629 Res = DAG.getNode(ISD::SIGN_EXTEND_INREG, DL, MVT::i64, Res,
5630 DAG.getValueType(MVT::i32));
5631
5632 // Sign extend the LHS and perform an unsigned compare with the ADDW result.
5633 // Since the inputs are sign extended from i32, this is equivalent to
5634 // comparing the lower 32 bits.
5635 LHS = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5636 SDValue Overflow = DAG.getSetCC(DL, N->getValueType(1), Res, LHS,
5637 IsAdd ? ISD::SETULT : ISD::SETUGT);
5638
5639 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5640 Results.push_back(Overflow);
5641 return;
5642 }
5643 case ISD::UADDSAT:
5644 case ISD::USUBSAT: {
5645 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5646 "Unexpected custom legalisation");
5647 if (Subtarget.hasStdExtZbb()) {
5648 // With Zbb we can sign extend and let LegalizeDAG use minu/maxu. Using
5649 // sign extend allows overflow of the lower 32 bits to be detected on
5650 // the promoted size.
5651 SDValue LHS =
5652 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(0));
5653 SDValue RHS =
5654 DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, N->getOperand(1));
5655 SDValue Res = DAG.getNode(N->getOpcode(), DL, MVT::i64, LHS, RHS);
5656 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5657 return;
5658 }
5659
5660 // Without Zbb, expand to UADDO/USUBO+select which will trigger our custom
5661 // promotion for UADDO/USUBO.
5662 Results.push_back(expandAddSubSat(N, DAG));
5663 return;
5664 }
5665 case ISD::BITCAST: {
5666 EVT VT = N->getValueType(0);
5667 assert(VT.isInteger() && !VT.isVector() && "Unexpected VT!");
5668 SDValue Op0 = N->getOperand(0);
5669 EVT Op0VT = Op0.getValueType();
5670 MVT XLenVT = Subtarget.getXLenVT();
5671 if (VT == MVT::i16 && Op0VT == MVT::f16 && Subtarget.hasStdExtZfh()) {
5672 SDValue FPConv = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, XLenVT, Op0);
5673 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, FPConv));
5674 } else if (VT == MVT::i32 && Op0VT == MVT::f32 && Subtarget.is64Bit() &&
5675 Subtarget.hasStdExtF()) {
5676 SDValue FPConv =
5677 DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Op0);
5678 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, FPConv));
5679 } else if (!VT.isVector() && Op0VT.isFixedLengthVector() &&
5680 isTypeLegal(Op0VT)) {
5681 // Custom-legalize bitcasts from fixed-length vector types to illegal
5682 // scalar types in order to improve codegen. Bitcast the vector to a
5683 // one-element vector type whose element type is the same as the result
5684 // type, and extract the first element.
5685 LLVMContext &Context = *DAG.getContext();
5686 SDValue BVec = DAG.getBitcast(EVT::getVectorVT(Context, VT, 1), Op0);
5687 Results.push_back(DAG.getNode(ISD::EXTRACT_VECTOR_ELT, DL, VT, BVec,
5688 DAG.getConstant(0, DL, XLenVT)));
5689 }
5690 break;
5691 }
5692 case RISCVISD::GREV:
5693 case RISCVISD::GORC: {
5694 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5695 "Unexpected custom legalisation");
5696 assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5697 // This is similar to customLegalizeToWOp, except that we pass the second
5698 // operand (a TargetConstant) straight through: it is already of type
5699 // XLenVT.
5700 RISCVISD::NodeType WOpcode = getRISCVWOpcode(N->getOpcode());
5701 SDValue NewOp0 =
5702 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5703 SDValue NewOp1 =
5704 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5705 SDValue NewRes = DAG.getNode(WOpcode, DL, MVT::i64, NewOp0, NewOp1);
5706 // ReplaceNodeResults requires we maintain the same type for the return
5707 // value.
5708 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5709 break;
5710 }
5711 case RISCVISD::SHFL: {
5712 // There is no SHFLIW instruction, but we can just promote the operation.
5713 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5714 "Unexpected custom legalisation");
5715 assert(isa<ConstantSDNode>(N->getOperand(1)) && "Expected constant");
5716 SDValue NewOp0 =
5717 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5718 SDValue NewOp1 =
5719 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5720 SDValue NewRes = DAG.getNode(RISCVISD::SHFL, DL, MVT::i64, NewOp0, NewOp1);
5721 // ReplaceNodeResults requires we maintain the same type for the return
5722 // value.
5723 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewRes));
5724 break;
5725 }
5726 case ISD::BSWAP:
5727 case ISD::BITREVERSE: {
5728 MVT VT = N->getSimpleValueType(0);
5729 MVT XLenVT = Subtarget.getXLenVT();
5730 assert((VT == MVT::i8 || VT == MVT::i16 ||
5731 (VT == MVT::i32 && Subtarget.is64Bit())) &&
5732 Subtarget.hasStdExtZbp() && "Unexpected custom legalisation");
5733 SDValue NewOp0 = DAG.getNode(ISD::ANY_EXTEND, DL, XLenVT, N->getOperand(0));
5734 unsigned Imm = VT.getSizeInBits() - 1;
5735 // If this is BSWAP rather than BITREVERSE, clear the lower 3 bits.
5736 if (N->getOpcode() == ISD::BSWAP)
5737 Imm &= ~0x7U;
5738 unsigned Opc = Subtarget.is64Bit() ? RISCVISD::GREVW : RISCVISD::GREV;
5739 SDValue GREVI =
5740 DAG.getNode(Opc, DL, XLenVT, NewOp0, DAG.getConstant(Imm, DL, XLenVT));
5741 // ReplaceNodeResults requires we maintain the same type for the return
5742 // value.
5743 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, GREVI));
5744 break;
5745 }
5746 case ISD::FSHL:
5747 case ISD::FSHR: {
5748 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5749 Subtarget.hasStdExtZbt() && "Unexpected custom legalisation");
5750 SDValue NewOp0 =
5751 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(0));
5752 SDValue NewOp1 =
5753 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5754 SDValue NewOp2 =
5755 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5756 // FSLW/FSRW take a 6 bit shift amount but i32 FSHL/FSHR only use 5 bits.
5757 // Mask the shift amount to 5 bits.
5758 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5759 DAG.getConstant(0x1f, DL, MVT::i64));
5760 unsigned Opc =
5761 N->getOpcode() == ISD::FSHL ? RISCVISD::FSLW : RISCVISD::FSRW;
5762 SDValue NewOp = DAG.getNode(Opc, DL, MVT::i64, NewOp0, NewOp1, NewOp2);
5763 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, NewOp));
5764 break;
5765 }
5766 case ISD::EXTRACT_VECTOR_ELT: {
5767     // Custom-legalize an EXTRACT_VECTOR_ELT where XLEN < SEW, as the SEW
5768     // element type is illegal (currently only vXi64 on RV32).
5769     // With vmv.x.s, when SEW > XLEN, only the least-significant XLEN bits are
5770     // transferred to the destination register. We issue two of these from the
5771     // upper and lower halves of the SEW-bit vector element, slid down to the
5772     // first element.
5773 SDValue Vec = N->getOperand(0);
5774 SDValue Idx = N->getOperand(1);
5775
5776 // The vector type hasn't been legalized yet so we can't issue target
5777 // specific nodes if it needs legalization.
5778 // FIXME: We would manually legalize if it's important.
5779 if (!isTypeLegal(Vec.getValueType()))
5780 return;
5781
5782 MVT VecVT = Vec.getSimpleValueType();
5783
5784 assert(!Subtarget.is64Bit() && N->getValueType(0) == MVT::i64 &&
5785 VecVT.getVectorElementType() == MVT::i64 &&
5786 "Unexpected EXTRACT_VECTOR_ELT legalization");
5787
5788 // If this is a fixed vector, we need to convert it to a scalable vector.
5789 MVT ContainerVT = VecVT;
5790 if (VecVT.isFixedLengthVector()) {
5791 ContainerVT = getContainerForFixedLengthVector(VecVT);
5792 Vec = convertToScalableVector(ContainerVT, Vec, DAG, Subtarget);
5793 }
5794
5795 MVT XLenVT = Subtarget.getXLenVT();
5796
5797 // Use a VL of 1 to avoid processing more elements than we need.
5798 MVT MaskVT = MVT::getVectorVT(MVT::i1, ContainerVT.getVectorElementCount());
5799 SDValue VL = DAG.getConstant(1, DL, XLenVT);
5800 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5801
5802 // Unless the index is known to be 0, we must slide the vector down to get
5803 // the desired element into index 0.
5804 if (!isNullConstant(Idx)) {
5805 Vec = DAG.getNode(RISCVISD::VSLIDEDOWN_VL, DL, ContainerVT,
5806 DAG.getUNDEF(ContainerVT), Vec, Idx, Mask, VL);
5807 }
5808
5809 // Extract the lower XLEN bits of the correct vector element.
5810 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5811
5812 // To extract the upper XLEN bits of the vector element, shift the first
5813 // element right by 32 bits and re-extract the lower XLEN bits.
5814 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, ContainerVT,
5815 DAG.getConstant(32, DL, XLenVT), VL);
5816 SDValue LShr32 = DAG.getNode(RISCVISD::SRL_VL, DL, ContainerVT, Vec,
5817 ThirtyTwoV, Mask, VL);
5818
5819 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5820
5821 Results.push_back(DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5822 break;
5823 }
5824 case ISD::INTRINSIC_WO_CHAIN: {
5825 unsigned IntNo = cast<ConstantSDNode>(N->getOperand(0))->getZExtValue();
5826 switch (IntNo) {
5827 default:
5828 llvm_unreachable(
5829 "Don't know how to custom type legalize this intrinsic!");
5830 case Intrinsic::riscv_orc_b: {
5831 // Lower to the GORCI encoding for orc.b with the operand extended.
5832 SDValue NewOp =
5833 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5834 // If Zbp is enabled, use GORCIW which will sign extend the result.
5835 unsigned Opc =
5836 Subtarget.hasStdExtZbp() ? RISCVISD::GORCW : RISCVISD::GORC;
5837 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp,
5838 DAG.getConstant(7, DL, MVT::i64));
5839 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5840 return;
5841 }
5842 case Intrinsic::riscv_grev:
5843 case Intrinsic::riscv_gorc: {
5844 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5845 "Unexpected custom legalisation");
5846 SDValue NewOp1 =
5847 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5848 SDValue NewOp2 =
5849 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5850 unsigned Opc =
5851 IntNo == Intrinsic::riscv_grev ? RISCVISD::GREVW : RISCVISD::GORCW;
5852 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5853 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5854 break;
5855 }
5856 case Intrinsic::riscv_shfl:
5857 case Intrinsic::riscv_unshfl: {
5858 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5859 "Unexpected custom legalisation");
5860 SDValue NewOp1 =
5861 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5862 SDValue NewOp2 =
5863 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5864 unsigned Opc =
5865 IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFLW : RISCVISD::UNSHFLW;
5866 if (isa<ConstantSDNode>(N->getOperand(2))) {
5867 NewOp2 = DAG.getNode(ISD::AND, DL, MVT::i64, NewOp2,
5868 DAG.getConstant(0xf, DL, MVT::i64));
5869 Opc =
5870 IntNo == Intrinsic::riscv_shfl ? RISCVISD::SHFL : RISCVISD::UNSHFL;
5871 }
5872 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5873 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5874 break;
5875 }
5876 case Intrinsic::riscv_bcompress:
5877 case Intrinsic::riscv_bdecompress: {
5878 assert(N->getValueType(0) == MVT::i32 && Subtarget.is64Bit() &&
5879 "Unexpected custom legalisation");
5880 SDValue NewOp1 =
5881 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(1));
5882 SDValue NewOp2 =
5883 DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i64, N->getOperand(2));
5884 unsigned Opc = IntNo == Intrinsic::riscv_bcompress
5885 ? RISCVISD::BCOMPRESSW
5886 : RISCVISD::BDECOMPRESSW;
5887 SDValue Res = DAG.getNode(Opc, DL, MVT::i64, NewOp1, NewOp2);
5888 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, MVT::i32, Res));
5889 break;
5890 }
5891 case Intrinsic::riscv_vmv_x_s: {
5892 EVT VT = N->getValueType(0);
5893 MVT XLenVT = Subtarget.getXLenVT();
5894 if (VT.bitsLT(XLenVT)) {
5895         // Simple case: just extract using vmv.x.s and truncate.
5896 SDValue Extract = DAG.getNode(RISCVISD::VMV_X_S, DL,
5897 Subtarget.getXLenVT(), N->getOperand(1));
5898 Results.push_back(DAG.getNode(ISD::TRUNCATE, DL, VT, Extract));
5899 return;
5900 }
5901
5902 assert(VT == MVT::i64 && !Subtarget.is64Bit() &&
5903 "Unexpected custom legalization");
5904
5905 // We need to do the move in two steps.
5906 SDValue Vec = N->getOperand(1);
5907 MVT VecVT = Vec.getSimpleValueType();
5908
5909 // First extract the lower XLEN bits of the element.
5910 SDValue EltLo = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, Vec);
5911
5912 // To extract the upper XLEN bits of the vector element, shift the first
5913 // element right by 32 bits and re-extract the lower XLEN bits.
5914 SDValue VL = DAG.getConstant(1, DL, XLenVT);
5915 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
5916 SDValue Mask = DAG.getNode(RISCVISD::VMSET_VL, DL, MaskVT, VL);
5917 SDValue ThirtyTwoV = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VecVT,
5918 DAG.getConstant(32, DL, XLenVT), VL);
5919 SDValue LShr32 =
5920 DAG.getNode(RISCVISD::SRL_VL, DL, VecVT, Vec, ThirtyTwoV, Mask, VL);
5921 SDValue EltHi = DAG.getNode(RISCVISD::VMV_X_S, DL, XLenVT, LShr32);
5922
5923 Results.push_back(
5924 DAG.getNode(ISD::BUILD_PAIR, DL, MVT::i64, EltLo, EltHi));
5925 break;
5926 }
5927 }
5928 break;
5929 }
5930 case ISD::VECREDUCE_ADD:
5931 case ISD::VECREDUCE_AND:
5932 case ISD::VECREDUCE_OR:
5933 case ISD::VECREDUCE_XOR:
5934 case ISD::VECREDUCE_SMAX:
5935 case ISD::VECREDUCE_UMAX:
5936 case ISD::VECREDUCE_SMIN:
5937 case ISD::VECREDUCE_UMIN:
5938 if (SDValue V = lowerVECREDUCE(SDValue(N, 0), DAG))
5939 Results.push_back(V);
5940 break;
5941 case ISD::VP_REDUCE_ADD:
5942 case ISD::VP_REDUCE_AND:
5943 case ISD::VP_REDUCE_OR:
5944 case ISD::VP_REDUCE_XOR:
5945 case ISD::VP_REDUCE_SMAX:
5946 case ISD::VP_REDUCE_UMAX:
5947 case ISD::VP_REDUCE_SMIN:
5948 case ISD::VP_REDUCE_UMIN:
5949 if (SDValue V = lowerVPREDUCE(SDValue(N, 0), DAG))
5950 Results.push_back(V);
5951 break;
5952 case ISD::FLT_ROUNDS_: {
5953 SDVTList VTs = DAG.getVTList(Subtarget.getXLenVT(), MVT::Other);
5954 SDValue Res = DAG.getNode(ISD::FLT_ROUNDS_, DL, VTs, N->getOperand(0));
5955 Results.push_back(Res.getValue(0));
5956 Results.push_back(Res.getValue(1));
5957 break;
5958 }
5959 }
5960 }
5961
5962 // A structure to hold one of the bit-manipulation patterns below. Together, a
5963 // SHL and non-SHL pattern may form a bit-manipulation pair on a single source:
5964 // (or (and (shl x, 1), 0xAAAAAAAA),
5965 // (and (srl x, 1), 0x55555555))
5966 struct RISCVBitmanipPat {
5967 SDValue Op;
5968 unsigned ShAmt;
5969 bool IsSHL;
5970
5971   bool formsPairWith(const RISCVBitmanipPat &Other) const {
5972 return Op == Other.Op && ShAmt == Other.ShAmt && IsSHL != Other.IsSHL;
5973 }
5974 };
5975
5976 // Matches patterns of the form
5977 // (and (shl x, C2), (C1 << C2))
5978 // (and (srl x, C2), C1)
5979 // (shl (and x, C1), C2)
5980 // (srl (and x, (C1 << C2)), C2)
5981 // Where C2 is a power of 2 and C1 has at least that many leading zeroes.
5982 // The expected masks for each shift amount are specified in BitmanipMasks where
5983 // BitmanipMasks[log2(C2)] specifies the expected C1 value.
5984 // The maximum allowed shift amount is either XLen/2 or XLen/4, determined by
5985 // whether BitmanipMasks contains 6 or 5 entries respectively, assuming the
5986 // maximum possible XLen is 64.
5987 static Optional<RISCVBitmanipPat>
5988 matchRISCVBitmanipPat(SDValue Op, ArrayRef<uint64_t> BitmanipMasks) {
5989 assert((BitmanipMasks.size() == 5 || BitmanipMasks.size() == 6) &&
5990 "Unexpected number of masks");
5991 Optional<uint64_t> Mask;
5992 // Optionally consume a mask around the shift operation.
5993 if (Op.getOpcode() == ISD::AND && isa<ConstantSDNode>(Op.getOperand(1))) {
5994 Mask = Op.getConstantOperandVal(1);
5995 Op = Op.getOperand(0);
5996 }
5997 if (Op.getOpcode() != ISD::SHL && Op.getOpcode() != ISD::SRL)
5998 return None;
5999 bool IsSHL = Op.getOpcode() == ISD::SHL;
6000
6001 if (!isa<ConstantSDNode>(Op.getOperand(1)))
6002 return None;
6003 uint64_t ShAmt = Op.getConstantOperandVal(1);
6004
6005 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6006 if (ShAmt >= Width || !isPowerOf2_64(ShAmt))
6007 return None;
6008   // If we don't have enough masks for 64 bits, then we must be trying to
6009   // match SHFL, so we're only allowed to shift 1/4 of the width.
6010 if (BitmanipMasks.size() == 5 && ShAmt >= (Width / 2))
6011 return None;
6012
6013 SDValue Src = Op.getOperand(0);
6014
6015 // The expected mask is shifted left when the AND is found around SHL
6016 // patterns.
6017 // ((x >> 1) & 0x55555555)
6018 // ((x << 1) & 0xAAAAAAAA)
6019 bool SHLExpMask = IsSHL;
6020
6021 if (!Mask) {
6022 // Sometimes LLVM keeps the mask as an operand of the shift, typically when
6023 // the mask is all ones: consume that now.
6024 if (Src.getOpcode() == ISD::AND && isa<ConstantSDNode>(Src.getOperand(1))) {
6025 Mask = Src.getConstantOperandVal(1);
6026 Src = Src.getOperand(0);
6027 // The expected mask is now in fact shifted left for SRL, so reverse the
6028 // decision.
6029 // ((x & 0xAAAAAAAA) >> 1)
6030 // ((x & 0x55555555) << 1)
6031 SHLExpMask = !SHLExpMask;
6032 } else {
6033 // Use a default shifted mask of all-ones if there's no AND, truncated
6034 // down to the expected width. This simplifies the logic later on.
6035 Mask = maskTrailingOnes<uint64_t>(Width);
6036 *Mask &= (IsSHL ? *Mask << ShAmt : *Mask >> ShAmt);
6037 }
6038 }
6039
6040 unsigned MaskIdx = Log2_32(ShAmt);
6041 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6042
6043 if (SHLExpMask)
6044 ExpMask <<= ShAmt;
6045
6046 if (Mask != ExpMask)
6047 return None;
6048
6049 return RISCVBitmanipPat{Src, (unsigned)ShAmt, IsSHL};
6050 }
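
// A worked trace of the matcher above, for the 32-bit input
//   (and (srl x, 1), 0x55555555):
// Mask = 0x55555555, ShAmt = 1, IsSHL = false, so SHLExpMask stays false;
// MaskIdx = Log2_32(1) = 0 and, with the GREVI masks below,
// ExpMask = 0x5555555555555555 & 0xFFFFFFFF = 0x55555555. The mask matches
// and we return {x, 1, IsSHL=false}. Its SHL counterpart
//   (and (shl x, 1), 0xAAAAAAAA)
// returns {x, 1, IsSHL=true} (ExpMask shifted left by 1 gives 0xAAAAAAAA);
// the two results satisfy formsPairWith and together denote GREVI x, 1.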
6051
6052 // Matches any of the following bit-manipulation patterns:
6053 // (and (shl x, 1), (0x55555555 << 1))
6054 // (and (srl x, 1), 0x55555555)
6055 // (shl (and x, 0x55555555), 1)
6056 // (srl (and x, (0x55555555 << 1)), 1)
6057 // where the shift amount and mask may vary thus:
6058 // [1] = 0x55555555 / 0xAAAAAAAA
6059 // [2] = 0x33333333 / 0xCCCCCCCC
6060 // [4] = 0x0F0F0F0F / 0xF0F0F0F0
6061 // [8] = 0x00FF00FF / 0xFF00FF00
6062 // [16] = 0x0000FFFF / 0xFFFF0000
6063 // [32] = 0x00000000FFFFFFFF / 0xFFFFFFFF00000000 (for RV64)
6064 static Optional<RISCVBitmanipPat> matchGREVIPat(SDValue Op) {
6065 // These are the unshifted masks which we use to match bit-manipulation
6066 // patterns. They may be shifted left in certain circumstances.
6067 static const uint64_t BitmanipMasks[] = {
6068 0x5555555555555555ULL, 0x3333333333333333ULL, 0x0F0F0F0F0F0F0F0FULL,
6069 0x00FF00FF00FF00FFULL, 0x0000FFFF0000FFFFULL, 0x00000000FFFFFFFFULL};
6070
6071 return matchRISCVBitmanipPat(Op, BitmanipMasks);
6072 }
6073
6074 // Match the following pattern as a GREVI(W) operation
6075 // (or (BITMANIP_SHL x), (BITMANIP_SRL x))
6076 static SDValue combineORToGREV(SDValue Op, SelectionDAG &DAG,
6077 const RISCVSubtarget &Subtarget) {
6078   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
6079 EVT VT = Op.getValueType();
6080
6081 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6082 auto LHS = matchGREVIPat(Op.getOperand(0));
6083 auto RHS = matchGREVIPat(Op.getOperand(1));
6084 if (LHS && RHS && LHS->formsPairWith(*RHS)) {
6085 SDLoc DL(Op);
6086 return DAG.getNode(RISCVISD::GREV, DL, VT, LHS->Op,
6087 DAG.getConstant(LHS->ShAmt, DL, VT));
6088 }
6089 }
6090 return SDValue();
6091 }
6092
6093 // Matches any of the following patterns as a GORCI(W) operation
6094 // 1. (or (GREVI x, shamt), x) if shamt is a power of 2
6095 // 2. (or x, (GREVI x, shamt)) if shamt is a power of 2
6096 // 3. (or (or (BITMANIP_SHL x), x), (BITMANIP_SRL x))
6097 // Note that with the variant of 3.,
6098 // (or (or (BITMANIP_SHL x), (BITMANIP_SRL x)), x)
6099 // the inner pattern will first be matched as GREVI and then the outer
6100 // pattern will be matched to GORC via the first rule above.
6101 // 4. (or (rotl/rotr x, bitwidth/2), x)
6102 static SDValue combineORToGORC(SDValue Op, SelectionDAG &DAG,
6103 const RISCVSubtarget &Subtarget) {
6104   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
6105 EVT VT = Op.getValueType();
6106
6107 if (VT == Subtarget.getXLenVT() || (Subtarget.is64Bit() && VT == MVT::i32)) {
6108 SDLoc DL(Op);
6109 SDValue Op0 = Op.getOperand(0);
6110 SDValue Op1 = Op.getOperand(1);
6111
6112 auto MatchOROfReverse = [&](SDValue Reverse, SDValue X) {
6113 if (Reverse.getOpcode() == RISCVISD::GREV && Reverse.getOperand(0) == X &&
6114 isa<ConstantSDNode>(Reverse.getOperand(1)) &&
6115 isPowerOf2_32(Reverse.getConstantOperandVal(1)))
6116 return DAG.getNode(RISCVISD::GORC, DL, VT, X, Reverse.getOperand(1));
6117 // We can also form GORCI from ROTL/ROTR by half the bitwidth.
6118 if ((Reverse.getOpcode() == ISD::ROTL ||
6119 Reverse.getOpcode() == ISD::ROTR) &&
6120 Reverse.getOperand(0) == X &&
6121 isa<ConstantSDNode>(Reverse.getOperand(1))) {
6122 uint64_t RotAmt = Reverse.getConstantOperandVal(1);
6123 if (RotAmt == (VT.getSizeInBits() / 2))
6124 return DAG.getNode(RISCVISD::GORC, DL, VT, X,
6125 DAG.getConstant(RotAmt, DL, VT));
6126 }
6127 return SDValue();
6128 };
6129
6130 // Check for either commutable permutation of (or (GREVI x, shamt), x)
6131 if (SDValue V = MatchOROfReverse(Op0, Op1))
6132 return V;
6133 if (SDValue V = MatchOROfReverse(Op1, Op0))
6134 return V;
6135
6136 // OR is commutable so canonicalize its OR operand to the left
6137 if (Op0.getOpcode() != ISD::OR && Op1.getOpcode() == ISD::OR)
6138 std::swap(Op0, Op1);
6139 if (Op0.getOpcode() != ISD::OR)
6140 return SDValue();
6141 SDValue OrOp0 = Op0.getOperand(0);
6142 SDValue OrOp1 = Op0.getOperand(1);
6143 auto LHS = matchGREVIPat(OrOp0);
6144 // OR is commutable so swap the operands and try again: x might have been
6145 // on the left
6146 if (!LHS) {
6147 std::swap(OrOp0, OrOp1);
6148 LHS = matchGREVIPat(OrOp0);
6149 }
6150 auto RHS = matchGREVIPat(Op1);
6151 if (LHS && RHS && LHS->formsPairWith(*RHS) && LHS->Op == OrOp1) {
6152 return DAG.getNode(RISCVISD::GORC, DL, VT, LHS->Op,
6153 DAG.getConstant(LHS->ShAmt, DL, VT));
6154 }
6155 }
6156 return SDValue();
6157 }
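
// For instance, rule 4 above turns the RV32 DAG
//   (or (rotl x, 16), x)
// into (GORC x, 16): for a rotate by half the bit width,
// (x << 16) | (x >> 16) equals (rotl x, 16), so x | (rotl x, 16) is exactly
// the gorci stage with shamt 16.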
6158
6159 // Matches any of the following bit-manipulation patterns:
6160 // (and (shl x, 1), (0x22222222 << 1))
6161 // (and (srl x, 1), 0x22222222)
6162 // (shl (and x, 0x22222222), 1)
6163 // (srl (and x, (0x22222222 << 1)), 1)
6164 // where the shift amount and mask may vary thus:
6165 // [1] = 0x22222222 / 0x44444444
6166 // [2] = 0x0C0C0C0C / 0x3C3C3C3C
6167 // [4] = 0x00F000F0 / 0x0F000F00
6168 // [8] = 0x0000FF00 / 0x00FF0000
6169 // [16] = 0x00000000FFFF0000 / 0x0000FFFF00000000 (for RV64)
6170 static Optional<RISCVBitmanipPat> matchSHFLPat(SDValue Op) {
6171 // These are the unshifted masks which we use to match bit-manipulation
6172 // patterns. They may be shifted left in certain circumstances.
6173 static const uint64_t BitmanipMasks[] = {
6174 0x2222222222222222ULL, 0x0C0C0C0C0C0C0C0CULL, 0x00F000F000F000F0ULL,
6175 0x0000FF000000FF00ULL, 0x00000000FFFF0000ULL};
6176
6177 return matchRISCVBitmanipPat(Op, BitmanipMasks);
6178 }
6179
6180 // Match (or (or (SHFL_SHL x), (SHFL_SHR x)), (SHFL_AND x))
6181 static SDValue combineORToSHFL(SDValue Op, SelectionDAG &DAG,
6182 const RISCVSubtarget &Subtarget) {
6183   assert(Subtarget.hasStdExtZbp() && "Expected Zbp extension");
6184 EVT VT = Op.getValueType();
6185
6186 if (VT != MVT::i32 && VT != Subtarget.getXLenVT())
6187 return SDValue();
6188
6189 SDValue Op0 = Op.getOperand(0);
6190 SDValue Op1 = Op.getOperand(1);
6191
6192   // OR is commutable so canonicalize the second OR to the LHS.
6193 if (Op0.getOpcode() != ISD::OR)
6194 std::swap(Op0, Op1);
6195 if (Op0.getOpcode() != ISD::OR)
6196 return SDValue();
6197
6198 // We found an inner OR, so our operands are the operands of the inner OR
6199 // and the other operand of the outer OR.
6200 SDValue A = Op0.getOperand(0);
6201 SDValue B = Op0.getOperand(1);
6202 SDValue C = Op1;
6203
6204 auto Match1 = matchSHFLPat(A);
6205 auto Match2 = matchSHFLPat(B);
6206
6207 // If neither matched, we failed.
6208 if (!Match1 && !Match2)
6209 return SDValue();
6210
6211   // We had at least one match. If one failed, try the remaining C operand.
6212 if (!Match1) {
6213 std::swap(A, C);
6214 Match1 = matchSHFLPat(A);
6215 if (!Match1)
6216 return SDValue();
6217 } else if (!Match2) {
6218 std::swap(B, C);
6219 Match2 = matchSHFLPat(B);
6220 if (!Match2)
6221 return SDValue();
6222 }
6223 assert(Match1 && Match2);
6224
6225 // Make sure our matches pair up.
6226 if (!Match1->formsPairWith(*Match2))
6227 return SDValue();
6228
6229   // All that remains is to make sure C is an AND with the same input that
6230   // masks out the bits that are being shuffled.
6231 if (C.getOpcode() != ISD::AND || !isa<ConstantSDNode>(C.getOperand(1)) ||
6232 C.getOperand(0) != Match1->Op)
6233 return SDValue();
6234
6235 uint64_t Mask = C.getConstantOperandVal(1);
6236
6237 static const uint64_t BitmanipMasks[] = {
6238 0x9999999999999999ULL, 0xC3C3C3C3C3C3C3C3ULL, 0xF00FF00FF00FF00FULL,
6239 0xFF0000FFFF0000FFULL, 0xFFFF00000000FFFFULL,
6240 };
6241
6242 unsigned Width = Op.getValueType() == MVT::i64 ? 64 : 32;
6243 unsigned MaskIdx = Log2_32(Match1->ShAmt);
6244 uint64_t ExpMask = BitmanipMasks[MaskIdx] & maskTrailingOnes<uint64_t>(Width);
6245
6246 if (Mask != ExpMask)
6247 return SDValue();
6248
6249 SDLoc DL(Op);
6250 return DAG.getNode(RISCVISD::SHFL, DL, VT, Match1->Op,
6251 DAG.getConstant(Match1->ShAmt, DL, VT));
6252 }
6253
6254 // Optimize (add (shl x, c0), (shl y, c1)) ->
6255 //          (SLLI (SH*ADD x, y), c0), if c1-c0 equals 1, 2, or 3.
6256 static SDValue transformAddShlImm(SDNode *N, SelectionDAG &DAG,
6257 const RISCVSubtarget &Subtarget) {
6258   // Perform this optimization only if the Zba extension is enabled.
6259 if (!Subtarget.hasStdExtZba())
6260 return SDValue();
6261
6262 // Skip for vector types and larger types.
6263 EVT VT = N->getValueType(0);
6264 if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6265 return SDValue();
6266
6267 // The two operand nodes must be SHL and have no other use.
6268 SDValue N0 = N->getOperand(0);
6269 SDValue N1 = N->getOperand(1);
6270 if (N0->getOpcode() != ISD::SHL || N1->getOpcode() != ISD::SHL ||
6271 !N0->hasOneUse() || !N1->hasOneUse())
6272 return SDValue();
6273
6274 // Check c0 and c1.
6275 auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6276 auto *N1C = dyn_cast<ConstantSDNode>(N1->getOperand(1));
6277 if (!N0C || !N1C)
6278 return SDValue();
6279 int64_t C0 = N0C->getSExtValue();
6280 int64_t C1 = N1C->getSExtValue();
6281 if (C0 <= 0 || C1 <= 0)
6282 return SDValue();
6283
6284 // Skip if SH1ADD/SH2ADD/SH3ADD are not applicable.
6285 int64_t Bits = std::min(C0, C1);
6286 int64_t Diff = std::abs(C0 - C1);
6287 if (Diff != 1 && Diff != 2 && Diff != 3)
6288 return SDValue();
6289
6290 // Build nodes.
6291 SDLoc DL(N);
6292 SDValue NS = (C0 < C1) ? N0->getOperand(0) : N1->getOperand(0);
6293 SDValue NL = (C0 > C1) ? N0->getOperand(0) : N1->getOperand(0);
6294 SDValue NA0 =
6295 DAG.getNode(ISD::SHL, DL, VT, NL, DAG.getConstant(Diff, DL, VT));
6296 SDValue NA1 = DAG.getNode(ISD::ADD, DL, VT, NA0, NS);
6297 return DAG.getNode(ISD::SHL, DL, VT, NA1, DAG.getConstant(Bits, DL, VT));
6298 }
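
// A quick worked example of the transform above: given
//   (add (shl x, 5), (shl y, 6))
// we get Diff = 1 and Bits = 5, producing
//   (shl (add (shl y, 1), x), 5)
// which selects to sh1add followed by slli: two instructions instead of the
// slli+slli+add sequence.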
6299
6300 // Combine (GREVI (GREVI x, C2), C1) -> (GREVI x, C1^C2) when C1^C2 is
6301 // non-zero, and to x when it is zero. Any repeated GREVI stage undoes itself.
6302 // Combine (GORCI (GORCI x, C2), C1) -> (GORCI x, C1|C2). Repeated stages do
6303 // not undo themselves, but they are redundant.
6304 static SDValue combineGREVI_GORCI(SDNode *N, SelectionDAG &DAG) {
6305 SDValue Src = N->getOperand(0);
6306
6307 if (Src.getOpcode() != N->getOpcode())
6308 return SDValue();
6309
6310 if (!isa<ConstantSDNode>(N->getOperand(1)) ||
6311 !isa<ConstantSDNode>(Src.getOperand(1)))
6312 return SDValue();
6313
6314 unsigned ShAmt1 = N->getConstantOperandVal(1);
6315 unsigned ShAmt2 = Src.getConstantOperandVal(1);
6316 Src = Src.getOperand(0);
6317
6318 unsigned CombinedShAmt;
6319 if (N->getOpcode() == RISCVISD::GORC || N->getOpcode() == RISCVISD::GORCW)
6320 CombinedShAmt = ShAmt1 | ShAmt2;
6321 else
6322 CombinedShAmt = ShAmt1 ^ ShAmt2;
6323
6324 if (CombinedShAmt == 0)
6325 return Src;
6326
6327 SDLoc DL(N);
6328 return DAG.getNode(
6329 N->getOpcode(), DL, N->getValueType(0), Src,
6330 DAG.getConstant(CombinedShAmt, DL, N->getOperand(1).getValueType()));
6331 }
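
// For example, on RV32 (GREVI (GREVI x, 24), 8) folds to (GREVI x, 16):
// grevi 24 is the byte swap, grevi 8 swaps bytes within each halfword, and
// 24 ^ 8 == 16 is the halfword swap. For GORC the stages accumulate instead:
// (GORCI (GORCI x, 5), 3) folds to (GORCI x, 7) since 5 | 3 == 7.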
6332
6333 // Combine a constant select operand into its use:
6334 //
6335 // (and (select cond, -1, c), x)
6336 // -> (select cond, x, (and x, c)) [AllOnes=1]
6337 // (or (select cond, 0, c), x)
6338 // -> (select cond, x, (or x, c)) [AllOnes=0]
6339 // (xor (select cond, 0, c), x)
6340 // -> (select cond, x, (xor x, c)) [AllOnes=0]
6341 // (add (select cond, 0, c), x)
6342 // -> (select cond, x, (add x, c)) [AllOnes=0]
6343 // (sub x, (select cond, 0, c))
6344 // -> (select cond, x, (sub x, c)) [AllOnes=0]
6345 static SDValue combineSelectAndUse(SDNode *N, SDValue Slct, SDValue OtherOp,
6346 SelectionDAG &DAG, bool AllOnes) {
6347 EVT VT = N->getValueType(0);
6348
6349 // Skip vectors.
6350 if (VT.isVector())
6351 return SDValue();
6352
6353 if ((Slct.getOpcode() != ISD::SELECT &&
6354 Slct.getOpcode() != RISCVISD::SELECT_CC) ||
6355 !Slct.hasOneUse())
6356 return SDValue();
6357
6358 auto isZeroOrAllOnes = [](SDValue N, bool AllOnes) {
6359 return AllOnes ? isAllOnesConstant(N) : isNullConstant(N);
6360 };
6361
6362 bool SwapSelectOps;
6363 unsigned OpOffset = Slct.getOpcode() == RISCVISD::SELECT_CC ? 2 : 0;
6364 SDValue TrueVal = Slct.getOperand(1 + OpOffset);
6365 SDValue FalseVal = Slct.getOperand(2 + OpOffset);
6366 SDValue NonConstantVal;
6367 if (isZeroOrAllOnes(TrueVal, AllOnes)) {
6368 SwapSelectOps = false;
6369 NonConstantVal = FalseVal;
6370 } else if (isZeroOrAllOnes(FalseVal, AllOnes)) {
6371 SwapSelectOps = true;
6372 NonConstantVal = TrueVal;
6373 } else
6374 return SDValue();
6375
6376   // Slct is now known to be the desired identity constant when CC is true.
6377 TrueVal = OtherOp;
6378 FalseVal = DAG.getNode(N->getOpcode(), SDLoc(N), VT, OtherOp, NonConstantVal);
6379 // Unless SwapSelectOps says the condition should be false.
6380 if (SwapSelectOps)
6381 std::swap(TrueVal, FalseVal);
6382
6383 if (Slct.getOpcode() == RISCVISD::SELECT_CC)
6384 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), VT,
6385 {Slct.getOperand(0), Slct.getOperand(1),
6386 Slct.getOperand(2), TrueVal, FalseVal});
6387
6388 return DAG.getNode(ISD::SELECT, SDLoc(N), VT,
6389 {Slct.getOperand(0), TrueVal, FalseVal});
6390 }
6391
6392 // Attempt combineSelectAndUse on each operand of a commutative operator N.
6393 static SDValue combineSelectAndUseCommutative(SDNode *N, SelectionDAG &DAG,
6394 bool AllOnes) {
6395 SDValue N0 = N->getOperand(0);
6396 SDValue N1 = N->getOperand(1);
6397 if (SDValue Result = combineSelectAndUse(N, N0, N1, DAG, AllOnes))
6398 return Result;
6399 if (SDValue Result = combineSelectAndUse(N, N1, N0, DAG, AllOnes))
6400 return Result;
6401 return SDValue();
6402 }
6403
6404 // Transform (add (mul x, c0), c1) ->
6405 //           (add (mul (add x, c1/c0), c0), c1%c0)
6406 // if c1/c0 and c1%c0 are simm12, while c1 is not.
6407 // Or transform (add (mul x, c0), c1) ->
6408 //              (mul (add x, c1/c0), c0)
6409 // if c1%c0 is zero and c1/c0 is simm12, while c1 is not.
6410 static SDValue transformAddImmMulImm(SDNode *N, SelectionDAG &DAG,
6411 const RISCVSubtarget &Subtarget) {
6412 // Skip for vector types and larger types.
6413 EVT VT = N->getValueType(0);
6414 if (VT.isVector() || VT.getSizeInBits() > Subtarget.getXLen())
6415 return SDValue();
6416   // The first operand node must be a MUL and have no other use.
6417 SDValue N0 = N->getOperand(0);
6418 if (!N0->hasOneUse() || N0->getOpcode() != ISD::MUL)
6419 return SDValue();
6420   // Check if c0 and c1 match the conditions above.
6421 auto *N0C = dyn_cast<ConstantSDNode>(N0->getOperand(1));
6422 auto *N1C = dyn_cast<ConstantSDNode>(N->getOperand(1));
6423 if (!N0C || !N1C)
6424 return SDValue();
6425 int64_t C0 = N0C->getSExtValue();
6426 int64_t C1 = N1C->getSExtValue();
6427 if (C0 == -1 || C0 == 0 || C0 == 1 || (C1 / C0) == 0 || isInt<12>(C1) ||
6428 !isInt<12>(C1 % C0) || !isInt<12>(C1 / C0))
6429 return SDValue();
6430 // If C0 * (C1 / C0) is a 12-bit integer, this transform will be reversed.
6431 if (isInt<12>(C0 * (C1 / C0)))
6432 return SDValue();
6433 // Build new nodes (add (mul (add x, c1/c0), c0), c1%c0).
6434 SDLoc DL(N);
6435 SDValue New0 = DAG.getNode(ISD::ADD, DL, VT, N0->getOperand(0),
6436 DAG.getConstant(C1 / C0, DL, VT));
6437 SDValue New1 =
6438 DAG.getNode(ISD::MUL, DL, VT, New0, DAG.getConstant(C0, DL, VT));
6439 if ((C1 % C0) == 0)
6440 return New1;
6441 return DAG.getNode(ISD::ADD, DL, VT, New1, DAG.getConstant(C1 % C0, DL, VT));
6442 }
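
// A worked example of the arithmetic above: for (add (mul x, 100), 4099),
// c1 = 4099 is not simm12, but c1/c0 = 40 and c1%c0 = 99 both are, and
// c0*(c1/c0) = 4000 is not simm12 either, so we emit
//   (add (mul (add x, 40), 100), 99)
// which computes 100*(x + 40) + 99 = 100*x + 4099 with both addends now
// small enough to fold into addi.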
6443
6444 static SDValue performADDCombine(SDNode *N, SelectionDAG &DAG,
6445 const RISCVSubtarget &Subtarget) {
6446   // Transform (add (mul x, c0), c1) ->
6447   //           (add (mul (add x, c1/c0), c0), c1%c0)
6448   // if c1/c0 and c1%c0 are simm12, while c1 is not.
6449   // Or transform (add (mul x, c0), c1) ->
6450   //              (mul (add x, c1/c0), c0)
6451   // if c1%c0 is zero and c1/c0 is simm12, while c1 is not.
6452 if (SDValue V = transformAddImmMulImm(N, DAG, Subtarget))
6453 return V;
6454 // Fold (add (shl x, c0), (shl y, c1)) ->
6455   //      (SLLI (SH*ADD x, y), c0), if c1-c0 equals 1, 2, or 3.
6456 if (SDValue V = transformAddShlImm(N, DAG, Subtarget))
6457 return V;
6458 // fold (add (select lhs, rhs, cc, 0, y), x) ->
6459 // (select lhs, rhs, cc, x, (add x, y))
6460 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6461 }
6462
6463 static SDValue performSUBCombine(SDNode *N, SelectionDAG &DAG) {
6464 // fold (sub x, (select lhs, rhs, cc, 0, y)) ->
6465 // (select lhs, rhs, cc, x, (sub x, y))
6466 SDValue N0 = N->getOperand(0);
6467 SDValue N1 = N->getOperand(1);
6468 return combineSelectAndUse(N, N1, N0, DAG, /*AllOnes*/ false);
6469 }
6470
6471 static SDValue performANDCombine(SDNode *N, SelectionDAG &DAG) {
6472 // fold (and (select lhs, rhs, cc, -1, y), x) ->
6473 // (select lhs, rhs, cc, x, (and x, y))
6474 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ true);
6475 }
6476
6477 static SDValue performORCombine(SDNode *N, SelectionDAG &DAG,
6478 const RISCVSubtarget &Subtarget) {
6479 if (Subtarget.hasStdExtZbp()) {
6480 if (auto GREV = combineORToGREV(SDValue(N, 0), DAG, Subtarget))
6481 return GREV;
6482 if (auto GORC = combineORToGORC(SDValue(N, 0), DAG, Subtarget))
6483 return GORC;
6484 if (auto SHFL = combineORToSHFL(SDValue(N, 0), DAG, Subtarget))
6485 return SHFL;
6486 }
6487
6488 // fold (or (select cond, 0, y), x) ->
6489 // (select cond, x, (or x, y))
6490 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6491 }
6492
6493 static SDValue performXORCombine(SDNode *N, SelectionDAG &DAG) {
6494 // fold (xor (select cond, 0, y), x) ->
6495 // (select cond, x, (xor x, y))
6496 return combineSelectAndUseCommutative(N, DAG, /*AllOnes*/ false);
6497 }
6498
6499 // Attempt to turn ANY_EXTEND into SIGN_EXTEND if the input to the ANY_EXTEND
6500 // has users that require SIGN_EXTEND and the SIGN_EXTEND can be done for free
6501 // by an instruction like ADDW/SUBW/MULW. Without this the ANY_EXTEND would be
6502 // removed during type legalization leaving an ADD/SUB/MUL use that won't use
6503 // ADDW/SUBW/MULW.
6504 static SDValue performANY_EXTENDCombine(SDNode *N,
6505 TargetLowering::DAGCombinerInfo &DCI,
6506 const RISCVSubtarget &Subtarget) {
6507 if (!Subtarget.is64Bit())
6508 return SDValue();
6509
6510 SelectionDAG &DAG = DCI.DAG;
6511
6512 SDValue Src = N->getOperand(0);
6513 EVT VT = N->getValueType(0);
6514 if (VT != MVT::i64 || Src.getValueType() != MVT::i32)
6515 return SDValue();
6516
6517 // The opcode must be one that can implicitly sign_extend.
6518 // FIXME: Additional opcodes.
6519 switch (Src.getOpcode()) {
6520 default:
6521 return SDValue();
6522 case ISD::MUL:
6523 if (!Subtarget.hasStdExtM())
6524 return SDValue();
6525 LLVM_FALLTHROUGH;
6526 case ISD::ADD:
6527 case ISD::SUB:
6528 break;
6529 }
6530
6531 // Only handle cases where the result is used by a CopyToReg. That likely
6532 // means the value is a liveout of the basic block. This helps prevent
6533 // infinite combine loops like PR51206.
6534 if (none_of(N->uses(),
6535 [](SDNode *User) { return User->getOpcode() == ISD::CopyToReg; }))
6536 return SDValue();
6537
6538 SmallVector<SDNode *, 4> SetCCs;
6539 for (SDNode::use_iterator UI = Src.getNode()->use_begin(),
6540 UE = Src.getNode()->use_end();
6541 UI != UE; ++UI) {
6542 SDNode *User = *UI;
6543 if (User == N)
6544 continue;
6545 if (UI.getUse().getResNo() != Src.getResNo())
6546 continue;
6547 // All i32 setccs are legalized by sign extending operands.
6548 if (User->getOpcode() == ISD::SETCC) {
6549 SetCCs.push_back(User);
6550 continue;
6551 }
6552 // We don't know if we can extend this user.
6553 break;
6554 }
6555
6556 // If we don't have any SetCCs, this isn't worthwhile.
6557 if (SetCCs.empty())
6558 return SDValue();
6559
6560 SDLoc DL(N);
6561 SDValue SExt = DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, Src);
6562 DCI.CombineTo(N, SExt);
6563
6564 // Promote all the setccs.
6565 for (SDNode *SetCC : SetCCs) {
6566 SmallVector<SDValue, 4> Ops;
6567
6568 for (unsigned j = 0; j != 2; ++j) {
6569 SDValue SOp = SetCC->getOperand(j);
6570 if (SOp == Src)
6571 Ops.push_back(SExt);
6572 else
6573 Ops.push_back(DAG.getNode(ISD::SIGN_EXTEND, DL, MVT::i64, SOp));
6574 }
6575
6576 Ops.push_back(SetCC->getOperand(2));
6577 DCI.CombineTo(SetCC,
6578 DAG.getNode(ISD::SETCC, DL, SetCC->getValueType(0), Ops));
6579 }
6580 return SDValue(N, 0);
6581 }
6582
6583 // Try to form VWMUL or VWMULU.
6584 // FIXME: Support VWMULSU.
6585 static SDValue combineMUL_VLToVWMUL(SDNode *N, SDValue Op0, SDValue Op1,
6586 SelectionDAG &DAG) {
6587 assert(N->getOpcode() == RISCVISD::MUL_VL && "Unexpected opcode");
6588 bool IsSignExt = Op0.getOpcode() == RISCVISD::VSEXT_VL;
6589 bool IsZeroExt = Op0.getOpcode() == RISCVISD::VZEXT_VL;
6590 if ((!IsSignExt && !IsZeroExt) || !Op0.hasOneUse())
6591 return SDValue();
6592
6593 SDValue Mask = N->getOperand(2);
6594 SDValue VL = N->getOperand(3);
6595
6596 // Make sure the mask and VL match.
6597 if (Op0.getOperand(1) != Mask || Op0.getOperand(2) != VL)
6598 return SDValue();
6599
6600 MVT VT = N->getSimpleValueType(0);
6601
6602 // Determine the narrow size for a widening multiply.
6603 unsigned NarrowSize = VT.getScalarSizeInBits() / 2;
6604 MVT NarrowVT = MVT::getVectorVT(MVT::getIntegerVT(NarrowSize),
6605 VT.getVectorElementCount());
6606
6607 SDLoc DL(N);
6608
6609 // See if the other operand is the same opcode.
6610 if (Op0.getOpcode() == Op1.getOpcode()) {
6611 if (!Op1.hasOneUse())
6612 return SDValue();
6613
6614 // Make sure the mask and VL match.
6615 if (Op1.getOperand(1) != Mask || Op1.getOperand(2) != VL)
6616 return SDValue();
6617
6618 Op1 = Op1.getOperand(0);
6619 } else if (Op1.getOpcode() == RISCVISD::VMV_V_X_VL) {
6620 // The operand is a splat of a scalar.
6621
6622 // The VL must be the same.
6623 if (Op1.getOperand(1) != VL)
6624 return SDValue();
6625
6626 // Get the scalar value.
6627 Op1 = Op1.getOperand(0);
6628
6629     // See if we have enough sign bits or zero bits in the scalar to use a
6630 // widening multiply by splatting to smaller element size.
6631 unsigned EltBits = VT.getScalarSizeInBits();
6632 unsigned ScalarBits = Op1.getValueSizeInBits();
6633 // Make sure we're getting all element bits from the scalar register.
6634 // FIXME: Support implicit sign extension of vmv.v.x?
6635 if (ScalarBits < EltBits)
6636 return SDValue();
6637
6638 if (IsSignExt) {
6639 if (DAG.ComputeNumSignBits(Op1) <= (ScalarBits - NarrowSize))
6640 return SDValue();
6641 } else {
6642 APInt Mask = APInt::getBitsSetFrom(ScalarBits, NarrowSize);
6643 if (!DAG.MaskedValueIsZero(Op1, Mask))
6644 return SDValue();
6645 }
6646
6647 Op1 = DAG.getNode(RISCVISD::VMV_V_X_VL, DL, NarrowVT, Op1, VL);
6648 } else
6649 return SDValue();
6650
6651 Op0 = Op0.getOperand(0);
6652
6653 // Re-introduce narrower extends if needed.
6654 unsigned ExtOpc = IsSignExt ? RISCVISD::VSEXT_VL : RISCVISD::VZEXT_VL;
6655 if (Op0.getValueType() != NarrowVT)
6656 Op0 = DAG.getNode(ExtOpc, DL, NarrowVT, Op0, Mask, VL);
6657 if (Op1.getValueType() != NarrowVT)
6658 Op1 = DAG.getNode(ExtOpc, DL, NarrowVT, Op1, Mask, VL);
6659
6660 unsigned WMulOpc = IsSignExt ? RISCVISD::VWMUL_VL : RISCVISD::VWMULU_VL;
6661 return DAG.getNode(WMulOpc, DL, VT, Op0, Op1, Mask, VL);
6662 }
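
// In DAG terms the combine above rewrites, for example,
//   (mul_vl (vsext_vl a, mask, vl), (vsext_vl b, mask, vl), mask, vl)
// into
//   (vwmul_vl a, b, mask, vl)
// when a and b already have the narrow (half-width) element type, so a
// single vwmul.vv replaces two vsext.vf2 and a vmul.vv.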
6663
6664 SDValue RISCVTargetLowering::PerformDAGCombine(SDNode *N,
6665 DAGCombinerInfo &DCI) const {
6666 SelectionDAG &DAG = DCI.DAG;
6667
6668 // Helper to call SimplifyDemandedBits on an operand of N where only some low
6669 // bits are demanded. N will be added to the Worklist if it was not deleted.
6670 // Caller should return SDValue(N, 0) if this returns true.
6671 auto SimplifyDemandedLowBitsHelper = [&](unsigned OpNo, unsigned LowBits) {
6672 SDValue Op = N->getOperand(OpNo);
6673 APInt Mask = APInt::getLowBitsSet(Op.getValueSizeInBits(), LowBits);
6674 if (!SimplifyDemandedBits(Op, Mask, DCI))
6675 return false;
6676
6677 if (N->getOpcode() != ISD::DELETED_NODE)
6678 DCI.AddToWorklist(N);
6679 return true;
6680 };
6681
6682 switch (N->getOpcode()) {
6683 default:
6684 break;
6685 case RISCVISD::SplitF64: {
6686 SDValue Op0 = N->getOperand(0);
6687 // If the input to SplitF64 is just BuildPairF64 then the operation is
6688 // redundant. Instead, use BuildPairF64's operands directly.
6689 if (Op0->getOpcode() == RISCVISD::BuildPairF64)
6690 return DCI.CombineTo(N, Op0.getOperand(0), Op0.getOperand(1));
6691
6692 SDLoc DL(N);
6693
6694 // It's cheaper to materialise two 32-bit integers than to load a double
6695 // from the constant pool and transfer it to integer registers through the
6696 // stack.
6697 if (ConstantFPSDNode *C = dyn_cast<ConstantFPSDNode>(Op0)) {
6698 APInt V = C->getValueAPF().bitcastToAPInt();
6699 SDValue Lo = DAG.getConstant(V.trunc(32), DL, MVT::i32);
6700 SDValue Hi = DAG.getConstant(V.lshr(32).trunc(32), DL, MVT::i32);
6701 return DCI.CombineTo(N, Lo, Hi);
6702 }
6703
6704 // This is a target-specific version of a DAGCombine performed in
6705 // DAGCombiner::visitBITCAST. It performs the equivalent of:
6706 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6707 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
6708 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6709 !Op0.getNode()->hasOneUse())
6710 break;
6711 SDValue NewSplitF64 =
6712 DAG.getNode(RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32),
6713 Op0.getOperand(0));
6714 SDValue Lo = NewSplitF64.getValue(0);
6715 SDValue Hi = NewSplitF64.getValue(1);
6716 APInt SignBit = APInt::getSignMask(32);
6717 if (Op0.getOpcode() == ISD::FNEG) {
6718 SDValue NewHi = DAG.getNode(ISD::XOR, DL, MVT::i32, Hi,
6719 DAG.getConstant(SignBit, DL, MVT::i32));
6720 return DCI.CombineTo(N, Lo, NewHi);
6721 }
6722 assert(Op0.getOpcode() == ISD::FABS);
6723 SDValue NewHi = DAG.getNode(ISD::AND, DL, MVT::i32, Hi,
6724 DAG.getConstant(~SignBit, DL, MVT::i32));
6725 return DCI.CombineTo(N, Lo, NewHi);
6726 }
6727 case RISCVISD::SLLW:
6728 case RISCVISD::SRAW:
6729 case RISCVISD::SRLW:
6730 case RISCVISD::ROLW:
6731 case RISCVISD::RORW: {
6732 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6733 if (SimplifyDemandedLowBitsHelper(0, 32) ||
6734 SimplifyDemandedLowBitsHelper(1, 5))
6735 return SDValue(N, 0);
6736 break;
6737 }
6738 case RISCVISD::CLZW:
6739 case RISCVISD::CTZW: {
6740     // Only the lower 32 bits of the first operand are read.
6741 if (SimplifyDemandedLowBitsHelper(0, 32))
6742 return SDValue(N, 0);
6743 break;
6744 }
6745 case RISCVISD::FSL:
6746 case RISCVISD::FSR: {
6747     // Only the lower log2(Bitwidth)+1 bits of the shift amount are read.
6748 unsigned BitWidth = N->getOperand(2).getValueSizeInBits();
6749 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6750 if (SimplifyDemandedLowBitsHelper(2, Log2_32(BitWidth) + 1))
6751 return SDValue(N, 0);
6752 break;
6753 }
6754 case RISCVISD::FSLW:
6755 case RISCVISD::FSRW: {
6756     // Only the lower 32 bits of the values and the lower 6 bits of the shift
6757     // amount are read.
6758 if (SimplifyDemandedLowBitsHelper(0, 32) ||
6759 SimplifyDemandedLowBitsHelper(1, 32) ||
6760 SimplifyDemandedLowBitsHelper(2, 6))
6761 return SDValue(N, 0);
6762 break;
6763 }
6764 case RISCVISD::GREV:
6765 case RISCVISD::GORC: {
6766     // Only the lower log2(Bitwidth) bits of the shift amount are read.
6767 unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6768 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6769 if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth)))
6770 return SDValue(N, 0);
6771
6772 return combineGREVI_GORCI(N, DCI.DAG);
6773 }
6774 case RISCVISD::GREVW:
6775 case RISCVISD::GORCW: {
6776 // Only the lower 32 bits of LHS and lower 5 bits of RHS are read.
6777 if (SimplifyDemandedLowBitsHelper(0, 32) ||
6778 SimplifyDemandedLowBitsHelper(1, 5))
6779 return SDValue(N, 0);
6780
6781 return combineGREVI_GORCI(N, DCI.DAG);
6782 }
6783 case RISCVISD::SHFL:
6784 case RISCVISD::UNSHFL: {
6785     // Only the lower log2(Bitwidth)-1 bits of the shift amount are read.
6786 unsigned BitWidth = N->getOperand(1).getValueSizeInBits();
6787 assert(isPowerOf2_32(BitWidth) && "Unexpected bit width");
6788 if (SimplifyDemandedLowBitsHelper(1, Log2_32(BitWidth) - 1))
6789 return SDValue(N, 0);
6790
6791 break;
6792 }
6793 case RISCVISD::SHFLW:
6794 case RISCVISD::UNSHFLW: {
6795 // Only the lower 32 bits of LHS and lower 4 bits of RHS are read.
6800 if (SimplifyDemandedLowBitsHelper(0, 32) ||
6801 SimplifyDemandedLowBitsHelper(1, 4))
6802 return SDValue(N, 0);
6803
6804 break;
6805 }
6806 case RISCVISD::BCOMPRESSW:
6807 case RISCVISD::BDECOMPRESSW: {
6808 // Only the lower 32 bits of LHS and RHS are read.
6809 if (SimplifyDemandedLowBitsHelper(0, 32) ||
6810 SimplifyDemandedLowBitsHelper(1, 32))
6811 return SDValue(N, 0);
6812
6813 break;
6814 }
6815 case RISCVISD::FMV_X_ANYEXTH:
6816 case RISCVISD::FMV_X_ANYEXTW_RV64: {
6817 SDLoc DL(N);
6818 SDValue Op0 = N->getOperand(0);
6819 MVT VT = N->getSimpleValueType(0);
6820 // If the input to FMV_X_ANYEXTW_RV64 is just FMV_W_X_RV64 then the
6821 // conversion is unnecessary and can be replaced with the FMV_W_X_RV64
6822 // operand. Similar for FMV_X_ANYEXTH and FMV_H_X.
6823 if ((N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 &&
6824 Op0->getOpcode() == RISCVISD::FMV_W_X_RV64) ||
6825 (N->getOpcode() == RISCVISD::FMV_X_ANYEXTH &&
6826 Op0->getOpcode() == RISCVISD::FMV_H_X)) {
6827 assert(Op0.getOperand(0).getValueType() == VT &&
6828 "Unexpected value type!");
6829 return Op0.getOperand(0);
6830 }
6831
6832 // This is a target-specific version of a DAGCombine performed in
6833 // DAGCombiner::visitBITCAST. It performs the equivalent of:
6834 // fold (bitconvert (fneg x)) -> (xor (bitconvert x), signbit)
6835 // fold (bitconvert (fabs x)) -> (and (bitconvert x), (not signbit))
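    // For example, (fmv_x_anyextw_rv64 (fneg X)) becomes an XOR of
    // (fmv_x_anyextw_rv64 X) with the 32-bit sign-bit mask, sign-extended to
    // i64.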
6836 if (!(Op0.getOpcode() == ISD::FNEG || Op0.getOpcode() == ISD::FABS) ||
6837 !Op0.getNode()->hasOneUse())
6838 break;
6839 SDValue NewFMV = DAG.getNode(N->getOpcode(), DL, VT, Op0.getOperand(0));
6840 unsigned FPBits = N->getOpcode() == RISCVISD::FMV_X_ANYEXTW_RV64 ? 32 : 16;
6841 APInt SignBit = APInt::getSignMask(FPBits).sextOrSelf(VT.getSizeInBits());
6842 if (Op0.getOpcode() == ISD::FNEG)
6843 return DAG.getNode(ISD::XOR, DL, VT, NewFMV,
6844 DAG.getConstant(SignBit, DL, VT));
6845
6846 assert(Op0.getOpcode() == ISD::FABS);
6847 return DAG.getNode(ISD::AND, DL, VT, NewFMV,
6848 DAG.getConstant(~SignBit, DL, VT));
6849 }
6850 case ISD::ADD:
6851 return performADDCombine(N, DAG, Subtarget);
6852 case ISD::SUB:
6853 return performSUBCombine(N, DAG);
6854 case ISD::AND:
6855 return performANDCombine(N, DAG);
6856 case ISD::OR:
6857 return performORCombine(N, DAG, Subtarget);
6858 case ISD::XOR:
6859 return performXORCombine(N, DAG);
6860 case ISD::ANY_EXTEND:
6861 return performANY_EXTENDCombine(N, DCI, Subtarget);
6862 case ISD::ZERO_EXTEND:
6863 // Fold (zero_extend (fp_to_uint X)) to prevent forming fcvt+zexti32 during
6864 // type legalization. This is safe because fp_to_uint produces poison if
6865 // it overflows.
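    // For example, on RV64 this lets i64 (zext (i32 fp_to_uint f32 X)) select
    // directly to fcvt.lu.s rather than fcvt.wu.s followed by a zero
    // extension.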
6866 if (N->getValueType(0) == MVT::i64 && Subtarget.is64Bit() &&
6867 N->getOperand(0).getOpcode() == ISD::FP_TO_UINT &&
6868 isTypeLegal(N->getOperand(0).getOperand(0).getValueType()))
6869 return DAG.getNode(ISD::FP_TO_UINT, SDLoc(N), MVT::i64,
6870 N->getOperand(0).getOperand(0));
6871 return SDValue();
6872 case RISCVISD::SELECT_CC: {
6873     // Transform a SELECT_CC node into simpler forms where possible.
6874 SDValue LHS = N->getOperand(0);
6875 SDValue RHS = N->getOperand(1);
6876 SDValue TrueV = N->getOperand(3);
6877 SDValue FalseV = N->getOperand(4);
6878
6879 // If the True and False values are the same, we don't need a select_cc.
6880 if (TrueV == FalseV)
6881 return TrueV;
6882
6883 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(2))->get();
6884 if (!ISD::isIntEqualitySetCC(CCVal))
6885 break;
6886
6887 // Fold (select_cc (setlt X, Y), 0, ne, trueV, falseV) ->
6888 // (select_cc X, Y, lt, trueV, falseV)
6889 // Sometimes the setcc is introduced after select_cc has been formed.
6890 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6891 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6892 // If we're looking for eq 0 instead of ne 0, we need to invert the
6893 // condition.
6894 bool Invert = CCVal == ISD::SETEQ;
6895 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6896 if (Invert)
6897 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6898
6899 SDLoc DL(N);
6900 RHS = LHS.getOperand(1);
6901 LHS = LHS.getOperand(0);
6902 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6903
6904 SDValue TargetCC = DAG.getCondCode(CCVal);
6905 return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
6906 {LHS, RHS, TargetCC, TrueV, FalseV});
6907 }
6908
6909 // Fold (select_cc (xor X, Y), 0, eq/ne, trueV, falseV) ->
6910 // (select_cc X, Y, eq/ne, trueV, falseV)
6911 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6912 return DAG.getNode(RISCVISD::SELECT_CC, SDLoc(N), N->getValueType(0),
6913 {LHS.getOperand(0), LHS.getOperand(1),
6914 N->getOperand(2), TrueV, FalseV});
6915 // (select_cc X, 1, setne, trueV, falseV) ->
6916 // (select_cc X, 0, seteq, trueV, falseV) if we can prove X is 0/1.
6917 // This can occur when legalizing some floating point comparisons.
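    // For example, a boolean produced when legalizing a floating point
    // comparison is known to be 0 or 1, so (select_cc X, 1, ne, t, f) can
    // become (select_cc X, 0, eq, t, f).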
6918 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6919 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6920 SDLoc DL(N);
6921 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6922 SDValue TargetCC = DAG.getCondCode(CCVal);
6923 RHS = DAG.getConstant(0, DL, LHS.getValueType());
6924 return DAG.getNode(RISCVISD::SELECT_CC, DL, N->getValueType(0),
6925 {LHS, RHS, TargetCC, TrueV, FalseV});
6926 }
6927
6928 break;
6929 }
6930 case RISCVISD::BR_CC: {
6931 SDValue LHS = N->getOperand(1);
6932 SDValue RHS = N->getOperand(2);
6933 ISD::CondCode CCVal = cast<CondCodeSDNode>(N->getOperand(3))->get();
6934 if (!ISD::isIntEqualitySetCC(CCVal))
6935 break;
6936
6937 // Fold (br_cc (setlt X, Y), 0, ne, dest) ->
6938 // (br_cc X, Y, lt, dest)
6939 // Sometimes the setcc is introduced after br_cc has been formed.
6940 if (LHS.getOpcode() == ISD::SETCC && isNullConstant(RHS) &&
6941 LHS.getOperand(0).getValueType() == Subtarget.getXLenVT()) {
6942 // If we're looking for eq 0 instead of ne 0, we need to invert the
6943 // condition.
6944 bool Invert = CCVal == ISD::SETEQ;
6945 CCVal = cast<CondCodeSDNode>(LHS.getOperand(2))->get();
6946 if (Invert)
6947 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6948
6949 SDLoc DL(N);
6950 RHS = LHS.getOperand(1);
6951 LHS = LHS.getOperand(0);
6952 translateSetCCForBranch(DL, LHS, RHS, CCVal, DAG);
6953
6954 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6955 N->getOperand(0), LHS, RHS, DAG.getCondCode(CCVal),
6956 N->getOperand(4));
6957 }
6958
6959 // Fold (br_cc (xor X, Y), 0, eq/ne, dest) ->
6960     // (br_cc X, Y, eq/ne, dest)
6961 if (LHS.getOpcode() == ISD::XOR && isNullConstant(RHS))
6962 return DAG.getNode(RISCVISD::BR_CC, SDLoc(N), N->getValueType(0),
6963 N->getOperand(0), LHS.getOperand(0), LHS.getOperand(1),
6964 N->getOperand(3), N->getOperand(4));
6965
6966     // (br_cc X, 1, setne, dest) ->
6967     // (br_cc X, 0, seteq, dest) if we can prove X is 0/1.
6968 // This can occur when legalizing some floating point comparisons.
6969 APInt Mask = APInt::getBitsSetFrom(LHS.getValueSizeInBits(), 1);
6970 if (isOneConstant(RHS) && DAG.MaskedValueIsZero(LHS, Mask)) {
6971 SDLoc DL(N);
6972 CCVal = ISD::getSetCCInverse(CCVal, LHS.getValueType());
6973 SDValue TargetCC = DAG.getCondCode(CCVal);
6974 RHS = DAG.getConstant(0, DL, LHS.getValueType());
6975 return DAG.getNode(RISCVISD::BR_CC, DL, N->getValueType(0),
6976 N->getOperand(0), LHS, RHS, TargetCC,
6977 N->getOperand(4));
6978 }
6979 break;
6980 }
6981 case ISD::FCOPYSIGN: {
6982 EVT VT = N->getValueType(0);
6983 if (!VT.isVector())
6984 break;
6985     // There is a form of VFSGNJ which injects the negated sign of its second
6986     // operand. Try to bubble any FNEG up after the extend/round to produce
6987     // this optimized pattern. Avoid modifying FP_ROUND cases whose
6988     // truncation flag (TRUNC) is 1.
6989 SDValue In2 = N->getOperand(1);
6990 // Avoid cases where the extend/round has multiple uses, as duplicating
6991 // those is typically more expensive than removing a fneg.
6992 if (!In2.hasOneUse())
6993 break;
6994 if (In2.getOpcode() != ISD::FP_EXTEND &&
6995 (In2.getOpcode() != ISD::FP_ROUND || In2.getConstantOperandVal(1) != 0))
6996 break;
6997 In2 = In2.getOperand(0);
6998 if (In2.getOpcode() != ISD::FNEG)
6999 break;
7000 SDLoc DL(N);
7001 SDValue NewFPExtRound = DAG.getFPExtendOrRound(In2.getOperand(0), DL, VT);
7002 return DAG.getNode(ISD::FCOPYSIGN, DL, VT, N->getOperand(0),
7003 DAG.getNode(ISD::FNEG, DL, VT, NewFPExtRound));
7004 }
7005 case ISD::MGATHER:
7006 case ISD::MSCATTER:
7007 case ISD::VP_GATHER:
7008 case ISD::VP_SCATTER: {
7009 if (!DCI.isBeforeLegalize())
7010 break;
7011 SDValue Index, ScaleOp;
7012 bool IsIndexScaled = false;
7013 bool IsIndexSigned = false;
7014 if (const auto *VPGSN = dyn_cast<VPGatherScatterSDNode>(N)) {
7015 Index = VPGSN->getIndex();
7016 ScaleOp = VPGSN->getScale();
7017 IsIndexScaled = VPGSN->isIndexScaled();
7018 IsIndexSigned = VPGSN->isIndexSigned();
7019 } else {
7020 const auto *MGSN = cast<MaskedGatherScatterSDNode>(N);
7021 Index = MGSN->getIndex();
7022 ScaleOp = MGSN->getScale();
7023 IsIndexScaled = MGSN->isIndexScaled();
7024 IsIndexSigned = MGSN->isIndexSigned();
7025 }
7026 EVT IndexVT = Index.getValueType();
7027 MVT XLenVT = Subtarget.getXLenVT();
7028     // RISC-V indexed loads and stores only support the "unsigned unscaled"
7029     // addressing mode, so anything else must be manually legalized.
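    // For example, a signed i16 index vector must be sign-extended to XLenVT
    // elements, and any scale must be folded into the index as an explicit
    // shift.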
7030 bool NeedsIdxLegalization =
7031 IsIndexScaled ||
7032 (IsIndexSigned && IndexVT.getVectorElementType().bitsLT(XLenVT));
7033 if (!NeedsIdxLegalization)
7034 break;
7035
7036 SDLoc DL(N);
7037
7038 // Any index legalization should first promote to XLenVT, so we don't lose
7039 // bits when scaling. This may create an illegal index type so we let
7040 // LLVM's legalization take care of the splitting.
7041 // FIXME: LLVM can't split VP_GATHER or VP_SCATTER yet.
7042 if (IndexVT.getVectorElementType().bitsLT(XLenVT)) {
7043 IndexVT = IndexVT.changeVectorElementType(XLenVT);
7044 Index = DAG.getNode(IsIndexSigned ? ISD::SIGN_EXTEND : ISD::ZERO_EXTEND,
7045 DL, IndexVT, Index);
7046 }
7047
7048 unsigned Scale = cast<ConstantSDNode>(ScaleOp)->getZExtValue();
7049 if (IsIndexScaled && Scale != 1) {
7050 // Manually scale the indices by the element size.
7051 // TODO: Sanitize the scale operand here?
7052 // TODO: For VP nodes, should we use VP_SHL here?
7053       assert(isPowerOf2_32(Scale) && "Expecting a power-of-two scale");
7054 SDValue SplatScale = DAG.getConstant(Log2_32(Scale), DL, IndexVT);
7055 Index = DAG.getNode(ISD::SHL, DL, IndexVT, Index, SplatScale);
7056 }
7057
7058 ISD::MemIndexType NewIndexTy = ISD::UNSIGNED_UNSCALED;
7059 if (const auto *VPGN = dyn_cast<VPGatherSDNode>(N))
7060 return DAG.getGatherVP(N->getVTList(), VPGN->getMemoryVT(), DL,
7061 {VPGN->getChain(), VPGN->getBasePtr(), Index,
7062 VPGN->getScale(), VPGN->getMask(),
7063 VPGN->getVectorLength()},
7064 VPGN->getMemOperand(), NewIndexTy);
7065 if (const auto *VPSN = dyn_cast<VPScatterSDNode>(N))
7066 return DAG.getScatterVP(N->getVTList(), VPSN->getMemoryVT(), DL,
7067 {VPSN->getChain(), VPSN->getValue(),
7068 VPSN->getBasePtr(), Index, VPSN->getScale(),
7069 VPSN->getMask(), VPSN->getVectorLength()},
7070 VPSN->getMemOperand(), NewIndexTy);
7071 if (const auto *MGN = dyn_cast<MaskedGatherSDNode>(N))
7072 return DAG.getMaskedGather(
7073 N->getVTList(), MGN->getMemoryVT(), DL,
7074 {MGN->getChain(), MGN->getPassThru(), MGN->getMask(),
7075 MGN->getBasePtr(), Index, MGN->getScale()},
7076 MGN->getMemOperand(), NewIndexTy, MGN->getExtensionType());
7077 const auto *MSN = cast<MaskedScatterSDNode>(N);
7078 return DAG.getMaskedScatter(
7079 N->getVTList(), MSN->getMemoryVT(), DL,
7080 {MSN->getChain(), MSN->getValue(), MSN->getMask(), MSN->getBasePtr(),
7081 Index, MSN->getScale()},
7082 MSN->getMemOperand(), NewIndexTy, MSN->isTruncatingStore());
7083 }
7084 case RISCVISD::SRA_VL:
7085 case RISCVISD::SRL_VL:
7086 case RISCVISD::SHL_VL: {
7087 SDValue ShAmt = N->getOperand(1);
7088 if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7089 // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7090 SDLoc DL(N);
7091 SDValue VL = N->getOperand(3);
7092 EVT VT = N->getValueType(0);
7093 ShAmt =
7094 DAG.getNode(RISCVISD::VMV_V_X_VL, DL, VT, ShAmt.getOperand(0), VL);
7095 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt,
7096 N->getOperand(2), N->getOperand(3));
7097 }
7098 break;
7099 }
7100 case ISD::SRA:
7101 case ISD::SRL:
7102 case ISD::SHL: {
7103 SDValue ShAmt = N->getOperand(1);
7104 if (ShAmt.getOpcode() == RISCVISD::SPLAT_VECTOR_SPLIT_I64_VL) {
7105 // We don't need the upper 32 bits of a 64-bit element for a shift amount.
7106 SDLoc DL(N);
7107 EVT VT = N->getValueType(0);
7108 ShAmt =
7109 DAG.getNode(RISCVISD::SPLAT_VECTOR_I64, DL, VT, ShAmt.getOperand(0));
7110 return DAG.getNode(N->getOpcode(), DL, VT, N->getOperand(0), ShAmt);
7111 }
7112 break;
7113 }
7114 case RISCVISD::MUL_VL: {
7115 SDValue Op0 = N->getOperand(0);
7116 SDValue Op1 = N->getOperand(1);
7117 if (SDValue V = combineMUL_VLToVWMUL(N, Op0, Op1, DAG))
7118 return V;
7119 if (SDValue V = combineMUL_VLToVWMUL(N, Op1, Op0, DAG))
7120 return V;
7121 return SDValue();
7122 }
7123 case ISD::STORE: {
7124 auto *Store = cast<StoreSDNode>(N);
7125 SDValue Val = Store->getValue();
7126 // Combine store of vmv.x.s to vse with VL of 1.
7127 // FIXME: Support FP.
7128 if (Val.getOpcode() == RISCVISD::VMV_X_S) {
7129 SDValue Src = Val.getOperand(0);
7130 EVT VecVT = Src.getValueType();
7131 EVT MemVT = Store->getMemoryVT();
7132 // The memory VT and the element type must match.
7133 if (VecVT.getVectorElementType() == MemVT) {
7134 SDLoc DL(N);
7135 MVT MaskVT = MVT::getVectorVT(MVT::i1, VecVT.getVectorElementCount());
7136 return DAG.getStoreVP(Store->getChain(), DL, Src, Store->getBasePtr(),
7137 DAG.getConstant(1, DL, MaskVT),
7138 DAG.getConstant(1, DL, Subtarget.getXLenVT()),
7139 Store->getPointerInfo(),
7140 Store->getOriginalAlign(),
7141 Store->getMemOperand()->getFlags());
7142 }
7143 }
7144
7145 break;
7146 }
7147 }
7148
7149 return SDValue();
7150 }
7151
7152 bool RISCVTargetLowering::isDesirableToCommuteWithShift(
7153 const SDNode *N, CombineLevel Level) const {
7154 // The following folds are only desirable if `(OP _, c1 << c2)` can be
7155 // materialised in fewer instructions than `(OP _, c1)`:
7156 //
7157 // (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
7158 // (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
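  // For example, with (shl (add x, 2047), 1): c1 = 2047 fits an ADDI
  // immediate but c1 << c2 = 4094 does not, so the combine is prevented;
  // with c1 = 24 both constants fit and the combine proceeds.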
7159 SDValue N0 = N->getOperand(0);
7160 EVT Ty = N0.getValueType();
7161 if (Ty.isScalarInteger() &&
7162 (N0.getOpcode() == ISD::ADD || N0.getOpcode() == ISD::OR)) {
7163 auto *C1 = dyn_cast<ConstantSDNode>(N0->getOperand(1));
7164 auto *C2 = dyn_cast<ConstantSDNode>(N->getOperand(1));
7165 if (C1 && C2) {
7166 const APInt &C1Int = C1->getAPIntValue();
7167 APInt ShiftedC1Int = C1Int << C2->getAPIntValue();
7168
7169 // We can materialise `c1 << c2` into an add immediate, so it's "free",
7170 // and the combine should happen, to potentially allow further combines
7171 // later.
7172 if (ShiftedC1Int.getMinSignedBits() <= 64 &&
7173 isLegalAddImmediate(ShiftedC1Int.getSExtValue()))
7174 return true;
7175
7176 // We can materialise `c1` in an add immediate, so it's "free", and the
7177 // combine should be prevented.
7178 if (C1Int.getMinSignedBits() <= 64 &&
7179 isLegalAddImmediate(C1Int.getSExtValue()))
7180 return false;
7181
7182 // Neither constant will fit into an immediate, so find materialisation
7183 // costs.
7184 int C1Cost = RISCVMatInt::getIntMatCost(C1Int, Ty.getSizeInBits(),
7185 Subtarget.getFeatureBits(),
7186 /*CompressionCost*/true);
7187 int ShiftedC1Cost = RISCVMatInt::getIntMatCost(
7188 ShiftedC1Int, Ty.getSizeInBits(), Subtarget.getFeatureBits(),
7189 /*CompressionCost*/true);
7190
7191 // Materialising `c1` is cheaper than materialising `c1 << c2`, so the
7192 // combine should be prevented.
7193 if (C1Cost < ShiftedC1Cost)
7194 return false;
7195 }
7196 }
7197 return true;
7198 }
7199
7200 bool RISCVTargetLowering::targetShrinkDemandedConstant(
7201 SDValue Op, const APInt &DemandedBits, const APInt &DemandedElts,
7202 TargetLoweringOpt &TLO) const {
7203 // Delay this optimization as late as possible.
7204 if (!TLO.LegalOps)
7205 return false;
7206
7207 EVT VT = Op.getValueType();
7208 if (VT.isVector())
7209 return false;
7210
7211 // Only handle AND for now.
7212 if (Op.getOpcode() != ISD::AND)
7213 return false;
7214
7215 ConstantSDNode *C = dyn_cast<ConstantSDNode>(Op.getOperand(1));
7216 if (!C)
7217 return false;
7218
7219 const APInt &Mask = C->getAPIntValue();
7220
7221 // Clear all non-demanded bits initially.
7222 APInt ShrunkMask = Mask & DemandedBits;
7223
7224 // Try to make a smaller immediate by setting undemanded bits.
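  // For example, if Mask is 0x1ffff but only the low 16 bits are demanded,
  // the mask can be shrunk to 0xffff, which is selectable as a single zext.h
  // when Zbb or Zbp is available.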
7225
7226 APInt ExpandedMask = Mask | ~DemandedBits;
7227
7228 auto IsLegalMask = [ShrunkMask, ExpandedMask](const APInt &Mask) -> bool {
7229 return ShrunkMask.isSubsetOf(Mask) && Mask.isSubsetOf(ExpandedMask);
7230 };
7231 auto UseMask = [Mask, Op, VT, &TLO](const APInt &NewMask) -> bool {
7232 if (NewMask == Mask)
7233 return true;
7234 SDLoc DL(Op);
7235 SDValue NewC = TLO.DAG.getConstant(NewMask, DL, VT);
7236 SDValue NewOp = TLO.DAG.getNode(ISD::AND, DL, VT, Op.getOperand(0), NewC);
7237 return TLO.CombineTo(Op, NewOp);
7238 };
7239
7240 // If the shrunk mask fits in sign extended 12 bits, let the target
7241 // independent code apply it.
7242 if (ShrunkMask.isSignedIntN(12))
7243 return false;
7244
7245 // Preserve (and X, 0xffff) when zext.h is supported.
7246 if (Subtarget.hasStdExtZbb() || Subtarget.hasStdExtZbp()) {
7247 APInt NewMask = APInt(Mask.getBitWidth(), 0xffff);
7248 if (IsLegalMask(NewMask))
7249 return UseMask(NewMask);
7250 }
7251
7252 // Try to preserve (and X, 0xffffffff), the (zext_inreg X, i32) pattern.
7253 if (VT == MVT::i64) {
7254 APInt NewMask = APInt(64, 0xffffffff);
7255 if (IsLegalMask(NewMask))
7256 return UseMask(NewMask);
7257 }
7258
7259 // For the remaining optimizations, we need to be able to make a negative
7260 // number through a combination of mask and undemanded bits.
7261 if (!ExpandedMask.isNegative())
7262 return false;
7263
7264   // Find the fewest number of bits needed to represent the negative number.
7265 unsigned MinSignedBits = ExpandedMask.getMinSignedBits();
7266
7267 // Try to make a 12 bit negative immediate. If that fails try to make a 32
7268 // bit negative immediate unless the shrunk immediate already fits in 32 bits.
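  // For example, a ShrunkMask of 0xf00 whose undemanded bits extend through
  // the sign bit can be widened to 0xffffffffffffff00 (-256), which fits a
  // 12-bit ANDI immediate.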
7269 APInt NewMask = ShrunkMask;
7270 if (MinSignedBits <= 12)
7271 NewMask.setBitsFrom(11);
7272 else if (MinSignedBits <= 32 && !ShrunkMask.isSignedIntN(32))
7273 NewMask.setBitsFrom(31);
7274 else
7275 return false;
7276
7277 // Sanity check that our new mask is a subset of the demanded mask.
7278 assert(IsLegalMask(NewMask));
7279 return UseMask(NewMask);
7280 }
7281
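// Apply the generalized bit-reverse permutation to Src: each set bit of
// ShAmt swaps adjacent groups of 1, 2, 4, 8, 16, or 32 bits respectively.
// For example, ShAmt == 24 reverses the bytes within each 32-bit half.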
7282 static void computeGREV(APInt &Src, unsigned ShAmt) {
7283 ShAmt &= Src.getBitWidth() - 1;
7284 uint64_t x = Src.getZExtValue();
7285 if (ShAmt & 1)
7286 x = ((x & 0x5555555555555555LL) << 1) | ((x & 0xAAAAAAAAAAAAAAAALL) >> 1);
7287 if (ShAmt & 2)
7288 x = ((x & 0x3333333333333333LL) << 2) | ((x & 0xCCCCCCCCCCCCCCCCLL) >> 2);
7289 if (ShAmt & 4)
7290 x = ((x & 0x0F0F0F0F0F0F0F0FLL) << 4) | ((x & 0xF0F0F0F0F0F0F0F0LL) >> 4);
7291 if (ShAmt & 8)
7292 x = ((x & 0x00FF00FF00FF00FFLL) << 8) | ((x & 0xFF00FF00FF00FF00LL) >> 8);
7293 if (ShAmt & 16)
7294 x = ((x & 0x0000FFFF0000FFFFLL) << 16) | ((x & 0xFFFF0000FFFF0000LL) >> 16);
7295 if (ShAmt & 32)
7296 x = ((x & 0x00000000FFFFFFFFLL) << 32) | ((x & 0xFFFFFFFF00000000LL) >> 32);
7297 Src = x;
7298 }
7299
7300 void RISCVTargetLowering::computeKnownBitsForTargetNode(const SDValue Op,
7301 KnownBits &Known,
7302 const APInt &DemandedElts,
7303 const SelectionDAG &DAG,
7304 unsigned Depth) const {
7305 unsigned BitWidth = Known.getBitWidth();
7306 unsigned Opc = Op.getOpcode();
7307 assert((Opc >= ISD::BUILTIN_OP_END ||
7308 Opc == ISD::INTRINSIC_WO_CHAIN ||
7309 Opc == ISD::INTRINSIC_W_CHAIN ||
7310 Opc == ISD::INTRINSIC_VOID) &&
7311 "Should use MaskedValueIsZero if you don't know whether Op"
7312 " is a target node!");
7313
7314 Known.resetAll();
7315 switch (Opc) {
7316 default: break;
7317 case RISCVISD::SELECT_CC: {
7318 Known = DAG.computeKnownBits(Op.getOperand(4), Depth + 1);
7319 // If we don't know any bits, early out.
7320 if (Known.isUnknown())
7321 break;
7322 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(3), Depth + 1);
7323
7324 // Only known if known in both the LHS and RHS.
7325 Known = KnownBits::commonBits(Known, Known2);
7326 break;
7327 }
7328 case RISCVISD::REMUW: {
7329 KnownBits Known2;
7330 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7331 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7332 // We only care about the lower 32 bits.
7333 Known = KnownBits::urem(Known.trunc(32), Known2.trunc(32));
7334 // Restore the original width by sign extending.
7335 Known = Known.sext(BitWidth);
7336 break;
7337 }
7338 case RISCVISD::DIVUW: {
7339 KnownBits Known2;
7340 Known = DAG.computeKnownBits(Op.getOperand(0), DemandedElts, Depth + 1);
7341 Known2 = DAG.computeKnownBits(Op.getOperand(1), DemandedElts, Depth + 1);
7342 // We only care about the lower 32 bits.
7343 Known = KnownBits::udiv(Known.trunc(32), Known2.trunc(32));
7344 // Restore the original width by sign extending.
7345 Known = Known.sext(BitWidth);
7346 break;
7347 }
7348 case RISCVISD::CTZW: {
7349 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7350 unsigned PossibleTZ = Known2.trunc(32).countMaxTrailingZeros();
7351 unsigned LowBits = Log2_32(PossibleTZ) + 1;
7352 Known.Zero.setBitsFrom(LowBits);
7353 break;
7354 }
7355 case RISCVISD::CLZW: {
7356 KnownBits Known2 = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7357 unsigned PossibleLZ = Known2.trunc(32).countMaxLeadingZeros();
7358 unsigned LowBits = Log2_32(PossibleLZ) + 1;
7359 Known.Zero.setBitsFrom(LowBits);
7360 break;
7361 }
7362 case RISCVISD::GREV:
7363 case RISCVISD::GREVW: {
7364 if (auto *C = dyn_cast<ConstantSDNode>(Op.getOperand(1))) {
7365 Known = DAG.computeKnownBits(Op.getOperand(0), Depth + 1);
7366 if (Opc == RISCVISD::GREVW)
7367 Known = Known.trunc(32);
7368 unsigned ShAmt = C->getZExtValue();
7369 computeGREV(Known.Zero, ShAmt);
7370 computeGREV(Known.One, ShAmt);
7371 if (Opc == RISCVISD::GREVW)
7372 Known = Known.sext(BitWidth);
7373 }
7374 break;
7375 }
7376 case RISCVISD::READ_VLENB:
7377 // We assume VLENB is at least 16 bytes.
7378 Known.Zero.setLowBits(4);
7379 // We assume VLENB is no more than 65536 / 8 bytes.
7380 Known.Zero.setBitsFrom(14);
7381 break;
7382 case ISD::INTRINSIC_W_CHAIN: {
7383 unsigned IntNo = Op.getConstantOperandVal(1);
7384 switch (IntNo) {
7385 default:
7386 // We can't do anything for most intrinsics.
7387 break;
7388 case Intrinsic::riscv_vsetvli:
7389 case Intrinsic::riscv_vsetvlimax:
7390 // Assume that VL output is positive and would fit in an int32_t.
7391 // TODO: VLEN might be capped at 16 bits in a future V spec update.
7392 if (BitWidth >= 32)
7393 Known.Zero.setBitsFrom(31);
7394 break;
7395 }
7396 break;
7397 }
7398 }
7399 }
7400
7401 unsigned RISCVTargetLowering::ComputeNumSignBitsForTargetNode(
7402 SDValue Op, const APInt &DemandedElts, const SelectionDAG &DAG,
7403 unsigned Depth) const {
7404 switch (Op.getOpcode()) {
7405 default:
7406 break;
7407 case RISCVISD::SELECT_CC: {
7408 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(3), DemandedElts, Depth + 1);
7409 if (Tmp == 1) return 1; // Early out.
7410 unsigned Tmp2 = DAG.ComputeNumSignBits(Op.getOperand(4), DemandedElts, Depth + 1);
7411 return std::min(Tmp, Tmp2);
7412 }
7413 case RISCVISD::SLLW:
7414 case RISCVISD::SRAW:
7415 case RISCVISD::SRLW:
7416 case RISCVISD::DIVW:
7417 case RISCVISD::DIVUW:
7418 case RISCVISD::REMUW:
7419 case RISCVISD::ROLW:
7420 case RISCVISD::RORW:
7421 case RISCVISD::GREVW:
7422 case RISCVISD::GORCW:
7423 case RISCVISD::FSLW:
7424 case RISCVISD::FSRW:
7425 case RISCVISD::SHFLW:
7426 case RISCVISD::UNSHFLW:
7427 case RISCVISD::BCOMPRESSW:
7428 case RISCVISD::BDECOMPRESSW:
7429 case RISCVISD::FCVT_W_RTZ_RV64:
7430 case RISCVISD::FCVT_WU_RTZ_RV64:
7431 // TODO: As the result is sign-extended, this is conservatively correct. A
7432 // more precise answer could be calculated for SRAW depending on known
7433 // bits in the shift amount.
7434 return 33;
7435 case RISCVISD::SHFL:
7436 case RISCVISD::UNSHFL: {
7437     // There is no SHFLIW, but an i64 SHFLI with bit 4 of the control word
7438 // cleared doesn't affect bit 31. The upper 32 bits will be shuffled, but
7439 // will stay within the upper 32 bits. If there were more than 32 sign bits
7440 // before there will be at least 33 sign bits after.
7441 if (Op.getValueType() == MVT::i64 &&
7442 isa<ConstantSDNode>(Op.getOperand(1)) &&
7443 (Op.getConstantOperandVal(1) & 0x10) == 0) {
7444 unsigned Tmp = DAG.ComputeNumSignBits(Op.getOperand(0), Depth + 1);
7445 if (Tmp > 32)
7446 return 33;
7447 }
7448 break;
7449 }
7450 case RISCVISD::VMV_X_S:
7451 // The number of sign bits of the scalar result is computed by obtaining the
7452 // element type of the input vector operand, subtracting its width from the
7453 // XLEN, and then adding one (sign bit within the element type). If the
7454 // element type is wider than XLen, the least-significant XLEN bits are
7455 // taken.
7456 if (Op.getOperand(0).getScalarValueSizeInBits() > Subtarget.getXLen())
7457 return 1;
7458 return Subtarget.getXLen() - Op.getOperand(0).getScalarValueSizeInBits() + 1;
7459 }
7460
7461 return 1;
7462 }
7463
7464 static MachineBasicBlock *emitReadCycleWidePseudo(MachineInstr &MI,
7465 MachineBasicBlock *BB) {
7466 assert(MI.getOpcode() == RISCV::ReadCycleWide && "Unexpected instruction");
7467
7468 // To read the 64-bit cycle CSR on a 32-bit target, we read the two halves.
7469 // Should the count have wrapped while it was being read, we need to try
7470 // again.
7471 // ...
7472 // read:
7473 // rdcycleh x3 # load high word of cycle
7474 // rdcycle x2 # load low word of cycle
7475 // rdcycleh x4 # load high word of cycle
7476 // bne x3, x4, read # check if high word reads match, otherwise try again
7477 // ...
7478
7479 MachineFunction &MF = *BB->getParent();
7480 const BasicBlock *LLVM_BB = BB->getBasicBlock();
7481 MachineFunction::iterator It = ++BB->getIterator();
7482
7483 MachineBasicBlock *LoopMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7484 MF.insert(It, LoopMBB);
7485
7486 MachineBasicBlock *DoneMBB = MF.CreateMachineBasicBlock(LLVM_BB);
7487 MF.insert(It, DoneMBB);
7488
7489 // Transfer the remainder of BB and its successor edges to DoneMBB.
7490 DoneMBB->splice(DoneMBB->begin(), BB,
7491 std::next(MachineBasicBlock::iterator(MI)), BB->end());
7492 DoneMBB->transferSuccessorsAndUpdatePHIs(BB);
7493
7494 BB->addSuccessor(LoopMBB);
7495
7496 MachineRegisterInfo &RegInfo = MF.getRegInfo();
7497 Register ReadAgainReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
7498 Register LoReg = MI.getOperand(0).getReg();
7499 Register HiReg = MI.getOperand(1).getReg();
7500 DebugLoc DL = MI.getDebugLoc();
7501
7502 const TargetInstrInfo *TII = MF.getSubtarget().getInstrInfo();
7503 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), HiReg)
7504 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7505 .addReg(RISCV::X0);
7506 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), LoReg)
7507 .addImm(RISCVSysReg::lookupSysRegByName("CYCLE")->Encoding)
7508 .addReg(RISCV::X0);
7509 BuildMI(LoopMBB, DL, TII->get(RISCV::CSRRS), ReadAgainReg)
7510 .addImm(RISCVSysReg::lookupSysRegByName("CYCLEH")->Encoding)
7511 .addReg(RISCV::X0);
7512
7513 BuildMI(LoopMBB, DL, TII->get(RISCV::BNE))
7514 .addReg(HiReg)
7515 .addReg(ReadAgainReg)
7516 .addMBB(LoopMBB);
7517
7518 LoopMBB->addSuccessor(LoopMBB);
7519 LoopMBB->addSuccessor(DoneMBB);
7520
7521 MI.eraseFromParent();
7522
7523 return DoneMBB;
7524 }
7525
7526 static MachineBasicBlock *emitSplitF64Pseudo(MachineInstr &MI,
7527 MachineBasicBlock *BB) {
7528 assert(MI.getOpcode() == RISCV::SplitF64Pseudo && "Unexpected instruction");
7529
7530 MachineFunction &MF = *BB->getParent();
7531 DebugLoc DL = MI.getDebugLoc();
7532 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7533 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7534 Register LoReg = MI.getOperand(0).getReg();
7535 Register HiReg = MI.getOperand(1).getReg();
7536 Register SrcReg = MI.getOperand(2).getReg();
7537 const TargetRegisterClass *SrcRC = &RISCV::FPR64RegClass;
7538 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7539
7540 TII.storeRegToStackSlot(*BB, MI, SrcReg, MI.getOperand(2).isKill(), FI, SrcRC,
7541 RI);
7542 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7543 MachineMemOperand *MMOLo =
7544 MF.getMachineMemOperand(MPI, MachineMemOperand::MOLoad, 4, Align(8));
7545 MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7546 MPI.getWithOffset(4), MachineMemOperand::MOLoad, 4, Align(8));
7547 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), LoReg)
7548 .addFrameIndex(FI)
7549 .addImm(0)
7550 .addMemOperand(MMOLo);
7551 BuildMI(*BB, MI, DL, TII.get(RISCV::LW), HiReg)
7552 .addFrameIndex(FI)
7553 .addImm(4)
7554 .addMemOperand(MMOHi);
7555 MI.eraseFromParent(); // The pseudo instruction is gone now.
7556 return BB;
7557 }
7558
7559 static MachineBasicBlock *emitBuildPairF64Pseudo(MachineInstr &MI,
7560 MachineBasicBlock *BB) {
7561 assert(MI.getOpcode() == RISCV::BuildPairF64Pseudo &&
7562 "Unexpected instruction");
7563
7564 MachineFunction &MF = *BB->getParent();
7565 DebugLoc DL = MI.getDebugLoc();
7566 const TargetInstrInfo &TII = *MF.getSubtarget().getInstrInfo();
7567 const TargetRegisterInfo *RI = MF.getSubtarget().getRegisterInfo();
7568 Register DstReg = MI.getOperand(0).getReg();
7569 Register LoReg = MI.getOperand(1).getReg();
7570 Register HiReg = MI.getOperand(2).getReg();
7571 const TargetRegisterClass *DstRC = &RISCV::FPR64RegClass;
7572 int FI = MF.getInfo<RISCVMachineFunctionInfo>()->getMoveF64FrameIndex(MF);
7573
7574 MachinePointerInfo MPI = MachinePointerInfo::getFixedStack(MF, FI);
7575 MachineMemOperand *MMOLo =
7576 MF.getMachineMemOperand(MPI, MachineMemOperand::MOStore, 4, Align(8));
7577 MachineMemOperand *MMOHi = MF.getMachineMemOperand(
7578 MPI.getWithOffset(4), MachineMemOperand::MOStore, 4, Align(8));
7579 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7580 .addReg(LoReg, getKillRegState(MI.getOperand(1).isKill()))
7581 .addFrameIndex(FI)
7582 .addImm(0)
7583 .addMemOperand(MMOLo);
7584 BuildMI(*BB, MI, DL, TII.get(RISCV::SW))
7585 .addReg(HiReg, getKillRegState(MI.getOperand(2).isKill()))
7586 .addFrameIndex(FI)
7587 .addImm(4)
7588 .addMemOperand(MMOHi);
7589 TII.loadRegFromStackSlot(*BB, MI, DstReg, FI, DstRC, RI);
7590 MI.eraseFromParent(); // The pseudo instruction is gone now.
7591 return BB;
7592 }
7593
7594 static bool isSelectPseudo(MachineInstr &MI) {
7595 switch (MI.getOpcode()) {
7596 default:
7597 return false;
7598 case RISCV::Select_GPR_Using_CC_GPR:
7599 case RISCV::Select_FPR16_Using_CC_GPR:
7600 case RISCV::Select_FPR32_Using_CC_GPR:
7601 case RISCV::Select_FPR64_Using_CC_GPR:
7602 return true;
7603 }
7604 }
7605
7606 static MachineBasicBlock *emitSelectPseudo(MachineInstr &MI,
7607 MachineBasicBlock *BB,
7608 const RISCVSubtarget &Subtarget) {
7609 // To "insert" Select_* instructions, we actually have to insert the triangle
7610 // control-flow pattern. The incoming instructions know the destination vreg
7611 // to set, the condition code register to branch on, the true/false values to
7612 // select between, and the condcode to use to select the appropriate branch.
7613 //
7614 // We produce the following control flow:
7615 // HeadMBB
7616 // | \
7617 // | IfFalseMBB
7618 // | /
7619 // TailMBB
7620 //
7621 // When we find a sequence of selects we attempt to optimize their emission
7622 // by sharing the control flow. Currently we only handle cases where we have
7623 // multiple selects with the exact same condition (same LHS, RHS and CC).
7624 // The selects may be interleaved with other instructions if the other
7625 // instructions meet some requirements we deem safe:
7626 // - They are debug instructions. Otherwise,
7627 // - They do not have side-effects, do not access memory and their inputs do
7628 // not depend on the results of the select pseudo-instructions.
7629 // The TrueV/FalseV operands of the selects cannot depend on the result of
7630 // previous selects in the sequence.
7631 // These conditions could be further relaxed. See the X86 target for a
7632 // related approach and more information.
7633 Register LHS = MI.getOperand(1).getReg();
7634 Register RHS = MI.getOperand(2).getReg();
7635 auto CC = static_cast<RISCVCC::CondCode>(MI.getOperand(3).getImm());
7636
7637 SmallVector<MachineInstr *, 4> SelectDebugValues;
7638 SmallSet<Register, 4> SelectDests;
7639 SelectDests.insert(MI.getOperand(0).getReg());
7640
7641 MachineInstr *LastSelectPseudo = &MI;
7642
7643 for (auto E = BB->end(), SequenceMBBI = MachineBasicBlock::iterator(MI);
7644 SequenceMBBI != E; ++SequenceMBBI) {
7645 if (SequenceMBBI->isDebugInstr())
7646 continue;
7647 else if (isSelectPseudo(*SequenceMBBI)) {
7648 if (SequenceMBBI->getOperand(1).getReg() != LHS ||
7649 SequenceMBBI->getOperand(2).getReg() != RHS ||
7650 SequenceMBBI->getOperand(3).getImm() != CC ||
7651 SelectDests.count(SequenceMBBI->getOperand(4).getReg()) ||
7652 SelectDests.count(SequenceMBBI->getOperand(5).getReg()))
7653 break;
7654 LastSelectPseudo = &*SequenceMBBI;
7655 SequenceMBBI->collectDebugValues(SelectDebugValues);
7656 SelectDests.insert(SequenceMBBI->getOperand(0).getReg());
7657 } else {
7658 if (SequenceMBBI->hasUnmodeledSideEffects() ||
7659 SequenceMBBI->mayLoadOrStore())
7660 break;
7661 if (llvm::any_of(SequenceMBBI->operands(), [&](MachineOperand &MO) {
7662 return MO.isReg() && MO.isUse() && SelectDests.count(MO.getReg());
7663 }))
7664 break;
7665 }
7666 }
7667
7668 const RISCVInstrInfo &TII = *Subtarget.getInstrInfo();
7669 const BasicBlock *LLVM_BB = BB->getBasicBlock();
7670 DebugLoc DL = MI.getDebugLoc();
7671 MachineFunction::iterator I = ++BB->getIterator();
7672
7673 MachineBasicBlock *HeadMBB = BB;
7674 MachineFunction *F = BB->getParent();
7675 MachineBasicBlock *TailMBB = F->CreateMachineBasicBlock(LLVM_BB);
7676 MachineBasicBlock *IfFalseMBB = F->CreateMachineBasicBlock(LLVM_BB);
7677
7678 F->insert(I, IfFalseMBB);
7679 F->insert(I, TailMBB);
7680
7681 // Transfer debug instructions associated with the selects to TailMBB.
7682 for (MachineInstr *DebugInstr : SelectDebugValues) {
7683 TailMBB->push_back(DebugInstr->removeFromParent());
7684 }
7685
7686 // Move all instructions after the sequence to TailMBB.
7687 TailMBB->splice(TailMBB->end(), HeadMBB,
7688 std::next(LastSelectPseudo->getIterator()), HeadMBB->end());
7689 // Update machine-CFG edges by transferring all successors of the current
7690 // block to the new block which will contain the Phi nodes for the selects.
7691 TailMBB->transferSuccessorsAndUpdatePHIs(HeadMBB);
7692 // Set the successors for HeadMBB.
7693 HeadMBB->addSuccessor(IfFalseMBB);
7694 HeadMBB->addSuccessor(TailMBB);
7695
7696 // Insert appropriate branch.
7697 BuildMI(HeadMBB, DL, TII.getBrCond(CC))
7698 .addReg(LHS)
7699 .addReg(RHS)
7700 .addMBB(TailMBB);
7701
7702 // IfFalseMBB just falls through to TailMBB.
7703 IfFalseMBB->addSuccessor(TailMBB);
7704
7705 // Create PHIs for all of the select pseudo-instructions.
7706 auto SelectMBBI = MI.getIterator();
7707 auto SelectEnd = std::next(LastSelectPseudo->getIterator());
7708 auto InsertionPoint = TailMBB->begin();
7709 while (SelectMBBI != SelectEnd) {
7710 auto Next = std::next(SelectMBBI);
7711 if (isSelectPseudo(*SelectMBBI)) {
7712 // %Result = phi [ %TrueValue, HeadMBB ], [ %FalseValue, IfFalseMBB ]
7713 BuildMI(*TailMBB, InsertionPoint, SelectMBBI->getDebugLoc(),
7714 TII.get(RISCV::PHI), SelectMBBI->getOperand(0).getReg())
7715 .addReg(SelectMBBI->getOperand(4).getReg())
7716 .addMBB(HeadMBB)
7717 .addReg(SelectMBBI->getOperand(5).getReg())
7718 .addMBB(IfFalseMBB);
7719 SelectMBBI->eraseFromParent();
7720 }
7721 SelectMBBI = Next;
7722 }
7723
7724 F->getProperties().reset(MachineFunctionProperties::Property::NoPHIs);
7725 return TailMBB;
7726 }
7727
7728 MachineBasicBlock *
7729 RISCVTargetLowering::EmitInstrWithCustomInserter(MachineInstr &MI,
7730 MachineBasicBlock *BB) const {
7731 switch (MI.getOpcode()) {
7732 default:
7733 llvm_unreachable("Unexpected instr type to insert");
7734 case RISCV::ReadCycleWide:
7735 assert(!Subtarget.is64Bit() &&
7736 "ReadCycleWrite is only to be used on riscv32");
7737 return emitReadCycleWidePseudo(MI, BB);
7738 case RISCV::Select_GPR_Using_CC_GPR:
7739 case RISCV::Select_FPR16_Using_CC_GPR:
7740 case RISCV::Select_FPR32_Using_CC_GPR:
7741 case RISCV::Select_FPR64_Using_CC_GPR:
7742 return emitSelectPseudo(MI, BB, Subtarget);
7743 case RISCV::BuildPairF64Pseudo:
7744 return emitBuildPairF64Pseudo(MI, BB);
7745 case RISCV::SplitF64Pseudo:
7746 return emitSplitF64Pseudo(MI, BB);
7747 }
7748 }
7749
7750 // Calling Convention Implementation.
7751 // The expectations for frontend ABI lowering vary from target to target.
7752 // Ideally, an LLVM frontend would be able to avoid worrying about many ABI
7753 // details, but this is a longer term goal. For now, we simply try to keep the
7754 // role of the frontend as simple and well-defined as possible. The rules can
7755 // be summarised as:
7756 // * Never split up large scalar arguments. We handle them here.
7757 // * If a hardfloat calling convention is being used, and the struct may be
7758 // passed in a pair of registers (fp+fp, int+fp), and both registers are
7759 // available, then pass as two separate arguments. If either the GPRs or FPRs
7760 // are exhausted, then pass according to the rule below.
7761 // * If a struct could never be passed in registers or directly in a stack
7762 // slot (as it is larger than 2*XLEN and the floating point rules don't
7763 // apply), then pass it using a pointer with the byval attribute.
7764 // * If a struct is less than 2*XLEN, then coerce to either a two-element
7765 // word-sized array or a 2*XLEN scalar (depending on alignment).
7766 // * The frontend can determine whether a struct is returned by reference or
7767 // not based on its size and fields. If it will be returned by reference, the
7768 // frontend must modify the prototype so a pointer with the sret annotation is
7769 // passed as the first argument. This is not necessary for large scalar
7770 // returns.
7771 // * Struct return values and varargs should be coerced to structs containing
7772 // register-size fields in the same situations they would be for fixed
7773 // arguments.
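// For example, on RV32 an 8-byte struct containing two i32 members would be
// coerced to [2 x i32], or to a single i64 if its alignment is 8 bytes, while
// a 24-byte struct would instead be passed byval via a pointer.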
7774
7775 static const MCPhysReg ArgGPRs[] = {
7776 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13,
7777 RISCV::X14, RISCV::X15, RISCV::X16, RISCV::X17
7778 };
7779 static const MCPhysReg ArgFPR16s[] = {
7780 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H,
7781 RISCV::F14_H, RISCV::F15_H, RISCV::F16_H, RISCV::F17_H
7782 };
7783 static const MCPhysReg ArgFPR32s[] = {
7784 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F,
7785 RISCV::F14_F, RISCV::F15_F, RISCV::F16_F, RISCV::F17_F
7786 };
7787 static const MCPhysReg ArgFPR64s[] = {
7788 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D,
7789 RISCV::F14_D, RISCV::F15_D, RISCV::F16_D, RISCV::F17_D
7790 };
7791 // This is an interim calling convention and it may be changed in the future.
7792 static const MCPhysReg ArgVRs[] = {
7793 RISCV::V8, RISCV::V9, RISCV::V10, RISCV::V11, RISCV::V12, RISCV::V13,
7794 RISCV::V14, RISCV::V15, RISCV::V16, RISCV::V17, RISCV::V18, RISCV::V19,
7795 RISCV::V20, RISCV::V21, RISCV::V22, RISCV::V23};
7796 static const MCPhysReg ArgVRM2s[] = {RISCV::V8M2, RISCV::V10M2, RISCV::V12M2,
7797 RISCV::V14M2, RISCV::V16M2, RISCV::V18M2,
7798 RISCV::V20M2, RISCV::V22M2};
7799 static const MCPhysReg ArgVRM4s[] = {RISCV::V8M4, RISCV::V12M4, RISCV::V16M4,
7800 RISCV::V20M4};
7801 static const MCPhysReg ArgVRM8s[] = {RISCV::V8M8, RISCV::V16M8};
7802
7803 // Pass a 2*XLEN argument that has been split into two XLEN values through
7804 // registers or the stack as necessary.
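// For example, an i64 argument on RV32 may end up in two GPRs, split between
// the last remaining GPR and the stack, or entirely on the stack.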
7805 static bool CC_RISCVAssign2XLen(unsigned XLen, CCState &State, CCValAssign VA1,
7806 ISD::ArgFlagsTy ArgFlags1, unsigned ValNo2,
7807 MVT ValVT2, MVT LocVT2,
7808 ISD::ArgFlagsTy ArgFlags2) {
7809 unsigned XLenInBytes = XLen / 8;
7810 if (Register Reg = State.AllocateReg(ArgGPRs)) {
7811 // At least one half can be passed via register.
7812 State.addLoc(CCValAssign::getReg(VA1.getValNo(), VA1.getValVT(), Reg,
7813 VA1.getLocVT(), CCValAssign::Full));
7814 } else {
7815 // Both halves must be passed on the stack, with proper alignment.
7816 Align StackAlign =
7817 std::max(Align(XLenInBytes), ArgFlags1.getNonZeroOrigAlign());
7818 State.addLoc(
7819 CCValAssign::getMem(VA1.getValNo(), VA1.getValVT(),
7820 State.AllocateStack(XLenInBytes, StackAlign),
7821 VA1.getLocVT(), CCValAssign::Full));
7822 State.addLoc(CCValAssign::getMem(
7823 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7824 LocVT2, CCValAssign::Full));
7825 return false;
7826 }
7827
7828 if (Register Reg = State.AllocateReg(ArgGPRs)) {
7829 // The second half can also be passed via register.
7830 State.addLoc(
7831 CCValAssign::getReg(ValNo2, ValVT2, Reg, LocVT2, CCValAssign::Full));
7832 } else {
7833 // The second half is passed via the stack, without additional alignment.
7834 State.addLoc(CCValAssign::getMem(
7835 ValNo2, ValVT2, State.AllocateStack(XLenInBytes, Align(XLenInBytes)),
7836 LocVT2, CCValAssign::Full));
7837 }
7838
7839 return false;
7840 }
7841
7842 static unsigned allocateRVVReg(MVT ValVT, unsigned ValNo,
7843 Optional<unsigned> FirstMaskArgument,
7844 CCState &State, const RISCVTargetLowering &TLI) {
7845 const TargetRegisterClass *RC = TLI.getRegClassFor(ValVT);
7846 if (RC == &RISCV::VRRegClass) {
7847 // Assign the first mask argument to V0.
7848 // This is an interim calling convention and it may be changed in the
7849 // future.
7850 if (FirstMaskArgument.hasValue() && ValNo == FirstMaskArgument.getValue())
7851 return State.AllocateReg(RISCV::V0);
7852 return State.AllocateReg(ArgVRs);
7853 }
7854 if (RC == &RISCV::VRM2RegClass)
7855 return State.AllocateReg(ArgVRM2s);
7856 if (RC == &RISCV::VRM4RegClass)
7857 return State.AllocateReg(ArgVRM4s);
7858 if (RC == &RISCV::VRM8RegClass)
7859 return State.AllocateReg(ArgVRM8s);
7860 llvm_unreachable("Unhandled register class for ValueType");
7861 }
7862
7863 // Implements the RISC-V calling convention. Returns true upon failure.
7864 static bool CC_RISCV(const DataLayout &DL, RISCVABI::ABI ABI, unsigned ValNo,
7865 MVT ValVT, MVT LocVT, CCValAssign::LocInfo LocInfo,
7866 ISD::ArgFlagsTy ArgFlags, CCState &State, bool IsFixed,
7867 bool IsRet, Type *OrigTy, const RISCVTargetLowering &TLI,
7868 Optional<unsigned> FirstMaskArgument) {
7869 unsigned XLen = DL.getLargestLegalIntTypeSizeInBits();
7870 assert(XLen == 32 || XLen == 64);
7871 MVT XLenVT = XLen == 32 ? MVT::i32 : MVT::i64;
7872
7873   // Any return value split into more than two values can't be returned
7874 // directly. Vectors are returned via the available vector registers.
7875 if (!LocVT.isVector() && IsRet && ValNo > 1)
7876 return true;
7877
7878 // UseGPRForF16_F32 if targeting one of the soft-float ABIs, if passing a
7879 // variadic argument, or if no F16/F32 argument registers are available.
7880 bool UseGPRForF16_F32 = true;
7881 // UseGPRForF64 if targeting soft-float ABIs or an FLEN=32 ABI, if passing a
7882 // variadic argument, or if no F64 argument registers are available.
7883 bool UseGPRForF64 = true;
7884
7885 switch (ABI) {
7886 default:
7887 llvm_unreachable("Unexpected ABI");
7888 case RISCVABI::ABI_ILP32:
7889 case RISCVABI::ABI_LP64:
7890 break;
7891 case RISCVABI::ABI_ILP32F:
7892 case RISCVABI::ABI_LP64F:
7893 UseGPRForF16_F32 = !IsFixed;
7894 break;
7895 case RISCVABI::ABI_ILP32D:
7896 case RISCVABI::ABI_LP64D:
7897 UseGPRForF16_F32 = !IsFixed;
7898 UseGPRForF64 = !IsFixed;
7899 break;
7900 }
7901
7902 // FPR16, FPR32, and FPR64 alias each other.
7903 if (State.getFirstUnallocated(ArgFPR32s) == array_lengthof(ArgFPR32s)) {
7904 UseGPRForF16_F32 = true;
7905 UseGPRForF64 = true;
7906 }
7907
7908 // From this point on, rely on UseGPRForF16_F32, UseGPRForF64 and
7909 // similar local variables rather than directly checking against the target
7910 // ABI.
7911
7912 if (UseGPRForF16_F32 && (ValVT == MVT::f16 || ValVT == MVT::f32)) {
7913 LocVT = XLenVT;
7914 LocInfo = CCValAssign::BCvt;
7915 } else if (UseGPRForF64 && XLen == 64 && ValVT == MVT::f64) {
7916 LocVT = MVT::i64;
7917 LocInfo = CCValAssign::BCvt;
7918 }
7919
7920 // If this is a variadic argument, the RISC-V calling convention requires
7921 // that it is assigned an 'even' or 'aligned' register if it has 8-byte
7922 // alignment (RV32) or 16-byte alignment (RV64). An aligned register should
7923 // be used regardless of whether the original argument was split during
7924 // legalisation or not. The argument will not be passed by registers if the
7925 // original type is larger than 2*XLEN, so the register alignment rule does
7926 // not apply.
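  // For example, on RV32 a variadic double following a single int argument
  // skips a1 and is passed in the aligned pair (a2, a3).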
7927 unsigned TwoXLenInBytes = (2 * XLen) / 8;
7928 if (!IsFixed && ArgFlags.getNonZeroOrigAlign() == TwoXLenInBytes &&
7929 DL.getTypeAllocSize(OrigTy) == TwoXLenInBytes) {
7930 unsigned RegIdx = State.getFirstUnallocated(ArgGPRs);
7931 // Skip 'odd' register if necessary.
7932 if (RegIdx != array_lengthof(ArgGPRs) && RegIdx % 2 == 1)
7933 State.AllocateReg(ArgGPRs);
7934 }
7935
7936 SmallVectorImpl<CCValAssign> &PendingLocs = State.getPendingLocs();
7937 SmallVectorImpl<ISD::ArgFlagsTy> &PendingArgFlags =
7938 State.getPendingArgFlags();
7939
7940 assert(PendingLocs.size() == PendingArgFlags.size() &&
7941 "PendingLocs and PendingArgFlags out of sync");
7942
7943 // Handle passing f64 on RV32D with a soft float ABI or when floating point
7944 // registers are exhausted.
7945 if (UseGPRForF64 && XLen == 32 && ValVT == MVT::f64) {
7946 assert(!ArgFlags.isSplit() && PendingLocs.empty() &&
7947 "Can't lower f64 if it is split");
7948     // Depending on available argument GPRs, f64 may be passed in a pair of
7949 // GPRs, split between a GPR and the stack, or passed completely on the
7950 // stack. LowerCall/LowerFormalArguments/LowerReturn must recognise these
7951 // cases.
7952 Register Reg = State.AllocateReg(ArgGPRs);
7953 LocVT = MVT::i32;
7954 if (!Reg) {
7955 unsigned StackOffset = State.AllocateStack(8, Align(8));
7956 State.addLoc(
7957 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
7958 return false;
7959 }
7960 if (!State.AllocateReg(ArgGPRs))
7961 State.AllocateStack(4, Align(4));
7962 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
7963 return false;
7964 }
7965
7966 // Fixed-length vectors are located in the corresponding scalable-vector
7967 // container types.
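  // For example, a v8i32 argument might be assigned a location in a container
  // such as nxv2i32, with the conversion handled when the value is unpacked.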
7968 if (ValVT.isFixedLengthVector())
7969 LocVT = TLI.getContainerForFixedLengthVector(LocVT);
7970
7971 // Split arguments might be passed indirectly, so keep track of the pending
7972 // values. Split vectors are passed via a mix of registers and indirectly, so
7973 // treat them as we would any other argument.
7974 if (ValVT.isScalarInteger() && (ArgFlags.isSplit() || !PendingLocs.empty())) {
7975 LocVT = XLenVT;
7976 LocInfo = CCValAssign::Indirect;
7977 PendingLocs.push_back(
7978 CCValAssign::getPending(ValNo, ValVT, LocVT, LocInfo));
7979 PendingArgFlags.push_back(ArgFlags);
7980 if (!ArgFlags.isSplitEnd()) {
7981 return false;
7982 }
7983 }
7984
7985 // If the split argument only had two elements, it should be passed directly
7986 // in registers or on the stack.
7987 if (ValVT.isScalarInteger() && ArgFlags.isSplitEnd() &&
7988 PendingLocs.size() <= 2) {
7989 assert(PendingLocs.size() == 2 && "Unexpected PendingLocs.size()");
7990 // Apply the normal calling convention rules to the first half of the
7991 // split argument.
7992 CCValAssign VA = PendingLocs[0];
7993 ISD::ArgFlagsTy AF = PendingArgFlags[0];
7994 PendingLocs.clear();
7995 PendingArgFlags.clear();
7996 return CC_RISCVAssign2XLen(XLen, State, VA, AF, ValNo, ValVT, LocVT,
7997 ArgFlags);
7998 }
7999
8000 // Allocate to a register if possible, or else a stack slot.
8001 Register Reg;
8002 unsigned StoreSizeBytes = XLen / 8;
8003 Align StackAlign = Align(XLen / 8);
8004
8005 if (ValVT == MVT::f16 && !UseGPRForF16_F32)
8006 Reg = State.AllocateReg(ArgFPR16s);
8007 else if (ValVT == MVT::f32 && !UseGPRForF16_F32)
8008 Reg = State.AllocateReg(ArgFPR32s);
8009 else if (ValVT == MVT::f64 && !UseGPRForF64)
8010 Reg = State.AllocateReg(ArgFPR64s);
8011 else if (ValVT.isVector()) {
8012 Reg = allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI);
8013 if (!Reg) {
8014 // For return values, the vector must be passed fully via registers or
8015 // via the stack.
8016 // FIXME: The proposed vector ABI only mandates v8-v15 for return values,
8017 // but we're using all of them.
8018 if (IsRet)
8019 return true;
8020 // Try using a GPR to pass the address
8021 if ((Reg = State.AllocateReg(ArgGPRs))) {
8022 LocVT = XLenVT;
8023 LocInfo = CCValAssign::Indirect;
8024 } else if (ValVT.isScalableVector()) {
8025 report_fatal_error("Unable to pass scalable vector types on the stack");
8026 } else {
8027 // Pass fixed-length vectors on the stack.
8028 LocVT = ValVT;
8029 StoreSizeBytes = ValVT.getStoreSize();
8030 // Align vectors to their element sizes, being careful for vXi1
8031 // vectors.
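        // For example, a v16i8 vector gets Align(1); for v8i1 the computed
        // byte size is 0, which valueOrOne() promotes to Align(1).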
8032 StackAlign = MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8033 }
8034 }
8035 } else {
8036 Reg = State.AllocateReg(ArgGPRs);
8037 }
8038
8039 unsigned StackOffset =
8040 Reg ? 0 : State.AllocateStack(StoreSizeBytes, StackAlign);
8041
8042 // If we reach this point and PendingLocs is non-empty, we must be at the
8043 // end of a split argument that must be passed indirectly.
8044 if (!PendingLocs.empty()) {
8045 assert(ArgFlags.isSplitEnd() && "Expected ArgFlags.isSplitEnd()");
8046 assert(PendingLocs.size() > 2 && "Unexpected PendingLocs.size()");
8047
8048 for (auto &It : PendingLocs) {
8049 if (Reg)
8050 It.convertToReg(Reg);
8051 else
8052 It.convertToMem(StackOffset);
8053 State.addLoc(It);
8054 }
8055 PendingLocs.clear();
8056 PendingArgFlags.clear();
8057 return false;
8058 }
8059
8060 assert((!UseGPRForF16_F32 || !UseGPRForF64 || LocVT == XLenVT ||
8061 (TLI.getSubtarget().hasStdExtV() && ValVT.isVector())) &&
8062 "Expected an XLenVT or vector types at this stage");
8063
8064 if (Reg) {
8065 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8066 return false;
8067 }
8068
8069 // When a floating-point value is passed on the stack, no bit-conversion is
8070 // needed.
8071 if (ValVT.isFloatingPoint()) {
8072 LocVT = ValVT;
8073 LocInfo = CCValAssign::Full;
8074 }
8075 State.addLoc(CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8076 return false;
8077 }
8078
8079 template <typename ArgTy>
8080 static Optional<unsigned> preAssignMask(const ArgTy &Args) {
8081 for (const auto &ArgIdx : enumerate(Args)) {
8082 MVT ArgVT = ArgIdx.value().VT;
8083 if (ArgVT.isVector() && ArgVT.getVectorElementType() == MVT::i1)
8084 return ArgIdx.index();
8085 }
8086 return None;
8087 }
8088
8089 void RISCVTargetLowering::analyzeInputArgs(
8090 MachineFunction &MF, CCState &CCInfo,
8091 const SmallVectorImpl<ISD::InputArg> &Ins, bool IsRet,
8092 RISCVCCAssignFn Fn) const {
8093 unsigned NumArgs = Ins.size();
8094 FunctionType *FType = MF.getFunction().getFunctionType();
8095
8096 Optional<unsigned> FirstMaskArgument;
8097 if (Subtarget.hasStdExtV())
8098 FirstMaskArgument = preAssignMask(Ins);
8099
8100 for (unsigned i = 0; i != NumArgs; ++i) {
8101 MVT ArgVT = Ins[i].VT;
8102 ISD::ArgFlagsTy ArgFlags = Ins[i].Flags;
8103
8104 Type *ArgTy = nullptr;
8105 if (IsRet)
8106 ArgTy = FType->getReturnType();
8107 else if (Ins[i].isOrigArg())
8108 ArgTy = FType->getParamType(Ins[i].getOrigArgIndex());
8109
8110 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8111 if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8112 ArgFlags, CCInfo, /*IsFixed=*/true, IsRet, ArgTy, *this,
8113 FirstMaskArgument)) {
8114 LLVM_DEBUG(dbgs() << "InputArg #" << i << " has unhandled type "
8115 << EVT(ArgVT).getEVTString() << '\n');
8116 llvm_unreachable(nullptr);
8117 }
8118 }
8119 }
8120
8121 void RISCVTargetLowering::analyzeOutputArgs(
8122 MachineFunction &MF, CCState &CCInfo,
8123 const SmallVectorImpl<ISD::OutputArg> &Outs, bool IsRet,
8124 CallLoweringInfo *CLI, RISCVCCAssignFn Fn) const {
8125 unsigned NumArgs = Outs.size();
8126
8127 Optional<unsigned> FirstMaskArgument;
8128 if (Subtarget.hasStdExtV())
8129 FirstMaskArgument = preAssignMask(Outs);
8130
8131 for (unsigned i = 0; i != NumArgs; i++) {
8132 MVT ArgVT = Outs[i].VT;
8133 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8134 Type *OrigTy = CLI ? CLI->getArgs()[Outs[i].OrigArgIndex].Ty : nullptr;
8135
8136 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8137 if (Fn(MF.getDataLayout(), ABI, i, ArgVT, ArgVT, CCValAssign::Full,
8138 ArgFlags, CCInfo, Outs[i].IsFixed, IsRet, OrigTy, *this,
8139 FirstMaskArgument)) {
8140 LLVM_DEBUG(dbgs() << "OutputArg #" << i << " has unhandled type "
8141 << EVT(ArgVT).getEVTString() << "\n");
8142 llvm_unreachable(nullptr);
8143 }
8144 }
8145 }
8146
8147 // Convert Val to a ValVT. Should not be called for CCValAssign::Indirect
8148 // values.
8149 static SDValue convertLocVTToValVT(SelectionDAG &DAG, SDValue Val,
8150 const CCValAssign &VA, const SDLoc &DL,
8151 const RISCVSubtarget &Subtarget) {
8152 switch (VA.getLocInfo()) {
8153 default:
8154 llvm_unreachable("Unexpected CCValAssign::LocInfo");
8155 case CCValAssign::Full:
8156 if (VA.getValVT().isFixedLengthVector() && VA.getLocVT().isScalableVector())
8157 Val = convertFromScalableVector(VA.getValVT(), Val, DAG, Subtarget);
8158 break;
8159 case CCValAssign::BCvt:
8160 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8161 Val = DAG.getNode(RISCVISD::FMV_H_X, DL, MVT::f16, Val);
8162 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8163 Val = DAG.getNode(RISCVISD::FMV_W_X_RV64, DL, MVT::f32, Val);
8164 else
8165 Val = DAG.getNode(ISD::BITCAST, DL, VA.getValVT(), Val);
8166 break;
8167 }
8168 return Val;
8169 }
8170
8171 // The caller is responsible for loading the full value if the argument is
8172 // passed with CCValAssign::Indirect.
8173 static SDValue unpackFromRegLoc(SelectionDAG &DAG, SDValue Chain,
8174 const CCValAssign &VA, const SDLoc &DL,
8175 const RISCVTargetLowering &TLI) {
8176 MachineFunction &MF = DAG.getMachineFunction();
8177 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8178 EVT LocVT = VA.getLocVT();
8179 SDValue Val;
8180 const TargetRegisterClass *RC = TLI.getRegClassFor(LocVT.getSimpleVT());
8181 Register VReg = RegInfo.createVirtualRegister(RC);
8182 RegInfo.addLiveIn(VA.getLocReg(), VReg);
8183 Val = DAG.getCopyFromReg(Chain, DL, VReg, LocVT);
8184
8185 if (VA.getLocInfo() == CCValAssign::Indirect)
8186 return Val;
8187
8188 return convertLocVTToValVT(DAG, Val, VA, DL, TLI.getSubtarget());
8189 }
8190
8191 static SDValue convertValVTToLocVT(SelectionDAG &DAG, SDValue Val,
8192 const CCValAssign &VA, const SDLoc &DL,
8193 const RISCVSubtarget &Subtarget) {
8194 EVT LocVT = VA.getLocVT();
8195
8196 switch (VA.getLocInfo()) {
8197 default:
8198 llvm_unreachable("Unexpected CCValAssign::LocInfo");
8199 case CCValAssign::Full:
8200 if (VA.getValVT().isFixedLengthVector() && LocVT.isScalableVector())
8201 Val = convertToScalableVector(LocVT, Val, DAG, Subtarget);
8202 break;
8203 case CCValAssign::BCvt:
8204 if (VA.getLocVT().isInteger() && VA.getValVT() == MVT::f16)
8205 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTH, DL, VA.getLocVT(), Val);
8206 else if (VA.getLocVT() == MVT::i64 && VA.getValVT() == MVT::f32)
8207 Val = DAG.getNode(RISCVISD::FMV_X_ANYEXTW_RV64, DL, MVT::i64, Val);
8208 else
8209 Val = DAG.getNode(ISD::BITCAST, DL, LocVT, Val);
8210 break;
8211 }
8212 return Val;
8213 }
8214
8215 // The caller is responsible for loading the full value if the argument is
8216 // passed with CCValAssign::Indirect.
8217 static SDValue unpackFromMemLoc(SelectionDAG &DAG, SDValue Chain,
8218 const CCValAssign &VA, const SDLoc &DL) {
8219 MachineFunction &MF = DAG.getMachineFunction();
8220 MachineFrameInfo &MFI = MF.getFrameInfo();
8221 EVT LocVT = VA.getLocVT();
8222 EVT ValVT = VA.getValVT();
8223 EVT PtrVT = MVT::getIntegerVT(DAG.getDataLayout().getPointerSizeInBits(0));
8224 int FI = MFI.CreateFixedObject(ValVT.getStoreSize(), VA.getLocMemOffset(),
8225 /*Immutable=*/true);
8226 SDValue FIN = DAG.getFrameIndex(FI, PtrVT);
8227 SDValue Val;
8228
8229 ISD::LoadExtType ExtType;
8230 switch (VA.getLocInfo()) {
8231 default:
8232 llvm_unreachable("Unexpected CCValAssign::LocInfo");
8233 case CCValAssign::Full:
8234 case CCValAssign::Indirect:
8235 case CCValAssign::BCvt:
8236 ExtType = ISD::NON_EXTLOAD;
8237 break;
8238 }
8239 Val = DAG.getExtLoad(
8240 ExtType, DL, LocVT, Chain, FIN,
8241 MachinePointerInfo::getFixedStack(DAG.getMachineFunction(), FI), ValVT);
8242 return Val;
8243 }
8244
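// Reassemble an f64 passed under an RV32 soft-float ABI as two i32 halves.
// The low half arrives in the GPR recorded in the CCValAssign; the high half
// arrives either in the next GPR or, when the low half landed in a7 (X17,
// the last argument register), in the first stack slot. For example
// (illustrative), f(double d) under ilp32 receives the low word in a0 and
// the high word in a1, which BuildPairF64 below fuses back into an f64.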
8245 static SDValue unpackF64OnRV32DSoftABI(SelectionDAG &DAG, SDValue Chain,
8246 const CCValAssign &VA, const SDLoc &DL) {
8247 assert(VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64 &&
8248 "Unexpected VA");
8249 MachineFunction &MF = DAG.getMachineFunction();
8250 MachineFrameInfo &MFI = MF.getFrameInfo();
8251 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8252
8253 if (VA.isMemLoc()) {
8254 // f64 is passed on the stack.
8255 int FI = MFI.CreateFixedObject(8, VA.getLocMemOffset(), /*Immutable=*/true);
8256 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8257 return DAG.getLoad(MVT::f64, DL, Chain, FIN,
8258 MachinePointerInfo::getFixedStack(MF, FI));
8259 }
8260
8261 assert(VA.isRegLoc() && "Expected register VA assignment");
8262
8263 Register LoVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8264 RegInfo.addLiveIn(VA.getLocReg(), LoVReg);
8265 SDValue Lo = DAG.getCopyFromReg(Chain, DL, LoVReg, MVT::i32);
8266 SDValue Hi;
8267 if (VA.getLocReg() == RISCV::X17) {
8268 // Second half of f64 is passed on the stack.
8269 int FI = MFI.CreateFixedObject(4, 0, /*Immutable=*/true);
8270 SDValue FIN = DAG.getFrameIndex(FI, MVT::i32);
8271 Hi = DAG.getLoad(MVT::i32, DL, Chain, FIN,
8272 MachinePointerInfo::getFixedStack(MF, FI));
8273 } else {
8274 // Second half of f64 is passed in another GPR.
8275 Register HiVReg = RegInfo.createVirtualRegister(&RISCV::GPRRegClass);
8276 RegInfo.addLiveIn(VA.getLocReg() + 1, HiVReg);
8277 Hi = DAG.getCopyFromReg(Chain, DL, HiVReg, MVT::i32);
8278 }
8279 return DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, Lo, Hi);
8280 }
8281
8282 // FastCC gives less than a 1% performance improvement on some particular
8283 // benchmarks, but in theory it may benefit other cases.
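// Illustrative difference from the standard CC: in addition to a0-a7, the
// GPRList below also allocates the temporaries t2 and t3-t6 (X7, X28-X31) to
// integer arguments, so e.g. a call with nine XLen-sized integer arguments
// can keep all nine in registers where the standard CC would push the ninth
// onto the stack.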
8284 static bool CC_RISCV_FastCC(const DataLayout &DL, RISCVABI::ABI ABI,
8285 unsigned ValNo, MVT ValVT, MVT LocVT,
8286 CCValAssign::LocInfo LocInfo,
8287 ISD::ArgFlagsTy ArgFlags, CCState &State,
8288 bool IsFixed, bool IsRet, Type *OrigTy,
8289 const RISCVTargetLowering &TLI,
8290 Optional<unsigned> FirstMaskArgument) {
8291
8292 // X5 and X6 might be used by the save-restore libcalls.
8293 static const MCPhysReg GPRList[] = {
8294 RISCV::X10, RISCV::X11, RISCV::X12, RISCV::X13, RISCV::X14,
8295 RISCV::X15, RISCV::X16, RISCV::X17, RISCV::X7, RISCV::X28,
8296 RISCV::X29, RISCV::X30, RISCV::X31};
8297
8298 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8299 if (unsigned Reg = State.AllocateReg(GPRList)) {
8300 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8301 return false;
8302 }
8303 }
8304
8305 if (LocVT == MVT::f16) {
8306 static const MCPhysReg FPR16List[] = {
8307 RISCV::F10_H, RISCV::F11_H, RISCV::F12_H, RISCV::F13_H, RISCV::F14_H,
8308 RISCV::F15_H, RISCV::F16_H, RISCV::F17_H, RISCV::F0_H, RISCV::F1_H,
8309 RISCV::F2_H, RISCV::F3_H, RISCV::F4_H, RISCV::F5_H, RISCV::F6_H,
8310 RISCV::F7_H, RISCV::F28_H, RISCV::F29_H, RISCV::F30_H, RISCV::F31_H};
8311 if (unsigned Reg = State.AllocateReg(FPR16List)) {
8312 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8313 return false;
8314 }
8315 }
8316
8317 if (LocVT == MVT::f32) {
8318 static const MCPhysReg FPR32List[] = {
8319 RISCV::F10_F, RISCV::F11_F, RISCV::F12_F, RISCV::F13_F, RISCV::F14_F,
8320 RISCV::F15_F, RISCV::F16_F, RISCV::F17_F, RISCV::F0_F, RISCV::F1_F,
8321 RISCV::F2_F, RISCV::F3_F, RISCV::F4_F, RISCV::F5_F, RISCV::F6_F,
8322 RISCV::F7_F, RISCV::F28_F, RISCV::F29_F, RISCV::F30_F, RISCV::F31_F};
8323 if (unsigned Reg = State.AllocateReg(FPR32List)) {
8324 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8325 return false;
8326 }
8327 }
8328
8329 if (LocVT == MVT::f64) {
8330 static const MCPhysReg FPR64List[] = {
8331 RISCV::F10_D, RISCV::F11_D, RISCV::F12_D, RISCV::F13_D, RISCV::F14_D,
8332 RISCV::F15_D, RISCV::F16_D, RISCV::F17_D, RISCV::F0_D, RISCV::F1_D,
8333 RISCV::F2_D, RISCV::F3_D, RISCV::F4_D, RISCV::F5_D, RISCV::F6_D,
8334 RISCV::F7_D, RISCV::F28_D, RISCV::F29_D, RISCV::F30_D, RISCV::F31_D};
8335 if (unsigned Reg = State.AllocateReg(FPR64List)) {
8336 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8337 return false;
8338 }
8339 }
8340
8341 if (LocVT == MVT::i32 || LocVT == MVT::f32) {
8342 unsigned Offset4 = State.AllocateStack(4, Align(4));
8343 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset4, LocVT, LocInfo));
8344 return false;
8345 }
8346
8347 if (LocVT == MVT::i64 || LocVT == MVT::f64) {
8348 unsigned Offset5 = State.AllocateStack(8, Align(8));
8349 State.addLoc(CCValAssign::getMem(ValNo, ValVT, Offset5, LocVT, LocInfo));
8350 return false;
8351 }
8352
8353 if (LocVT.isVector()) {
8354 if (unsigned Reg =
8355 allocateRVVReg(ValVT, ValNo, FirstMaskArgument, State, TLI)) {
8356 // Fixed-length vectors are located in the corresponding scalable-vector
8357 // container types.
8358 if (ValVT.isFixedLengthVector())
8359 LocVT = TLI.getContainerForFixedLengthVector(LocVT);
8360 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8361 } else {
8362 // Try and pass the address via a "fast" GPR.
8363 if (unsigned GPRReg = State.AllocateReg(GPRList)) {
8364 LocInfo = CCValAssign::Indirect;
8365 LocVT = TLI.getSubtarget().getXLenVT();
8366 State.addLoc(CCValAssign::getReg(ValNo, ValVT, GPRReg, LocVT, LocInfo));
8367 } else if (ValVT.isFixedLengthVector()) {
8368 auto StackAlign =
8369 MaybeAlign(ValVT.getScalarSizeInBits() / 8).valueOrOne();
8370 unsigned StackOffset =
8371 State.AllocateStack(ValVT.getStoreSize(), StackAlign);
8372 State.addLoc(
8373 CCValAssign::getMem(ValNo, ValVT, StackOffset, LocVT, LocInfo));
8374 } else {
8375 // Can't pass scalable vectors on the stack.
8376 return true;
8377 }
8378 }
8379
8380 return false;
8381 }
8382
8383 return true; // CC didn't match.
8384 }
8385
8386 static bool CC_RISCV_GHC(unsigned ValNo, MVT ValVT, MVT LocVT,
8387 CCValAssign::LocInfo LocInfo,
8388 ISD::ArgFlagsTy ArgFlags, CCState &State) {
8389
8390 if (LocVT == MVT::i32 || LocVT == MVT::i64) {
8391 // Pass in STG registers: Base, Sp, Hp, R1, R2, R3, R4, R5, R6, R7, SpLim
8392 // s1 s2 s3 s4 s5 s6 s7 s8 s9 s10 s11
8393 static const MCPhysReg GPRList[] = {
8394 RISCV::X9, RISCV::X18, RISCV::X19, RISCV::X20, RISCV::X21, RISCV::X22,
8395 RISCV::X23, RISCV::X24, RISCV::X25, RISCV::X26, RISCV::X27};
8396 if (unsigned Reg = State.AllocateReg(GPRList)) {
8397 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8398 return false;
8399 }
8400 }
8401
8402 if (LocVT == MVT::f32) {
8403 // Pass in STG registers: F1, ..., F6
8404 // fs0 ... fs5
8405 static const MCPhysReg FPR32List[] = {RISCV::F8_F, RISCV::F9_F,
8406 RISCV::F18_F, RISCV::F19_F,
8407 RISCV::F20_F, RISCV::F21_F};
8408 if (unsigned Reg = State.AllocateReg(FPR32List)) {
8409 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8410 return false;
8411 }
8412 }
8413
8414 if (LocVT == MVT::f64) {
8415 // Pass in STG registers: D1, ..., D6
8416 // fs6 ... fs11
8417 static const MCPhysReg FPR64List[] = {RISCV::F22_D, RISCV::F23_D,
8418 RISCV::F24_D, RISCV::F25_D,
8419 RISCV::F26_D, RISCV::F27_D};
8420 if (unsigned Reg = State.AllocateReg(FPR64List)) {
8421 State.addLoc(CCValAssign::getReg(ValNo, ValVT, Reg, LocVT, LocInfo));
8422 return false;
8423 }
8424 }
8425
8426 report_fatal_error("No registers left in GHC calling convention");
8427 return true;
8428 }
8429
8430 // Transform physical registers into virtual registers.
8431 SDValue RISCVTargetLowering::LowerFormalArguments(
8432 SDValue Chain, CallingConv::ID CallConv, bool IsVarArg,
8433 const SmallVectorImpl<ISD::InputArg> &Ins, const SDLoc &DL,
8434 SelectionDAG &DAG, SmallVectorImpl<SDValue> &InVals) const {
8435
8436 MachineFunction &MF = DAG.getMachineFunction();
8437
8438 switch (CallConv) {
8439 default:
8440 report_fatal_error("Unsupported calling convention");
8441 case CallingConv::C:
8442 case CallingConv::Fast:
8443 break;
8444 case CallingConv::GHC:
8445 if (!MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtF] ||
8446 !MF.getSubtarget().getFeatureBits()[RISCV::FeatureStdExtD])
8447 report_fatal_error(
8448 "GHC calling convention requires the F and D instruction set extensions");
8449 }
8450
8451 const Function &Func = MF.getFunction();
8452 if (Func.hasFnAttribute("interrupt")) {
8453 if (!Func.arg_empty())
8454 report_fatal_error(
8455 "Functions with the interrupt attribute cannot have arguments!");
8456
8457 StringRef Kind =
8458 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
8459
8460 if (!(Kind == "user" || Kind == "supervisor" || Kind == "machine"))
8461 report_fatal_error(
8462 "Function interrupt attribute argument not supported!");
8463 }
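// For example (illustrative), clang's __attribute__((interrupt("machine")))
// is recorded as the IR function attribute "interrupt"="machine", which is
// validated here and later selects the matching xRET opcode in LowerReturn.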
8464
8465 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8466 MVT XLenVT = Subtarget.getXLenVT();
8467 unsigned XLenInBytes = Subtarget.getXLen() / 8;
8468 // Used with varargs to accumulate store chains.
8469 std::vector<SDValue> OutChains;
8470
8471 // Assign locations to all of the incoming arguments.
8472 SmallVector<CCValAssign, 16> ArgLocs;
8473 CCState CCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8474
8475 if (CallConv == CallingConv::GHC)
8476 CCInfo.AnalyzeFormalArguments(Ins, CC_RISCV_GHC);
8477 else
8478 analyzeInputArgs(MF, CCInfo, Ins, /*IsRet=*/false,
8479 CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8480 : CC_RISCV);
8481
8482 for (unsigned i = 0, e = ArgLocs.size(); i != e; ++i) {
8483 CCValAssign &VA = ArgLocs[i];
8484 SDValue ArgValue;
8485 // Passing f64 on RV32D with a soft float ABI must be handled as a special
8486 // case.
8487 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64)
8488 ArgValue = unpackF64OnRV32DSoftABI(DAG, Chain, VA, DL);
8489 else if (VA.isRegLoc())
8490 ArgValue = unpackFromRegLoc(DAG, Chain, VA, DL, *this);
8491 else
8492 ArgValue = unpackFromMemLoc(DAG, Chain, VA, DL);
8493
8494 if (VA.getLocInfo() == CCValAssign::Indirect) {
8495 // If the original argument was split and passed by reference (e.g. i128
8496 // on RV32), we need to load all parts of it here (using the same
8497 // address). Vectors may be partly split to registers and partly to the
8498 // stack, in which case the base address is partly offset and subsequent
8499 // stores are relative to that.
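// Illustrative example: an i128 argument on RV32 is passed as a pointer in
// a single GPR; its four i32 parts are then loaded here from that base
// address at part offsets 0, 4, 8 and 12.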
8500 InVals.push_back(DAG.getLoad(VA.getValVT(), DL, Chain, ArgValue,
8501 MachinePointerInfo()));
8502 unsigned ArgIndex = Ins[i].OrigArgIndex;
8503 unsigned ArgPartOffset = Ins[i].PartOffset;
8504 assert(VA.getValVT().isVector() || ArgPartOffset == 0);
8505 while (i + 1 != e && Ins[i + 1].OrigArgIndex == ArgIndex) {
8506 CCValAssign &PartVA = ArgLocs[i + 1];
8507 unsigned PartOffset = Ins[i + 1].PartOffset - ArgPartOffset;
8508 SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8509 if (PartVA.getValVT().isScalableVector())
8510 Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8511 SDValue Address = DAG.getNode(ISD::ADD, DL, PtrVT, ArgValue, Offset);
8512 InVals.push_back(DAG.getLoad(PartVA.getValVT(), DL, Chain, Address,
8513 MachinePointerInfo()));
8514 ++i;
8515 }
8516 continue;
8517 }
8518 InVals.push_back(ArgValue);
8519 }
8520
8521 if (IsVarArg) {
8522 ArrayRef<MCPhysReg> ArgRegs = makeArrayRef(ArgGPRs);
8523 unsigned Idx = CCInfo.getFirstUnallocated(ArgRegs);
8524 const TargetRegisterClass *RC = &RISCV::GPRRegClass;
8525 MachineFrameInfo &MFI = MF.getFrameInfo();
8526 MachineRegisterInfo &RegInfo = MF.getRegInfo();
8527 RISCVMachineFunctionInfo *RVFI = MF.getInfo<RISCVMachineFunctionInfo>();
8528
8529 // Offset of the first variable argument from stack pointer, and size of
8530 // the vararg save area. For now, the varargs save area is either zero or
8531 // large enough to hold a0-a7.
8532 int VaArgOffset, VarArgsSaveSize;
8533
8534 // If all registers are allocated, then all varargs must be passed on the
8535 // stack and we don't need to save any argregs.
8536 if (ArgRegs.size() == Idx) {
8537 VaArgOffset = CCInfo.getNextStackOffset();
8538 VarArgsSaveSize = 0;
8539 } else {
8540 VarArgsSaveSize = XLenInBytes * (ArgRegs.size() - Idx);
8541 VaArgOffset = -VarArgsSaveSize;
8542 }
8543
8544 // Record the frame index of the first variable argument,
8545 // which is needed when lowering VASTART.
8546 int FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8547 RVFI->setVarArgsFrameIndex(FI);
8548
8549 // If saving an odd number of registers then create an extra stack slot to
8550 // ensure that the frame pointer is 2*XLEN-aligned, which in turn ensures
8551 // offsets to even-numbered registers remain 2*XLEN-aligned.
8552 if (Idx % 2) {
8553 MFI.CreateFixedObject(XLenInBytes, VaArgOffset - (int)XLenInBytes, true);
8554 VarArgsSaveSize += XLenInBytes;
8555 }
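// Worked example (illustrative): for int f(int a, ...) on RV32, Idx is 1, so
// a1-a7 are saved (VarArgsSaveSize = 28) at offsets -28..-4, the vararg
// frame index sits at -28, and because Idx is odd an extra 4-byte slot at
// -32 pads the save area to 32 bytes.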
8556
8557 // Copy the integer registers that may have been used for passing varargs
8558 // to the vararg save area.
8559 for (unsigned I = Idx; I < ArgRegs.size();
8560 ++I, VaArgOffset += XLenInBytes) {
8561 const Register Reg = RegInfo.createVirtualRegister(RC);
8562 RegInfo.addLiveIn(ArgRegs[I], Reg);
8563 SDValue ArgValue = DAG.getCopyFromReg(Chain, DL, Reg, XLenVT);
8564 FI = MFI.CreateFixedObject(XLenInBytes, VaArgOffset, true);
8565 SDValue PtrOff = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8566 SDValue Store = DAG.getStore(Chain, DL, ArgValue, PtrOff,
8567 MachinePointerInfo::getFixedStack(MF, FI));
8568 cast<StoreSDNode>(Store.getNode())
8569 ->getMemOperand()
8570 ->setValue((Value *)nullptr);
8571 OutChains.push_back(Store);
8572 }
8573 RVFI->setVarArgsSaveSize(VarArgsSaveSize);
8574 }
8575
8576 // All stores are grouped in one node to allow the matching between
8577 // the size of Ins and InVals. This only happens for vararg functions.
8578 if (!OutChains.empty()) {
8579 OutChains.push_back(Chain);
8580 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, OutChains);
8581 }
8582
8583 return Chain;
8584 }
8585
8586 /// isEligibleForTailCallOptimization - Check whether the call is eligible
8587 /// for tail call optimization.
8588 /// Note: This is modelled after ARM's IsEligibleForTailCallOptimization.
8589 bool RISCVTargetLowering::isEligibleForTailCallOptimization(
8590 CCState &CCInfo, CallLoweringInfo &CLI, MachineFunction &MF,
8591 const SmallVector<CCValAssign, 16> &ArgLocs) const {
8592
8593 auto &Callee = CLI.Callee;
8594 auto CalleeCC = CLI.CallConv;
8595 auto &Outs = CLI.Outs;
8596 auto &Caller = MF.getFunction();
8597 auto CallerCC = Caller.getCallingConv();
8598
8599 // Exception-handling functions need a special set of instructions to
8600 // indicate a return to the hardware. Tail-calling another function would
8601 // probably break this.
8602 // TODO: The "interrupt" attribute isn't currently defined by RISC-V. This
8603 // should be expanded as new function attributes are introduced.
8604 if (Caller.hasFnAttribute("interrupt"))
8605 return false;
8606
8607 // Do not tail call opt if the stack is used to pass parameters.
8608 if (CCInfo.getNextStackOffset() != 0)
8609 return false;
8610
8611 // Do not tail call opt if any parameters need to be passed indirectly.
8612 // Since long doubles (fp128) and i128 are larger than 2*XLEN, they are
8613 // passed indirectly: the address of the value is passed in a register, or
8614 // put on the stack if no register is available. Passing indirectly often
8615 // requires allocating stack space to store the value, so the
8616 // CCInfo.getNextStackOffset() != 0 check above is not enough; we must also
8617 // check whether any of the CCValAssigns in ArgLocs use
8618 // CCValAssign::Indirect.
8619 for (auto &VA : ArgLocs)
8620 if (VA.getLocInfo() == CCValAssign::Indirect)
8621 return false;
8622
8623 // Do not tail call opt if either caller or callee uses struct return
8624 // semantics.
8625 auto IsCallerStructRet = Caller.hasStructRetAttr();
8626 auto IsCalleeStructRet = Outs.empty() ? false : Outs[0].Flags.isSRet();
8627 if (IsCallerStructRet || IsCalleeStructRet)
8628 return false;
8629
8630 // Externally-defined functions with weak linkage should not be
8631 // tail-called. The behaviour of branch instructions in this situation (as
8632 // used for tail calls) is implementation-defined, so we cannot rely on the
8633 // linker replacing the tail call with a return.
8634 if (GlobalAddressSDNode *G = dyn_cast<GlobalAddressSDNode>(Callee)) {
8635 const GlobalValue *GV = G->getGlobal();
8636 if (GV->hasExternalWeakLinkage())
8637 return false;
8638 }
8639
8640 // The callee has to preserve all registers the caller needs to preserve.
8641 const RISCVRegisterInfo *TRI = Subtarget.getRegisterInfo();
8642 const uint32_t *CallerPreserved = TRI->getCallPreservedMask(MF, CallerCC);
8643 if (CalleeCC != CallerCC) {
8644 const uint32_t *CalleePreserved = TRI->getCallPreservedMask(MF, CalleeCC);
8645 if (!TRI->regmaskSubsetEqual(CallerPreserved, CalleePreserved))
8646 return false;
8647 }
8648
8649 // Byval parameters hand the function a pointer directly into the stack area
8650 // we want to reuse during a tail call. Working around this *is* possible
8651 // but less efficient and uglier in LowerCall.
8652 for (auto &Arg : Outs)
8653 if (Arg.Flags.isByVal())
8654 return false;
8655
8656 return true;
8657 }
8658
8659 static Align getPrefTypeAlign(EVT VT, SelectionDAG &DAG) {
8660 return DAG.getDataLayout().getPrefTypeAlign(
8661 VT.getTypeForEVT(*DAG.getContext()));
8662 }
8663
8664 // Lower a call to a callseq_start + CALL + callseq_end chain, and add input
8665 // and output parameter nodes.
8666 SDValue RISCVTargetLowering::LowerCall(CallLoweringInfo &CLI,
8667 SmallVectorImpl<SDValue> &InVals) const {
8668 SelectionDAG &DAG = CLI.DAG;
8669 SDLoc &DL = CLI.DL;
8670 SmallVectorImpl<ISD::OutputArg> &Outs = CLI.Outs;
8671 SmallVectorImpl<SDValue> &OutVals = CLI.OutVals;
8672 SmallVectorImpl<ISD::InputArg> &Ins = CLI.Ins;
8673 SDValue Chain = CLI.Chain;
8674 SDValue Callee = CLI.Callee;
8675 bool &IsTailCall = CLI.IsTailCall;
8676 CallingConv::ID CallConv = CLI.CallConv;
8677 bool IsVarArg = CLI.IsVarArg;
8678 EVT PtrVT = getPointerTy(DAG.getDataLayout());
8679 MVT XLenVT = Subtarget.getXLenVT();
8680
8681 MachineFunction &MF = DAG.getMachineFunction();
8682
8683 // Analyze the operands of the call, assigning locations to each operand.
8684 SmallVector<CCValAssign, 16> ArgLocs;
8685 CCState ArgCCInfo(CallConv, IsVarArg, MF, ArgLocs, *DAG.getContext());
8686
8687 if (CallConv == CallingConv::GHC)
8688 ArgCCInfo.AnalyzeCallOperands(Outs, CC_RISCV_GHC);
8689 else
8690 analyzeOutputArgs(MF, ArgCCInfo, Outs, /*IsRet=*/false, &CLI,
8691 CallConv == CallingConv::Fast ? CC_RISCV_FastCC
8692 : CC_RISCV);
8693
8694 // Check if it's really possible to do a tail call.
8695 if (IsTailCall)
8696 IsTailCall = isEligibleForTailCallOptimization(ArgCCInfo, CLI, MF, ArgLocs);
8697
8698 if (IsTailCall)
8699 ++NumTailCalls;
8700 else if (CLI.CB && CLI.CB->isMustTailCall())
8701 report_fatal_error("failed to perform tail call elimination on a call "
8702 "site marked musttail");
8703
8704 // Get a count of how many bytes are to be pushed on the stack.
8705 unsigned NumBytes = ArgCCInfo.getNextStackOffset();
8706
8707 // Create local copies for byval args
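// Illustrative example: for a byval argument such as a large struct passed
// by value, the memcpy below materialises a caller-side copy in a fresh
// stack object, and the copy's address (FIPtr) is what actually gets passed.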
8708 SmallVector<SDValue, 8> ByValArgs;
8709 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8710 ISD::ArgFlagsTy Flags = Outs[i].Flags;
8711 if (!Flags.isByVal())
8712 continue;
8713
8714 SDValue Arg = OutVals[i];
8715 unsigned Size = Flags.getByValSize();
8716 Align Alignment = Flags.getNonZeroByValAlign();
8717
8718 int FI =
8719 MF.getFrameInfo().CreateStackObject(Size, Alignment, /*isSS=*/false);
8720 SDValue FIPtr = DAG.getFrameIndex(FI, getPointerTy(DAG.getDataLayout()));
8721 SDValue SizeNode = DAG.getConstant(Size, DL, XLenVT);
8722
8723 Chain = DAG.getMemcpy(Chain, DL, FIPtr, Arg, SizeNode, Alignment,
8724 /*IsVolatile=*/false,
8725 /*AlwaysInline=*/false, IsTailCall,
8726 MachinePointerInfo(), MachinePointerInfo());
8727 ByValArgs.push_back(FIPtr);
8728 }
8729
8730 if (!IsTailCall)
8731 Chain = DAG.getCALLSEQ_START(Chain, NumBytes, 0, CLI.DL);
8732
8733 // Copy argument values to their designated locations.
8734 SmallVector<std::pair<Register, SDValue>, 8> RegsToPass;
8735 SmallVector<SDValue, 8> MemOpChains;
8736 SDValue StackPtr;
8737 for (unsigned i = 0, j = 0, e = ArgLocs.size(); i != e; ++i) {
8738 CCValAssign &VA = ArgLocs[i];
8739 SDValue ArgValue = OutVals[i];
8740 ISD::ArgFlagsTy Flags = Outs[i].Flags;
8741
8742 // Handle passing f64 on RV32D with a soft float ABI as a special case.
8743 bool IsF64OnRV32DSoftABI =
8744 VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64;
8745 if (IsF64OnRV32DSoftABI && VA.isRegLoc()) {
8746 SDValue SplitF64 = DAG.getNode(
8747 RISCVISD::SplitF64, DL, DAG.getVTList(MVT::i32, MVT::i32), ArgValue);
8748 SDValue Lo = SplitF64.getValue(0);
8749 SDValue Hi = SplitF64.getValue(1);
8750
8751 Register RegLo = VA.getLocReg();
8752 RegsToPass.push_back(std::make_pair(RegLo, Lo));
8753
8754 if (RegLo == RISCV::X17) {
8755 // Second half of f64 is passed on the stack.
8756 // Work out the address of the stack slot.
8757 if (!StackPtr.getNode())
8758 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8759 // Emit the store.
8760 MemOpChains.push_back(
8761 DAG.getStore(Chain, DL, Hi, StackPtr, MachinePointerInfo()));
8762 } else {
8763 // Second half of f64 is passed in another GPR.
8764 assert(RegLo < RISCV::X31 && "Invalid register pair");
8765 Register RegHigh = RegLo + 1;
8766 RegsToPass.push_back(std::make_pair(RegHigh, Hi));
8767 }
8768 continue;
8769 }
8770
8771 // IsF64OnRV32DSoftABI && VA.isMemLoc() is handled below in the same way
8772 // as any other MemLoc.
8773
8774 // Promote the value if needed.
8775 // For now, only handle fully promoted and indirect arguments.
8776 if (VA.getLocInfo() == CCValAssign::Indirect) {
8777 // Store the argument in a stack slot and pass its address.
8778 Align StackAlign =
8779 std::max(getPrefTypeAlign(Outs[i].ArgVT, DAG),
8780 getPrefTypeAlign(ArgValue.getValueType(), DAG));
8781 TypeSize StoredSize = ArgValue.getValueType().getStoreSize();
8782 // If the original argument was split (e.g. i128), we need
8783 // to store the required parts of it here (and pass just one address).
8784 // Vectors may be partly split to registers and partly to the stack, in
8785 // which case the base address is partly offset and subsequent stores are
8786 // relative to that.
8787 unsigned ArgIndex = Outs[i].OrigArgIndex;
8788 unsigned ArgPartOffset = Outs[i].PartOffset;
8789 assert(VA.getValVT().isVector() || ArgPartOffset == 0);
8790 // Calculate the total size to store. We don't have access to what we're
8791 // actually storing other than performing the loop and collecting the
8792 // info.
8793 SmallVector<std::pair<SDValue, SDValue>> Parts;
8794 while (i + 1 != e && Outs[i + 1].OrigArgIndex == ArgIndex) {
8795 SDValue PartValue = OutVals[i + 1];
8796 unsigned PartOffset = Outs[i + 1].PartOffset - ArgPartOffset;
8797 SDValue Offset = DAG.getIntPtrConstant(PartOffset, DL);
8798 EVT PartVT = PartValue.getValueType();
8799 if (PartVT.isScalableVector())
8800 Offset = DAG.getNode(ISD::VSCALE, DL, XLenVT, Offset);
8801 StoredSize += PartVT.getStoreSize();
8802 StackAlign = std::max(StackAlign, getPrefTypeAlign(PartVT, DAG));
8803 Parts.push_back(std::make_pair(PartValue, Offset));
8804 ++i;
8805 }
8806 SDValue SpillSlot = DAG.CreateStackTemporary(StoredSize, StackAlign);
8807 int FI = cast<FrameIndexSDNode>(SpillSlot)->getIndex();
8808 MemOpChains.push_back(
8809 DAG.getStore(Chain, DL, ArgValue, SpillSlot,
8810 MachinePointerInfo::getFixedStack(MF, FI)));
8811 for (const auto &Part : Parts) {
8812 SDValue PartValue = Part.first;
8813 SDValue PartOffset = Part.second;
8814 SDValue Address =
8815 DAG.getNode(ISD::ADD, DL, PtrVT, SpillSlot, PartOffset);
8816 MemOpChains.push_back(
8817 DAG.getStore(Chain, DL, PartValue, Address,
8818 MachinePointerInfo::getFixedStack(MF, FI)));
8819 }
8820 ArgValue = SpillSlot;
8821 } else {
8822 ArgValue = convertValVTToLocVT(DAG, ArgValue, VA, DL, Subtarget);
8823 }
8824
8825 // Use local copy if it is a byval arg.
8826 if (Flags.isByVal())
8827 ArgValue = ByValArgs[j++];
8828
8829 if (VA.isRegLoc()) {
8830 // Queue up the argument copies and emit them at the end.
8831 RegsToPass.push_back(std::make_pair(VA.getLocReg(), ArgValue));
8832 } else {
8833 assert(VA.isMemLoc() && "Argument not register or memory");
8834 assert(!IsTailCall && "Tail call not allowed if stack is used "
8835 "for passing parameters");
8836
8837 // Work out the address of the stack slot.
8838 if (!StackPtr.getNode())
8839 StackPtr = DAG.getCopyFromReg(Chain, DL, RISCV::X2, PtrVT);
8840 SDValue Address =
8841 DAG.getNode(ISD::ADD, DL, PtrVT, StackPtr,
8842 DAG.getIntPtrConstant(VA.getLocMemOffset(), DL));
8843
8844 // Emit the store.
8845 MemOpChains.push_back(
8846 DAG.getStore(Chain, DL, ArgValue, Address, MachinePointerInfo()));
8847 }
8848 }
8849
8850 // Join the stores, which are independent of one another.
8851 if (!MemOpChains.empty())
8852 Chain = DAG.getNode(ISD::TokenFactor, DL, MVT::Other, MemOpChains);
8853
8854 SDValue Glue;
8855
8856 // Build a sequence of copy-to-reg nodes, chained and glued together.
8857 for (auto &Reg : RegsToPass) {
8858 Chain = DAG.getCopyToReg(Chain, DL, Reg.first, Reg.second, Glue);
8859 Glue = Chain.getValue(1);
8860 }
8861
8862 // Validate that none of the argument registers have been marked as
8863 // reserved; if any have, report an error. Do the same for the return
8864 // address if this is not a tail call.
8865 validateCCReservedRegs(RegsToPass, MF);
8866 if (!IsTailCall &&
8867 MF.getSubtarget<RISCVSubtarget>().isRegisterReservedByUser(RISCV::X1))
8868 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
8869 MF.getFunction(),
8870 "Return address register required, but has been reserved."});
8871
8872 // If the callee is a GlobalAddress/ExternalSymbol node, turn it into a
8873 // TargetGlobalAddress/TargetExternalSymbol node so that legalize won't
8874 // split it and then direct call can be matched by PseudoCALL.
8875 if (GlobalAddressSDNode *S = dyn_cast<GlobalAddressSDNode>(Callee)) {
8876 const GlobalValue *GV = S->getGlobal();
8877
8878 unsigned OpFlags = RISCVII::MO_CALL;
8879 if (!getTargetMachine().shouldAssumeDSOLocal(*GV->getParent(), GV))
8880 OpFlags = RISCVII::MO_PLT;
8881
8882 Callee = DAG.getTargetGlobalAddress(GV, DL, PtrVT, 0, OpFlags);
8883 } else if (ExternalSymbolSDNode *S = dyn_cast<ExternalSymbolSDNode>(Callee)) {
8884 unsigned OpFlags = RISCVII::MO_CALL;
8885
8886 if (!getTargetMachine().shouldAssumeDSOLocal(*MF.getFunction().getParent(),
8887 nullptr))
8888 OpFlags = RISCVII::MO_PLT;
8889
8890 Callee = DAG.getTargetExternalSymbol(S->getSymbol(), PtrVT, OpFlags);
8891 }
8892
8893 // The first call operand is the chain and the second is the target address.
8894 SmallVector<SDValue, 8> Ops;
8895 Ops.push_back(Chain);
8896 Ops.push_back(Callee);
8897
8898 // Add argument registers to the end of the list so that they are
8899 // known live into the call.
8900 for (auto &Reg : RegsToPass)
8901 Ops.push_back(DAG.getRegister(Reg.first, Reg.second.getValueType()));
8902
8903 if (!IsTailCall) {
8904 // Add a register mask operand representing the call-preserved registers.
8905 const TargetRegisterInfo *TRI = Subtarget.getRegisterInfo();
8906 const uint32_t *Mask = TRI->getCallPreservedMask(MF, CallConv);
8907 assert(Mask && "Missing call preserved mask for calling convention");
8908 Ops.push_back(DAG.getRegisterMask(Mask));
8909 }
8910
8911 // Glue the call to the argument copies, if any.
8912 if (Glue.getNode())
8913 Ops.push_back(Glue);
8914
8915 // Emit the call.
8916 SDVTList NodeTys = DAG.getVTList(MVT::Other, MVT::Glue);
8917
8918 if (IsTailCall) {
8919 MF.getFrameInfo().setHasTailCall();
8920 return DAG.getNode(RISCVISD::TAIL, DL, NodeTys, Ops);
8921 }
8922
8923 Chain = DAG.getNode(RISCVISD::CALL, DL, NodeTys, Ops);
8924 DAG.addNoMergeSiteInfo(Chain.getNode(), CLI.NoMerge);
8925 Glue = Chain.getValue(1);
8926
8927 // Mark the end of the call, which is glued to the call itself.
8928 Chain = DAG.getCALLSEQ_END(Chain,
8929 DAG.getConstant(NumBytes, DL, PtrVT, true),
8930 DAG.getConstant(0, DL, PtrVT, true),
8931 Glue, DL);
8932 Glue = Chain.getValue(1);
8933
8934 // Assign locations to each value returned by this call.
8935 SmallVector<CCValAssign, 16> RVLocs;
8936 CCState RetCCInfo(CallConv, IsVarArg, MF, RVLocs, *DAG.getContext());
8937 analyzeInputArgs(MF, RetCCInfo, Ins, /*IsRet=*/true, CC_RISCV);
8938
8939 // Copy all of the result registers out of their specified physreg.
8940 for (auto &VA : RVLocs) {
8941 // Copy the value out
8942 SDValue RetValue =
8943 DAG.getCopyFromReg(Chain, DL, VA.getLocReg(), VA.getLocVT(), Glue);
8944 // Glue the RetValue to the end of the call sequence
8945 Chain = RetValue.getValue(1);
8946 Glue = RetValue.getValue(2);
8947
8948 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
8949 assert(VA.getLocReg() == ArgGPRs[0] && "Unexpected reg assignment");
8950 SDValue RetValue2 =
8951 DAG.getCopyFromReg(Chain, DL, ArgGPRs[1], MVT::i32, Glue);
8952 Chain = RetValue2.getValue(1);
8953 Glue = RetValue2.getValue(2);
8954 RetValue = DAG.getNode(RISCVISD::BuildPairF64, DL, MVT::f64, RetValue,
8955 RetValue2);
8956 }
8957
8958 RetValue = convertLocVTToValVT(DAG, RetValue, VA, DL, Subtarget);
8959
8960 InVals.push_back(RetValue);
8961 }
8962
8963 return Chain;
8964 }
8965
8966 bool RISCVTargetLowering::CanLowerReturn(
8967 CallingConv::ID CallConv, MachineFunction &MF, bool IsVarArg,
8968 const SmallVectorImpl<ISD::OutputArg> &Outs, LLVMContext &Context) const {
8969 SmallVector<CCValAssign, 16> RVLocs;
8970 CCState CCInfo(CallConv, IsVarArg, MF, RVLocs, Context);
8971
8972 Optional<unsigned> FirstMaskArgument;
8973 if (Subtarget.hasStdExtV())
8974 FirstMaskArgument = preAssignMask(Outs);
8975
8976 for (unsigned i = 0, e = Outs.size(); i != e; ++i) {
8977 MVT VT = Outs[i].VT;
8978 ISD::ArgFlagsTy ArgFlags = Outs[i].Flags;
8979 RISCVABI::ABI ABI = MF.getSubtarget<RISCVSubtarget>().getTargetABI();
8980 if (CC_RISCV(MF.getDataLayout(), ABI, i, VT, VT, CCValAssign::Full,
8981 ArgFlags, CCInfo, /*IsFixed=*/true, /*IsRet=*/true, nullptr,
8982 *this, FirstMaskArgument))
8983 return false;
8984 }
8985 return true;
8986 }
8987
8988 SDValue
8989 RISCVTargetLowering::LowerReturn(SDValue Chain, CallingConv::ID CallConv,
8990 bool IsVarArg,
8991 const SmallVectorImpl<ISD::OutputArg> &Outs,
8992 const SmallVectorImpl<SDValue> &OutVals,
8993 const SDLoc &DL, SelectionDAG &DAG) const {
8994 const MachineFunction &MF = DAG.getMachineFunction();
8995 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
8996
8997 // Stores the assignment of the return value to a location.
8998 SmallVector<CCValAssign, 16> RVLocs;
8999
9000 // Info about the registers and stack slot.
9001 CCState CCInfo(CallConv, IsVarArg, DAG.getMachineFunction(), RVLocs,
9002 *DAG.getContext());
9003
9004 analyzeOutputArgs(DAG.getMachineFunction(), CCInfo, Outs, /*IsRet=*/true,
9005 nullptr, CC_RISCV);
9006
9007 if (CallConv == CallingConv::GHC && !RVLocs.empty())
9008 report_fatal_error("GHC functions return void only");
9009
9010 SDValue Glue;
9011 SmallVector<SDValue, 4> RetOps(1, Chain);
9012
9013 // Copy the result values into the output registers.
9014 for (unsigned i = 0, e = RVLocs.size(); i < e; ++i) {
9015 SDValue Val = OutVals[i];
9016 CCValAssign &VA = RVLocs[i];
9017 assert(VA.isRegLoc() && "Can only return in registers!");
9018
9019 if (VA.getLocVT() == MVT::i32 && VA.getValVT() == MVT::f64) {
9020 // Handle returning f64 on RV32D with a soft float ABI.
9021 assert(VA.isRegLoc() && "Expected return via registers");
9022 SDValue SplitF64 = DAG.getNode(RISCVISD::SplitF64, DL,
9023 DAG.getVTList(MVT::i32, MVT::i32), Val);
9024 SDValue Lo = SplitF64.getValue(0);
9025 SDValue Hi = SplitF64.getValue(1);
9026 Register RegLo = VA.getLocReg();
9027 assert(RegLo < RISCV::X31 && "Invalid register pair");
9028 Register RegHi = RegLo + 1;
9029
9030 if (STI.isRegisterReservedByUser(RegLo) ||
9031 STI.isRegisterReservedByUser(RegHi))
9032 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9033 MF.getFunction(),
9034 "Return value register required, but has been reserved."});
9035
9036 Chain = DAG.getCopyToReg(Chain, DL, RegLo, Lo, Glue);
9037 Glue = Chain.getValue(1);
9038 RetOps.push_back(DAG.getRegister(RegLo, MVT::i32));
9039 Chain = DAG.getCopyToReg(Chain, DL, RegHi, Hi, Glue);
9040 Glue = Chain.getValue(1);
9041 RetOps.push_back(DAG.getRegister(RegHi, MVT::i32));
9042 } else {
9043 // Handle a 'normal' return.
9044 Val = convertValVTToLocVT(DAG, Val, VA, DL, Subtarget);
9045 Chain = DAG.getCopyToReg(Chain, DL, VA.getLocReg(), Val, Glue);
9046
9047 if (STI.isRegisterReservedByUser(VA.getLocReg()))
9048 MF.getFunction().getContext().diagnose(DiagnosticInfoUnsupported{
9049 MF.getFunction(),
9050 "Return value register required, but has been reserved."});
9051
9052 // Guarantee that all emitted copies are stuck together.
9053 Glue = Chain.getValue(1);
9054 RetOps.push_back(DAG.getRegister(VA.getLocReg(), VA.getLocVT()));
9055 }
9056 }
9057
9058 RetOps[0] = Chain; // Update chain.
9059
9060 // Add the glue node if we have it.
9061 if (Glue.getNode()) {
9062 RetOps.push_back(Glue);
9063 }
9064
9065 unsigned RetOpc = RISCVISD::RET_FLAG;
9066 // Interrupt service routines use different return instructions.
9067 const Function &Func = DAG.getMachineFunction().getFunction();
9068 if (Func.hasFnAttribute("interrupt")) {
9069 if (!Func.getReturnType()->isVoidTy())
9070 report_fatal_error(
9071 "Functions with the interrupt attribute must have void return type!");
9072
9073 MachineFunction &MF = DAG.getMachineFunction();
9074 StringRef Kind =
9075 MF.getFunction().getFnAttribute("interrupt").getValueAsString();
9076
9077 if (Kind == "user")
9078 RetOpc = RISCVISD::URET_FLAG;
9079 else if (Kind == "supervisor")
9080 RetOpc = RISCVISD::SRET_FLAG;
9081 else
9082 RetOpc = RISCVISD::MRET_FLAG;
9083 }
9084
9085 return DAG.getNode(RetOpc, DL, MVT::Other, RetOps);
9086 }
9087
9088 void RISCVTargetLowering::validateCCReservedRegs(
9089 const SmallVectorImpl<std::pair<llvm::Register, llvm::SDValue>> &Regs,
9090 MachineFunction &MF) const {
9091 const Function &F = MF.getFunction();
9092 const RISCVSubtarget &STI = MF.getSubtarget<RISCVSubtarget>();
9093
9094 if (llvm::any_of(Regs, [&STI](auto Reg) {
9095 return STI.isRegisterReservedByUser(Reg.first);
9096 }))
9097 F.getContext().diagnose(DiagnosticInfoUnsupported{
9098 F, "Argument register required, but has been reserved."});
9099 }
9100
9101 bool RISCVTargetLowering::mayBeEmittedAsTailCall(const CallInst *CI) const {
9102 return CI->isTailCall();
9103 }
9104
9105 const char *RISCVTargetLowering::getTargetNodeName(unsigned Opcode) const {
9106 #define NODE_NAME_CASE(NODE) \
9107 case RISCVISD::NODE: \
9108 return "RISCVISD::" #NODE;
9109 // clang-format off
9110 switch ((RISCVISD::NodeType)Opcode) {
9111 case RISCVISD::FIRST_NUMBER:
9112 break;
9113 NODE_NAME_CASE(RET_FLAG)
9114 NODE_NAME_CASE(URET_FLAG)
9115 NODE_NAME_CASE(SRET_FLAG)
9116 NODE_NAME_CASE(MRET_FLAG)
9117 NODE_NAME_CASE(CALL)
9118 NODE_NAME_CASE(SELECT_CC)
9119 NODE_NAME_CASE(BR_CC)
9120 NODE_NAME_CASE(BuildPairF64)
9121 NODE_NAME_CASE(SplitF64)
9122 NODE_NAME_CASE(TAIL)
9123 NODE_NAME_CASE(MULHSU)
9124 NODE_NAME_CASE(SLLW)
9125 NODE_NAME_CASE(SRAW)
9126 NODE_NAME_CASE(SRLW)
9127 NODE_NAME_CASE(DIVW)
9128 NODE_NAME_CASE(DIVUW)
9129 NODE_NAME_CASE(REMUW)
9130 NODE_NAME_CASE(ROLW)
9131 NODE_NAME_CASE(RORW)
9132 NODE_NAME_CASE(CLZW)
9133 NODE_NAME_CASE(CTZW)
9134 NODE_NAME_CASE(FSLW)
9135 NODE_NAME_CASE(FSRW)
9136 NODE_NAME_CASE(FSL)
9137 NODE_NAME_CASE(FSR)
9138 NODE_NAME_CASE(FMV_H_X)
9139 NODE_NAME_CASE(FMV_X_ANYEXTH)
9140 NODE_NAME_CASE(FMV_W_X_RV64)
9141 NODE_NAME_CASE(FMV_X_ANYEXTW_RV64)
9142 NODE_NAME_CASE(FCVT_X_RTZ)
9143 NODE_NAME_CASE(FCVT_XU_RTZ)
9144 NODE_NAME_CASE(FCVT_W_RTZ_RV64)
9145 NODE_NAME_CASE(FCVT_WU_RTZ_RV64)
9146 NODE_NAME_CASE(READ_CYCLE_WIDE)
9147 NODE_NAME_CASE(GREV)
9148 NODE_NAME_CASE(GREVW)
9149 NODE_NAME_CASE(GORC)
9150 NODE_NAME_CASE(GORCW)
9151 NODE_NAME_CASE(SHFL)
9152 NODE_NAME_CASE(SHFLW)
9153 NODE_NAME_CASE(UNSHFL)
9154 NODE_NAME_CASE(UNSHFLW)
9155 NODE_NAME_CASE(BCOMPRESS)
9156 NODE_NAME_CASE(BCOMPRESSW)
9157 NODE_NAME_CASE(BDECOMPRESS)
9158 NODE_NAME_CASE(BDECOMPRESSW)
9159 NODE_NAME_CASE(VMV_V_X_VL)
9160 NODE_NAME_CASE(VFMV_V_F_VL)
9161 NODE_NAME_CASE(VMV_X_S)
9162 NODE_NAME_CASE(VMV_S_X_VL)
9163 NODE_NAME_CASE(VFMV_S_F_VL)
9164 NODE_NAME_CASE(SPLAT_VECTOR_I64)
9165 NODE_NAME_CASE(SPLAT_VECTOR_SPLIT_I64_VL)
9166 NODE_NAME_CASE(READ_VLENB)
9167 NODE_NAME_CASE(TRUNCATE_VECTOR_VL)
9168 NODE_NAME_CASE(VSLIDEUP_VL)
9169 NODE_NAME_CASE(VSLIDE1UP_VL)
9170 NODE_NAME_CASE(VSLIDEDOWN_VL)
9171 NODE_NAME_CASE(VSLIDE1DOWN_VL)
9172 NODE_NAME_CASE(VID_VL)
9173 NODE_NAME_CASE(VFNCVT_ROD_VL)
9174 NODE_NAME_CASE(VECREDUCE_ADD_VL)
9175 NODE_NAME_CASE(VECREDUCE_UMAX_VL)
9176 NODE_NAME_CASE(VECREDUCE_SMAX_VL)
9177 NODE_NAME_CASE(VECREDUCE_UMIN_VL)
9178 NODE_NAME_CASE(VECREDUCE_SMIN_VL)
9179 NODE_NAME_CASE(VECREDUCE_AND_VL)
9180 NODE_NAME_CASE(VECREDUCE_OR_VL)
9181 NODE_NAME_CASE(VECREDUCE_XOR_VL)
9182 NODE_NAME_CASE(VECREDUCE_FADD_VL)
9183 NODE_NAME_CASE(VECREDUCE_SEQ_FADD_VL)
9184 NODE_NAME_CASE(VECREDUCE_FMIN_VL)
9185 NODE_NAME_CASE(VECREDUCE_FMAX_VL)
9186 NODE_NAME_CASE(ADD_VL)
9187 NODE_NAME_CASE(AND_VL)
9188 NODE_NAME_CASE(MUL_VL)
9189 NODE_NAME_CASE(OR_VL)
9190 NODE_NAME_CASE(SDIV_VL)
9191 NODE_NAME_CASE(SHL_VL)
9192 NODE_NAME_CASE(SREM_VL)
9193 NODE_NAME_CASE(SRA_VL)
9194 NODE_NAME_CASE(SRL_VL)
9195 NODE_NAME_CASE(SUB_VL)
9196 NODE_NAME_CASE(UDIV_VL)
9197 NODE_NAME_CASE(UREM_VL)
9198 NODE_NAME_CASE(XOR_VL)
9199 NODE_NAME_CASE(SADDSAT_VL)
9200 NODE_NAME_CASE(UADDSAT_VL)
9201 NODE_NAME_CASE(SSUBSAT_VL)
9202 NODE_NAME_CASE(USUBSAT_VL)
9203 NODE_NAME_CASE(FADD_VL)
9204 NODE_NAME_CASE(FSUB_VL)
9205 NODE_NAME_CASE(FMUL_VL)
9206 NODE_NAME_CASE(FDIV_VL)
9207 NODE_NAME_CASE(FNEG_VL)
9208 NODE_NAME_CASE(FABS_VL)
9209 NODE_NAME_CASE(FSQRT_VL)
9210 NODE_NAME_CASE(FMA_VL)
9211 NODE_NAME_CASE(FCOPYSIGN_VL)
9212 NODE_NAME_CASE(SMIN_VL)
9213 NODE_NAME_CASE(SMAX_VL)
9214 NODE_NAME_CASE(UMIN_VL)
9215 NODE_NAME_CASE(UMAX_VL)
9216 NODE_NAME_CASE(FMINNUM_VL)
9217 NODE_NAME_CASE(FMAXNUM_VL)
9218 NODE_NAME_CASE(MULHS_VL)
9219 NODE_NAME_CASE(MULHU_VL)
9220 NODE_NAME_CASE(FP_TO_SINT_VL)
9221 NODE_NAME_CASE(FP_TO_UINT_VL)
9222 NODE_NAME_CASE(SINT_TO_FP_VL)
9223 NODE_NAME_CASE(UINT_TO_FP_VL)
9224 NODE_NAME_CASE(FP_EXTEND_VL)
9225 NODE_NAME_CASE(FP_ROUND_VL)
9226 NODE_NAME_CASE(VWMUL_VL)
9227 NODE_NAME_CASE(VWMULU_VL)
9228 NODE_NAME_CASE(SETCC_VL)
9229 NODE_NAME_CASE(VSELECT_VL)
9230 NODE_NAME_CASE(VMAND_VL)
9231 NODE_NAME_CASE(VMOR_VL)
9232 NODE_NAME_CASE(VMXOR_VL)
9233 NODE_NAME_CASE(VMCLR_VL)
9234 NODE_NAME_CASE(VMSET_VL)
9235 NODE_NAME_CASE(VRGATHER_VX_VL)
9236 NODE_NAME_CASE(VRGATHER_VV_VL)
9237 NODE_NAME_CASE(VRGATHEREI16_VV_VL)
9238 NODE_NAME_CASE(VSEXT_VL)
9239 NODE_NAME_CASE(VZEXT_VL)
9240 NODE_NAME_CASE(VPOPC_VL)
9241 NODE_NAME_CASE(VLE_VL)
9242 NODE_NAME_CASE(VSE_VL)
9243 NODE_NAME_CASE(READ_CSR)
9244 NODE_NAME_CASE(WRITE_CSR)
9245 NODE_NAME_CASE(SWAP_CSR)
9246 }
9247 // clang-format on
9248 return nullptr;
9249 #undef NODE_NAME_CASE
9250 }
9251
9252 /// getConstraintType - Given a constraint letter, return the type of
9253 /// constraint it is for this target.
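/// A sketch of the supported letters (assuming standard GCC/LLVM constraint
/// semantics): 'f' selects an FP register; 'I' a 12-bit signed immediate
/// (e.g. an addi operand); 'J' the integer zero; 'K' a 5-bit unsigned
/// immediate (e.g. a CSR or shift-amount field); 'A' an address held in a
/// register; 'S' a symbolic address; "vr"/"vm" select vector data/mask
/// registers.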
9254 RISCVTargetLowering::ConstraintType
9255 RISCVTargetLowering::getConstraintType(StringRef Constraint) const {
9256 if (Constraint.size() == 1) {
9257 switch (Constraint[0]) {
9258 default:
9259 break;
9260 case 'f':
9261 return C_RegisterClass;
9262 case 'I':
9263 case 'J':
9264 case 'K':
9265 return C_Immediate;
9266 case 'A':
9267 return C_Memory;
9268 case 'S': // A symbolic address
9269 return C_Other;
9270 }
9271 } else {
9272 if (Constraint == "vr" || Constraint == "vm")
9273 return C_RegisterClass;
9274 }
9275 return TargetLowering::getConstraintType(Constraint);
9276 }
9277
9278 std::pair<unsigned, const TargetRegisterClass *>
9279 RISCVTargetLowering::getRegForInlineAsmConstraint(const TargetRegisterInfo *TRI,
9280 StringRef Constraint,
9281 MVT VT) const {
9282 // First, see if this is a constraint that directly corresponds to a
9283 // RISCV register class.
9284 if (Constraint.size() == 1) {
9285 switch (Constraint[0]) {
9286 case 'r':
9287 return std::make_pair(0U, &RISCV::GPRRegClass);
9288 case 'f':
9289 if (Subtarget.hasStdExtZfh() && VT == MVT::f16)
9290 return std::make_pair(0U, &RISCV::FPR16RegClass);
9291 if (Subtarget.hasStdExtF() && VT == MVT::f32)
9292 return std::make_pair(0U, &RISCV::FPR32RegClass);
9293 if (Subtarget.hasStdExtD() && VT == MVT::f64)
9294 return std::make_pair(0U, &RISCV::FPR64RegClass);
9295 break;
9296 default:
9297 break;
9298 }
9299 } else {
9300 if (Constraint == "vr") {
9301 for (const auto *RC : {&RISCV::VRRegClass, &RISCV::VRM2RegClass,
9302 &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9303 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy))
9304 return std::make_pair(0U, RC);
9305 }
9306 } else if (Constraint == "vm") {
9307 if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9308 return std::make_pair(0U, &RISCV::VMRegClass);
9309 }
9310 }
9311
9312 // Clang will correctly decode the usage of register name aliases into their
9313 // official names. However, other frontends like `rustc` do not. This allows
9314 // users of these frontends to use the ABI names for registers in LLVM-style
9315 // register constraints.
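// Illustrative LLVM IR (hypothetical): a frontend may emit
//   %r = call i64 asm "mv $0, $1", "=r,{a0}"(i64 %x)
// and the StringSwitch below maps the "{a0}" alias to RISCV::X10.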
9316 unsigned XRegFromAlias = StringSwitch<unsigned>(Constraint.lower())
9317 .Case("{zero}", RISCV::X0)
9318 .Case("{ra}", RISCV::X1)
9319 .Case("{sp}", RISCV::X2)
9320 .Case("{gp}", RISCV::X3)
9321 .Case("{tp}", RISCV::X4)
9322 .Case("{t0}", RISCV::X5)
9323 .Case("{t1}", RISCV::X6)
9324 .Case("{t2}", RISCV::X7)
9325 .Cases("{s0}", "{fp}", RISCV::X8)
9326 .Case("{s1}", RISCV::X9)
9327 .Case("{a0}", RISCV::X10)
9328 .Case("{a1}", RISCV::X11)
9329 .Case("{a2}", RISCV::X12)
9330 .Case("{a3}", RISCV::X13)
9331 .Case("{a4}", RISCV::X14)
9332 .Case("{a5}", RISCV::X15)
9333 .Case("{a6}", RISCV::X16)
9334 .Case("{a7}", RISCV::X17)
9335 .Case("{s2}", RISCV::X18)
9336 .Case("{s3}", RISCV::X19)
9337 .Case("{s4}", RISCV::X20)
9338 .Case("{s5}", RISCV::X21)
9339 .Case("{s6}", RISCV::X22)
9340 .Case("{s7}", RISCV::X23)
9341 .Case("{s8}", RISCV::X24)
9342 .Case("{s9}", RISCV::X25)
9343 .Case("{s10}", RISCV::X26)
9344 .Case("{s11}", RISCV::X27)
9345 .Case("{t3}", RISCV::X28)
9346 .Case("{t4}", RISCV::X29)
9347 .Case("{t5}", RISCV::X30)
9348 .Case("{t6}", RISCV::X31)
9349 .Default(RISCV::NoRegister);
9350 if (XRegFromAlias != RISCV::NoRegister)
9351 return std::make_pair(XRegFromAlias, &RISCV::GPRRegClass);
9352
9353 // Since TargetLowering::getRegForInlineAsmConstraint uses the name of the
9354 // TableGen record rather than the AsmName to choose registers for InlineAsm
9355 // constraints, and we want to match those names to the widest floating-point
9356 // register type available, manually select floating-point registers here.
9357 //
9358 // The second case is the ABI name of the register, so that frontends can also
9359 // use the ABI names in register constraint lists.
9360 if (Subtarget.hasStdExtF()) {
9361 unsigned FReg = StringSwitch<unsigned>(Constraint.lower())
9362 .Cases("{f0}", "{ft0}", RISCV::F0_F)
9363 .Cases("{f1}", "{ft1}", RISCV::F1_F)
9364 .Cases("{f2}", "{ft2}", RISCV::F2_F)
9365 .Cases("{f3}", "{ft3}", RISCV::F3_F)
9366 .Cases("{f4}", "{ft4}", RISCV::F4_F)
9367 .Cases("{f5}", "{ft5}", RISCV::F5_F)
9368 .Cases("{f6}", "{ft6}", RISCV::F6_F)
9369 .Cases("{f7}", "{ft7}", RISCV::F7_F)
9370 .Cases("{f8}", "{fs0}", RISCV::F8_F)
9371 .Cases("{f9}", "{fs1}", RISCV::F9_F)
9372 .Cases("{f10}", "{fa0}", RISCV::F10_F)
9373 .Cases("{f11}", "{fa1}", RISCV::F11_F)
9374 .Cases("{f12}", "{fa2}", RISCV::F12_F)
9375 .Cases("{f13}", "{fa3}", RISCV::F13_F)
9376 .Cases("{f14}", "{fa4}", RISCV::F14_F)
9377 .Cases("{f15}", "{fa5}", RISCV::F15_F)
9378 .Cases("{f16}", "{fa6}", RISCV::F16_F)
9379 .Cases("{f17}", "{fa7}", RISCV::F17_F)
9380 .Cases("{f18}", "{fs2}", RISCV::F18_F)
9381 .Cases("{f19}", "{fs3}", RISCV::F19_F)
9382 .Cases("{f20}", "{fs4}", RISCV::F20_F)
9383 .Cases("{f21}", "{fs5}", RISCV::F21_F)
9384 .Cases("{f22}", "{fs6}", RISCV::F22_F)
9385 .Cases("{f23}", "{fs7}", RISCV::F23_F)
9386 .Cases("{f24}", "{fs8}", RISCV::F24_F)
9387 .Cases("{f25}", "{fs9}", RISCV::F25_F)
9388 .Cases("{f26}", "{fs10}", RISCV::F26_F)
9389 .Cases("{f27}", "{fs11}", RISCV::F27_F)
9390 .Cases("{f28}", "{ft8}", RISCV::F28_F)
9391 .Cases("{f29}", "{ft9}", RISCV::F29_F)
9392 .Cases("{f30}", "{ft10}", RISCV::F30_F)
9393 .Cases("{f31}", "{ft11}", RISCV::F31_F)
9394 .Default(RISCV::NoRegister);
9395 if (FReg != RISCV::NoRegister) {
9396 assert(RISCV::F0_F <= FReg && FReg <= RISCV::F31_F && "Unknown fp-reg");
9397 if (Subtarget.hasStdExtD()) {
9398 unsigned RegNo = FReg - RISCV::F0_F;
9399 unsigned DReg = RISCV::F0_D + RegNo;
9400 return std::make_pair(DReg, &RISCV::FPR64RegClass);
9401 }
9402 return std::make_pair(FReg, &RISCV::FPR32RegClass);
9403 }
9404 }
9405
9406 if (Subtarget.hasStdExtV()) {
9407 Register VReg = StringSwitch<Register>(Constraint.lower())
9408 .Case("{v0}", RISCV::V0)
9409 .Case("{v1}", RISCV::V1)
9410 .Case("{v2}", RISCV::V2)
9411 .Case("{v3}", RISCV::V3)
9412 .Case("{v4}", RISCV::V4)
9413 .Case("{v5}", RISCV::V5)
9414 .Case("{v6}", RISCV::V6)
9415 .Case("{v7}", RISCV::V7)
9416 .Case("{v8}", RISCV::V8)
9417 .Case("{v9}", RISCV::V9)
9418 .Case("{v10}", RISCV::V10)
9419 .Case("{v11}", RISCV::V11)
9420 .Case("{v12}", RISCV::V12)
9421 .Case("{v13}", RISCV::V13)
9422 .Case("{v14}", RISCV::V14)
9423 .Case("{v15}", RISCV::V15)
9424 .Case("{v16}", RISCV::V16)
9425 .Case("{v17}", RISCV::V17)
9426 .Case("{v18}", RISCV::V18)
9427 .Case("{v19}", RISCV::V19)
9428 .Case("{v20}", RISCV::V20)
9429 .Case("{v21}", RISCV::V21)
9430 .Case("{v22}", RISCV::V22)
9431 .Case("{v23}", RISCV::V23)
9432 .Case("{v24}", RISCV::V24)
9433 .Case("{v25}", RISCV::V25)
9434 .Case("{v26}", RISCV::V26)
9435 .Case("{v27}", RISCV::V27)
9436 .Case("{v28}", RISCV::V28)
9437 .Case("{v29}", RISCV::V29)
9438 .Case("{v30}", RISCV::V30)
9439 .Case("{v31}", RISCV::V31)
9440 .Default(RISCV::NoRegister);
9441 if (VReg != RISCV::NoRegister) {
9442 if (TRI->isTypeLegalForClass(RISCV::VMRegClass, VT.SimpleTy))
9443 return std::make_pair(VReg, &RISCV::VMRegClass);
9444 if (TRI->isTypeLegalForClass(RISCV::VRRegClass, VT.SimpleTy))
9445 return std::make_pair(VReg, &RISCV::VRRegClass);
9446 for (const auto *RC :
9447 {&RISCV::VRM2RegClass, &RISCV::VRM4RegClass, &RISCV::VRM8RegClass}) {
9448 if (TRI->isTypeLegalForClass(*RC, VT.SimpleTy)) {
9449 VReg = TRI->getMatchingSuperReg(VReg, RISCV::sub_vrm1_0, RC);
9450 return std::make_pair(VReg, RC);
9451 }
9452 }
9453 }
9454 }
9455
9456 return TargetLowering::getRegForInlineAsmConstraint(TRI, Constraint, VT);
9457 }
9458
9459 unsigned
9460 RISCVTargetLowering::getInlineAsmMemConstraint(StringRef ConstraintCode) const {
9461 // Currently only support length 1 constraints.
9462 if (ConstraintCode.size() == 1) {
9463 switch (ConstraintCode[0]) {
9464 case 'A':
9465 return InlineAsm::Constraint_A;
9466 default:
9467 break;
9468 }
9469 }
9470
9471 return TargetLowering::getInlineAsmMemConstraint(ConstraintCode);
9472 }
9473
9474 void RISCVTargetLowering::LowerAsmOperandForConstraint(
9475 SDValue Op, std::string &Constraint, std::vector<SDValue> &Ops,
9476 SelectionDAG &DAG) const {
9477 // Currently only support length 1 constraints.
9478 if (Constraint.length() == 1) {
9479 switch (Constraint[0]) {
9480 case 'I':
9481 // Validate & create a 12-bit signed immediate operand.
9482 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9483 uint64_t CVal = C->getSExtValue();
9484 if (isInt<12>(CVal))
9485 Ops.push_back(
9486 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9487 }
9488 return;
9489 case 'J':
9490 // Validate & create an integer zero operand.
9491 if (auto *C = dyn_cast<ConstantSDNode>(Op))
9492 if (C->getZExtValue() == 0)
9493 Ops.push_back(
9494 DAG.getTargetConstant(0, SDLoc(Op), Subtarget.getXLenVT()));
9495 return;
9496 case 'K':
9497 // Validate & create a 5-bit unsigned immediate operand.
9498 if (auto *C = dyn_cast<ConstantSDNode>(Op)) {
9499 uint64_t CVal = C->getZExtValue();
9500 if (isUInt<5>(CVal))
9501 Ops.push_back(
9502 DAG.getTargetConstant(CVal, SDLoc(Op), Subtarget.getXLenVT()));
9503 }
9504 return;
9505 case 'S':
9506 if (const auto *GA = dyn_cast<GlobalAddressSDNode>(Op)) {
9507 Ops.push_back(DAG.getTargetGlobalAddress(GA->getGlobal(), SDLoc(Op),
9508 GA->getValueType(0)));
9509 } else if (const auto *BA = dyn_cast<BlockAddressSDNode>(Op)) {
9510 Ops.push_back(DAG.getTargetBlockAddress(BA->getBlockAddress(),
9511 BA->getValueType(0)));
9512 }
9513 return;
9514 default:
9515 break;
9516 }
9517 }
9518 TargetLowering::LowerAsmOperandForConstraint(Op, Constraint, Ops, DAG);
9519 }
9520
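// A sketch of the resulting fence placement, assuming the usual lowering of
// IR fences to RISC-V FENCE instructions: a seq_cst load gets a leading
// "fence rw,rw" (and, via emitTrailingFence, a trailing "fence r,rw"); a
// release-or-stronger store gets a leading "fence rw,w"; an acquire load
// gets only the trailing "fence r,rw".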
9521 Instruction *RISCVTargetLowering::emitLeadingFence(IRBuilderBase &Builder,
9522 Instruction *Inst,
9523 AtomicOrdering Ord) const {
9524 if (isa<LoadInst>(Inst) && Ord == AtomicOrdering::SequentiallyConsistent)
9525 return Builder.CreateFence(Ord);
9526 if (isa<StoreInst>(Inst) && isReleaseOrStronger(Ord))
9527 return Builder.CreateFence(AtomicOrdering::Release);
9528 return nullptr;
9529 }
9530
9531 Instruction *RISCVTargetLowering::emitTrailingFence(IRBuilderBase &Builder,
9532 Instruction *Inst,
9533 AtomicOrdering Ord) const {
9534 if (isa<LoadInst>(Inst) && isAcquireOrStronger(Ord))
9535 return Builder.CreateFence(AtomicOrdering::Acquire);
9536 return nullptr;
9537 }
9538
9539 TargetLowering::AtomicExpansionKind
9540 RISCVTargetLowering::shouldExpandAtomicRMWInIR(AtomicRMWInst *AI) const {
9541 // atomicrmw {fadd,fsub} must be expanded to use compare-exchange, as floating
9542 // point operations can't be used in an lr/sc sequence without breaking the
9543 // forward-progress guarantee.
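// Illustrative IR: atomicrmw fadd float* %p, float 1.0 seq_cst is therefore
// expanded by AtomicExpandPass into a load + fadd + cmpxchg retry loop
// instead of an lr.w/sc.w sequence.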
9544 if (AI->isFloatingPointOperation())
9545 return AtomicExpansionKind::CmpXChg;
9546
9547 unsigned Size = AI->getType()->getPrimitiveSizeInBits();
9548 if (Size == 8 || Size == 16)
9549 return AtomicExpansionKind::MaskedIntrinsic;
9550 return AtomicExpansionKind::None;
9551 }
9552
9553 static Intrinsic::ID
9554 getIntrinsicForMaskedAtomicRMWBinOp(unsigned XLen, AtomicRMWInst::BinOp BinOp) {
9555 if (XLen == 32) {
9556 switch (BinOp) {
9557 default:
9558 llvm_unreachable("Unexpected AtomicRMW BinOp");
9559 case AtomicRMWInst::Xchg:
9560 return Intrinsic::riscv_masked_atomicrmw_xchg_i32;
9561 case AtomicRMWInst::Add:
9562 return Intrinsic::riscv_masked_atomicrmw_add_i32;
9563 case AtomicRMWInst::Sub:
9564 return Intrinsic::riscv_masked_atomicrmw_sub_i32;
9565 case AtomicRMWInst::Nand:
9566 return Intrinsic::riscv_masked_atomicrmw_nand_i32;
9567 case AtomicRMWInst::Max:
9568 return Intrinsic::riscv_masked_atomicrmw_max_i32;
9569 case AtomicRMWInst::Min:
9570 return Intrinsic::riscv_masked_atomicrmw_min_i32;
9571 case AtomicRMWInst::UMax:
9572 return Intrinsic::riscv_masked_atomicrmw_umax_i32;
9573 case AtomicRMWInst::UMin:
9574 return Intrinsic::riscv_masked_atomicrmw_umin_i32;
9575 }
9576 }
9577
9578 if (XLen == 64) {
9579 switch (BinOp) {
9580 default:
9581 llvm_unreachable("Unexpected AtomicRMW BinOp");
9582 case AtomicRMWInst::Xchg:
9583 return Intrinsic::riscv_masked_atomicrmw_xchg_i64;
9584 case AtomicRMWInst::Add:
9585 return Intrinsic::riscv_masked_atomicrmw_add_i64;
9586 case AtomicRMWInst::Sub:
9587 return Intrinsic::riscv_masked_atomicrmw_sub_i64;
9588 case AtomicRMWInst::Nand:
9589 return Intrinsic::riscv_masked_atomicrmw_nand_i64;
9590 case AtomicRMWInst::Max:
9591 return Intrinsic::riscv_masked_atomicrmw_max_i64;
9592 case AtomicRMWInst::Min:
9593 return Intrinsic::riscv_masked_atomicrmw_min_i64;
9594 case AtomicRMWInst::UMax:
9595 return Intrinsic::riscv_masked_atomicrmw_umax_i64;
9596 case AtomicRMWInst::UMin:
9597 return Intrinsic::riscv_masked_atomicrmw_umin_i64;
9598 }
9599 }
9600
9601 llvm_unreachable("Unexpected XLen\n");
9602 }
9603
9604 Value *RISCVTargetLowering::emitMaskedAtomicRMWIntrinsic(
9605 IRBuilderBase &Builder, AtomicRMWInst *AI, Value *AlignedAddr, Value *Incr,
9606 Value *Mask, Value *ShiftAmt, AtomicOrdering Ord) const {
9607 unsigned XLen = Subtarget.getXLen();
9608 Value *Ordering =
9609 Builder.getIntN(XLen, static_cast<uint64_t>(AI->getOrdering()));
9610 Type *Tys[] = {AlignedAddr->getType()};
9611 Function *LrwOpScwLoop = Intrinsic::getDeclaration(
9612 AI->getModule(),
9613 getIntrinsicForMaskedAtomicRMWBinOp(XLen, AI->getOperation()), Tys);
9614
9615 if (XLen == 64) {
9616 Incr = Builder.CreateSExt(Incr, Builder.getInt64Ty());
9617 Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
9618 ShiftAmt = Builder.CreateSExt(ShiftAmt, Builder.getInt64Ty());
9619 }
9620
9621 Value *Result;
9622
9623 // Must pass the shift amount needed to sign extend the loaded value prior
9624 // to performing a signed comparison for min/max. ShiftAmt is the number of
9625 // bits to shift the value into position. Pass XLen-ShiftAmt-ValWidth, which
9626 // is the number of bits to left+right shift the value in order to
9627 // sign-extend.
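  // Worked example (illustrative): with XLen == 32, an i8 lane at bit offset
  // 16 has ShiftAmt == 16 and ValWidth == 8, so the sign-extension shift is
  // 32 - 16 - 8 == 8, computed below as (XLen - ValWidth) - ShiftAmt.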
  if (AI->getOperation() == AtomicRMWInst::Min ||
      AI->getOperation() == AtomicRMWInst::Max) {
    const DataLayout &DL = AI->getModule()->getDataLayout();
    unsigned ValWidth =
        DL.getTypeStoreSizeInBits(AI->getValOperand()->getType());
    Value *SextShamt =
        Builder.CreateSub(Builder.getIntN(XLen, XLen - ValWidth), ShiftAmt);
    Result = Builder.CreateCall(LrwOpScwLoop,
                                {AlignedAddr, Incr, Mask, SextShamt, Ordering});
  } else {
    Result =
        Builder.CreateCall(LrwOpScwLoop, {AlignedAddr, Incr, Mask, Ordering});
  }

  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

TargetLowering::AtomicExpansionKind
RISCVTargetLowering::shouldExpandAtomicCmpXchgInIR(
    AtomicCmpXchgInst *CI) const {
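  // As with atomicrmw, there is no sub-word lr/sc, so 8- and 16-bit cmpxchg
  // is expanded to a masked intrinsic on the containing aligned word.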
  unsigned Size = CI->getCompareOperand()->getType()->getPrimitiveSizeInBits();
  if (Size == 8 || Size == 16)
    return AtomicExpansionKind::MaskedIntrinsic;
  return AtomicExpansionKind::None;
}

Value *RISCVTargetLowering::emitMaskedAtomicCmpXchgIntrinsic(
    IRBuilderBase &Builder, AtomicCmpXchgInst *CI, Value *AlignedAddr,
    Value *CmpVal, Value *NewVal, Value *Mask, AtomicOrdering Ord) const {
  unsigned XLen = Subtarget.getXLen();
  Value *Ordering = Builder.getIntN(XLen, static_cast<uint64_t>(Ord));
  Intrinsic::ID CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i32;
  if (XLen == 64) {
    CmpVal = Builder.CreateSExt(CmpVal, Builder.getInt64Ty());
    NewVal = Builder.CreateSExt(NewVal, Builder.getInt64Ty());
    Mask = Builder.CreateSExt(Mask, Builder.getInt64Ty());
    CmpXchgIntrID = Intrinsic::riscv_masked_cmpxchg_i64;
  }
  Type *Tys[] = {AlignedAddr->getType()};
  Function *MaskedCmpXchg =
      Intrinsic::getDeclaration(CI->getModule(), CmpXchgIntrID, Tys);
  Value *Result = Builder.CreateCall(
      MaskedCmpXchg, {AlignedAddr, CmpVal, NewVal, Mask, Ordering});
  if (XLen == 64)
    Result = Builder.CreateTrunc(Result, Builder.getInt32Ty());
  return Result;
}

bool RISCVTargetLowering::shouldRemoveExtendFromGSIndex(EVT VT) const {
  return false;
}

bool RISCVTargetLowering::isFMAFasterThanFMulAndFAdd(const MachineFunction &MF,
                                                     EVT VT) const {
  VT = VT.getScalarType();

  if (!VT.isSimple())
    return false;

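  // The Zfh, F and D extensions each provide a fused multiply-add instruction
  // (fmadd.h / fmadd.s / fmadd.d), so a single FMA is never slower than a
  // separate multiply and add for these types.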
  switch (VT.getSimpleVT().SimpleTy) {
  case MVT::f16:
    return Subtarget.hasStdExtZfh();
  case MVT::f32:
    return Subtarget.hasStdExtF();
  case MVT::f64:
    return Subtarget.hasStdExtD();
  default:
    break;
  }

  return false;
}

Register RISCVTargetLowering::getExceptionPointerRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X10; // a0
}

Register RISCVTargetLowering::getExceptionSelectorRegister(
    const Constant *PersonalityFn) const {
  return RISCV::X11; // a1
}

bool RISCVTargetLowering::shouldExtendTypeInLibCall(EVT Type) const {
  // Return false to suppress unnecessary extensions when a libcall argument
  // or return value is an f32 under the LP64 ABI.
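  // For instance (illustrative), an f32 passed to a compiler-rt helper such
  // as __addsf3 already travels in the low 32 bits of a GPR under LP64, so
  // promoting it first would only insert extra extension code.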
  RISCVABI::ABI ABI = Subtarget.getTargetABI();
  if (ABI == RISCVABI::ABI_LP64 && (Type == MVT::f32))
    return false;

  return true;
}

bool RISCVTargetLowering::shouldSignExtendTypeInLibCall(EVT Type,
                                                        bool IsSigned) const {
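  // The RISC-V psABI keeps 32-bit integers sign-extended in 64-bit registers,
  // even for unsigned types, so i32 libcall arguments are always
  // sign-extended on RV64.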
  if (Subtarget.is64Bit() && Type == MVT::i32)
    return true;

  return IsSigned;
}

bool RISCVTargetLowering::decomposeMulByConstant(LLVMContext &Context, EVT VT,
                                                 SDValue C) const {
  // Check integral scalar types.
  if (VT.isScalarInteger()) {
    // Omit the optimization if the subtarget has the M extension and the data
    // size exceeds XLen.
    if (Subtarget.hasStdExtM() && VT.getSizeInBits() > Subtarget.getXLen())
      return false;
    if (auto *ConstNode = dyn_cast<ConstantSDNode>(C.getNode())) {
      // Break the MUL into a SLLI and an ADD/SUB.
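      // e.g. x * 9 == (x << 3) + x and x * 7 == (x << 3) - x.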
      const APInt &Imm = ConstNode->getAPIntValue();
      if ((Imm + 1).isPowerOf2() || (Imm - 1).isPowerOf2() ||
          (1 - Imm).isPowerOf2() || (-1 - Imm).isPowerOf2())
        return true;
      // Optimize the MUL to (SH*ADD x, (SLLI x, bits)) if Imm is not simm12.
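      // e.g. with Zba, x * 4098 == sh1add x, (x << 12), since 4098 - 2 is a
      // power of two.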
      if (Subtarget.hasStdExtZba() && !Imm.isSignedIntN(12) &&
          ((Imm - 2).isPowerOf2() || (Imm - 4).isPowerOf2() ||
           (Imm - 8).isPowerOf2()))
        return true;
      // Omit the following optimization if the subtarget has the M extension
      // and the data size >= XLen.
      if (Subtarget.hasStdExtM() && VT.getSizeInBits() >= Subtarget.getXLen())
        return false;
      // Break the MUL into two SLLI instructions and an ADD/SUB, if Imm needs
      // a pair of LUI/ADDI.
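      // e.g. x * 6144 == (x << 12) + (x << 11), since 6144 == 3 << 11.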
      if (!Imm.isSignedIntN(12) && Imm.countTrailingZeros() < 12) {
        APInt ImmS = Imm.ashr(Imm.countTrailingZeros());
        if ((ImmS + 1).isPowerOf2() || (ImmS - 1).isPowerOf2() ||
            (1 - ImmS).isPowerOf2())
          return true;
      }
    }
  }

  return false;
}

bool RISCVTargetLowering::isMulAddWithConstProfitable(
    const SDValue &AddNode, const SDValue &ConstNode) const {
  // Let the DAGCombiner decide for vectors.
  EVT VT = AddNode.getValueType();
  if (VT.isVector())
    return true;

  // Let the DAGCombiner decide for larger types.
  if (VT.getScalarSizeInBits() > Subtarget.getXLen())
    return true;

  // Folding (mul (add x, c1), c2) into (add (mul x, c2), c1*c2) is worse if
  // c1 fits in simm12 (a single ADDI) but the product c1*c2 does not.
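  // e.g. (x + 1) * 4096: c1 == 1 is simm12, but c1*c2 == 4096 is not, so the
  // fold would trade an ADDI for extra constant-materialization code.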
  ConstantSDNode *C1Node = cast<ConstantSDNode>(AddNode.getOperand(1));
  ConstantSDNode *C2Node = cast<ConstantSDNode>(ConstNode);
  const APInt &C1 = C1Node->getAPIntValue();
  const APInt &C2 = C2Node->getAPIntValue();
  if (C1.isSignedIntN(12) && !(C1 * C2).isSignedIntN(12))
    return false;

  // Default to true and let the DAGCombiner decide.
  return true;
}

bool RISCVTargetLowering::allowsMisalignedMemoryAccesses(
    EVT VT, unsigned AddrSpace, Align Alignment, MachineMemOperand::Flags Flags,
    bool *Fast) const {
  if (!VT.isVector())
    return false;

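  // Treat a vector access as supported whenever it is aligned to at least its
  // element size; e.g. a vector of i32 loaded with 4-byte alignment is fine
  // even though the full vector type is wider.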
  EVT ElemVT = VT.getVectorElementType();
  if (Alignment >= ElemVT.getStoreSize()) {
    if (Fast)
      *Fast = true;
    return true;
  }

  return false;
}

bool RISCVTargetLowering::splitValueIntoRegisterParts(
    SelectionDAG &DAG, const SDLoc &DL, SDValue Val, SDValue *Parts,
    unsigned NumParts, MVT PartVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  EVT ValueVT = Val.getValueType();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    // Cast the f16 to i16, extend to i32, pad the upper 16 bits with ones to
    // make a NaN-boxed f32 (per the RISC-V convention for holding a narrower
    // FP value in a wider FP register), and cast to f32.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::ANY_EXTEND, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::OR, DL, MVT::i32, Val,
                      DAG.getConstant(0xFFFF0000, DL, MVT::i32));
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f32, Val);
    Parts[0] = Val;
    return true;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
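    // If the value is carried in a wider scalable register type (e.g. an
    // nxv1i32 value passed in an nxv2i32 register), place it in the low
    // elements of an undef vector of the part type.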
    if (PartVTBitSize % ValueVTBitSize == 0) {
      // If the element types differ, bitcast to a vector with the same element
      // type as PartVT first.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        EVT SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
        Val = DAG.getNode(ISD::BITCAST, DL, SameEltTypeVT, Val);
      }
      Val = DAG.getNode(ISD::INSERT_SUBVECTOR, DL, PartVT, DAG.getUNDEF(PartVT),
                        Val, DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      Parts[0] = Val;
      return true;
    }
  }
  return false;
}

SDValue RISCVTargetLowering::joinRegisterPartsIntoValue(
    SelectionDAG &DAG, const SDLoc &DL, const SDValue *Parts, unsigned NumParts,
    MVT PartVT, EVT ValueVT, Optional<CallingConv::ID> CC) const {
  bool IsABIRegCopy = CC.hasValue();
  if (IsABIRegCopy && ValueVT == MVT::f16 && PartVT == MVT::f32) {
    SDValue Val = Parts[0];

    // Cast the f32 to i32, truncate to i16, and cast back to f16.
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::i32, Val);
    Val = DAG.getNode(ISD::TRUNCATE, DL, MVT::i16, Val);
    Val = DAG.getNode(ISD::BITCAST, DL, MVT::f16, Val);
    return Val;
  }

  if (ValueVT.isScalableVector() && PartVT.isScalableVector()) {
    LLVMContext &Context = *DAG.getContext();
    SDValue Val = Parts[0];
    EVT ValueEltVT = ValueVT.getVectorElementType();
    EVT PartEltVT = PartVT.getVectorElementType();
    unsigned ValueVTBitSize = ValueVT.getSizeInBits().getKnownMinSize();
    unsigned PartVTBitSize = PartVT.getSizeInBits().getKnownMinSize();
    if (PartVTBitSize % ValueVTBitSize == 0) {
      EVT SameEltTypeVT = ValueVT;
      // If the element types differ, extract as a vector with the same element
      // type as PartVT, then bitcast back to ValueVT.
      if (ValueEltVT != PartEltVT) {
        unsigned Count = ValueVTBitSize / PartEltVT.getSizeInBits();
        assert(Count != 0 && "The number of elements should not be zero.");
        SameEltTypeVT =
            EVT::getVectorVT(Context, PartEltVT, Count, /*IsScalable=*/true);
      }
      Val = DAG.getNode(ISD::EXTRACT_SUBVECTOR, DL, SameEltTypeVT, Val,
                        DAG.getConstant(0, DL, Subtarget.getXLenVT()));
      if (ValueEltVT != PartEltVT)
        Val = DAG.getNode(ISD::BITCAST, DL, ValueVT, Val);
      return Val;
    }
  }
  return SDValue();
}

#define GET_REGISTER_MATCHER
#include "RISCVGenAsmMatcher.inc"

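// Resolve a register by name for llvm.read_register / llvm.write_register and
// named-register global variables. Only registers that are reserved (either
// architecturally or by the user, e.g. with -ffixed-<reg>) may be accessed
// this way.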
Register
RISCVTargetLowering::getRegisterByName(const char *RegName, LLT VT,
                                       const MachineFunction &MF) const {
  Register Reg = MatchRegisterAltName(RegName);
  if (Reg == RISCV::NoRegister)
    Reg = MatchRegisterName(RegName);
  if (Reg == RISCV::NoRegister)
    report_fatal_error(
        Twine("Invalid register name \"" + StringRef(RegName) + "\"."));
  BitVector ReservedRegs = Subtarget.getRegisterInfo()->getReservedRegs(MF);
  if (!ReservedRegs.test(Reg) && !Subtarget.isRegisterReservedByUser(Reg))
    report_fatal_error(Twine("Trying to obtain non-reserved register \"" +
                             StringRef(RegName) + "\"."));
  return Reg;
}

namespace llvm {
namespace RISCVVIntrinsicsTable {

#define GET_RISCVVIntrinsicsTable_IMPL
#include "RISCVGenSearchableTables.inc"

} // namespace RISCVVIntrinsicsTable

} // namespace llvm