; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
  <vscale x 1 x half>,
  half,
  i32);

define <vscale x 1 x half> @intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.nxv1f16.f16(
    <vscale x 1 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 1 x half> %a
}

declare <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
  <vscale x 1 x half>,
  <vscale x 1 x half>,
  half,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x half> @intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, half %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f16_nxv1f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x half> @llvm.riscv.vfslide1up.mask.nxv1f16.f16(
    <vscale x 1 x half> %0,
    <vscale x 1 x half> %1,
    half %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
  <vscale x 2 x half>,
  half,
  i32);

define <vscale x 2 x half> @intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.nxv2f16.f16(
    <vscale x 2 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 2 x half> %a
}

declare <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
  <vscale x 2 x half>,
  <vscale x 2 x half>,
  half,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x half> @intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, half %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f16_nxv2f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vfslide1up.mask.nxv2f16.f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    half %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
  <vscale x 4 x half>,
  half,
  i32);

define <vscale x 4 x half> @intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.nxv4f16.f16(
    <vscale x 4 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  half,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, half %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f16_nxv4f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfslide1up.mask.nxv4f16.f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    half %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
  <vscale x 8 x half>,
  half,
  i32);

define <vscale x 8 x half> @intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.nxv8f16.f16(
    <vscale x 8 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  half,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x half> @intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, half %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f16_nxv8f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vfslide1up.mask.nxv8f16.f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    half %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
  <vscale x 16 x half>,
  half,
  i32);

define <vscale x 16 x half> @intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.nxv16f16.f16(
    <vscale x 16 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  half,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x half> @intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, half %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f16_nxv16f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vfslide1up.mask.nxv16f16.f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    half %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
  <vscale x 32 x half>,
  half,
  i32);

define <vscale x 32 x half> @intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, half %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.nxv32f16.f16(
    <vscale x 32 x half> %0,
    half %1,
    i32 %2)

  ret <vscale x 32 x half> %a
}

declare <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
  <vscale x 32 x half>,
  <vscale x 32 x half>,
  half,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x half> @intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16(<vscale x 32 x half> %0, <vscale x 32 x half> %1, half %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv32f16_nxv32f16_f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.h.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x half> @llvm.riscv.vfslide1up.mask.nxv32f16.f16(
    <vscale x 32 x half> %0,
    <vscale x 32 x half> %1,
    half %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
  <vscale x 1 x float>,
  float,
  i32);

define <vscale x 1 x float> @intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.nxv1f32.f32(
    <vscale x 1 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  float,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, float %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f32_nxv1f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vfslide1up.mask.nxv1f32.f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    float %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
  <vscale x 2 x float>,
  float,
  i32);

define <vscale x 2 x float> @intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.nxv2f32.f32(
    <vscale x 2 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  float,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, float %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f32_nxv2f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfslide1up.mask.nxv2f32.f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    float %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
  <vscale x 4 x float>,
  float,
  i32);

define <vscale x 4 x float> @intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.nxv4f32.f32(
    <vscale x 4 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  float,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, float %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f32_nxv4f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vfslide1up.mask.nxv4f32.f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    float %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
  <vscale x 8 x float>,
  float,
  i32);

define <vscale x 8 x float> @intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.nxv8f32.f32(
    <vscale x 8 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  float,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, float %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f32_nxv8f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vfslide1up.mask.nxv8f32.f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    float %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
  <vscale x 16 x float>,
  float,
  i32);

define <vscale x 16 x float> @intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, float %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.nxv16f32.f32(
    <vscale x 16 x float> %0,
    float %1,
    i32 %2)

  ret <vscale x 16 x float> %a
}

declare <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
  <vscale x 16 x float>,
  <vscale x 16 x float>,
  float,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x float> @intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32(<vscale x 16 x float> %0, <vscale x 16 x float> %1, float %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv16f32_nxv16f32_f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    fmv.w.x ft0, a0
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x float> @llvm.riscv.vfslide1up.mask.nxv16f32.f32(
    <vscale x 16 x float> %0,
    <vscale x 16 x float> %1,
    float %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
  <vscale x 1 x double>,
  double,
  i32);

define <vscale x 1 x double> @intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    vfslide1up.vf v25, v8, ft0
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.nxv1f64.f64(
    <vscale x 1 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  double,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, double %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv1f64_nxv1f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v9, ft0, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfslide1up.mask.nxv1f64.f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    double %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
  <vscale x 2 x double>,
  double,
  i32);

define <vscale x 2 x double> @intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    vfslide1up.vf v26, v8, ft0
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.nxv2f64.f64(
    <vscale x 2 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  double,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, double %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv2f64_nxv2f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v10, ft0, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vfslide1up.mask.nxv2f64.f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    double %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
  <vscale x 4 x double>,
  double,
  i32);

define <vscale x 4 x double> @intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    vfslide1up.vf v28, v8, ft0
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.nxv4f64.f64(
    <vscale x 4 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  double,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, double %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv4f64_nxv4f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v12, ft0, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vfslide1up.mask.nxv4f64.f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    double %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
  <vscale x 8 x double>,
  double,
  i32);

define <vscale x 8 x double> @intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, double %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    vfslide1up.vf v16, v8, ft0
; CHECK-NEXT:    vmv8r.v v8, v16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.nxv8f64.f64(
    <vscale x 8 x double> %0,
    double %1,
    i32 %2)

  ret <vscale x 8 x double> %a
}

declare <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
  <vscale x 8 x double>,
  <vscale x 8 x double>,
  double,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x double> @intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64(<vscale x 8 x double> %0, <vscale x 8 x double> %1, double %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfslide1up_mask_vf_nxv8f64_nxv8f64_f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    fld ft0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, tu, mu
; CHECK-NEXT:    vfslide1up.vf v8, v16, ft0, v0.t
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x double> @llvm.riscv.vfslide1up.mask.nxv8f64.f64(
    <vscale x 8 x double> %0,
    <vscale x 8 x double> %1,
    double %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x double> %a
}