; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32,
  i32);

define <vscale x 1 x i8> @intrinsic_vslidedown_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vslidedown_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 9,
    i32 %2)

  ret <vscale x 1 x i8> %a
}

define <vscale x 1 x i8> @intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32,
  i32);

define <vscale x 2 x i8> @intrinsic_vslidedown_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vslidedown_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x i8> %a
}

define <vscale x 2 x i8> @intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32,
  i32);

define <vscale x 4 x i8> @intrinsic_vslidedown_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vslidedown_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x i8> %a
}

define <vscale x 4 x i8> @intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32,
  i32);

define <vscale x 8 x i8> @intrinsic_vslidedown_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vslidedown_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 9,
    i32 %2)

  ret <vscale x 8 x i8> %a
}

define <vscale x 8 x i8> @intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32,
  i32);

define <vscale x 16 x i8> @intrinsic_vslidedown_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vslidedown_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 9,
    i32 %2)

  ret <vscale x 16 x i8> %a
}

define <vscale x 16 x i8> @intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32,
  i32);

define <vscale x 32 x i8> @intrinsic_vslidedown_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vslidedown_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 9,
    i32 %2)

  ret <vscale x 32 x i8> %a
}

define <vscale x 32 x i8> @intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 9,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32,
  i32);

define <vscale x 1 x i16> @intrinsic_vslidedown_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vslidedown_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 9,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

define <vscale x 1 x i16> @intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32,
  i32);

define <vscale x 2 x i16> @intrinsic_vslidedown_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vslidedown_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32,
  i32);

define <vscale x 4 x i16> @intrinsic_vslidedown_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vslidedown_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32,
  i32);

define <vscale x 8 x i16> @intrinsic_vslidedown_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vslidedown_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 9,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32,
  i32);

define <vscale x 16 x i16> @intrinsic_vslidedown_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vslidedown_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 9,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i32> @intrinsic_vslidedown_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vslidedown_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

define <vscale x 1 x i32> @intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i32> @intrinsic_vslidedown_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vslidedown_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i32> @intrinsic_vslidedown_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vslidedown_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i32> @intrinsic_vslidedown_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vslidedown_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  i32);

define <vscale x 1 x i64> @intrinsic_vslidedown_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslidedown_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 9,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

define <vscale x 1 x i64> @intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  i32);

define <vscale x 2 x i64> @intrinsic_vslidedown_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vslidedown_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  i32);

define <vscale x 4 x i64> @intrinsic_vslidedown_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vslidedown_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

1426declare <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
1427  <vscale x 1 x half>,
1428  <vscale x 1 x half>,
1429  i32,
1430  i32);
1431
1432define <vscale x 1 x half> @intrinsic_vslidedown_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, i32 %3) nounwind {
1433; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f16_nxv1f16:
1434; CHECK:       # %bb.0: # %entry
1435; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1436; CHECK-NEXT:    vslidedown.vx v8, v9, a0
1437; CHECK-NEXT:    ret
1438entry:
1439  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
1440    <vscale x 1 x half> %0,
1441    <vscale x 1 x half> %1,
1442    i32 %2,
1443    i32 %3)
1444
1445  ret <vscale x 1 x half> %a
1446}
1447
1448declare <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
1449  <vscale x 1 x half>,
1450  <vscale x 1 x half>,
1451  i32,
1452  <vscale x 1 x i1>,
1453  i32);
1454
1455define <vscale x 1 x half> @intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
1456; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f16_nxv1f16:
1457; CHECK:       # %bb.0: # %entry
1458; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1459; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
1460; CHECK-NEXT:    ret
1461entry:
1462  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
1463    <vscale x 1 x half> %0,
1464    <vscale x 1 x half> %1,
1465    i32 %2,
1466    <vscale x 1 x i1> %3,
1467    i32 %4)
1468
1469  ret <vscale x 1 x half> %a
1470}
1471
1472define <vscale x 1 x half> @intrinsic_vslidedown_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, i32 %2) nounwind {
1473; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f16_nxv1f16:
1474; CHECK:       # %bb.0: # %entry
1475; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
1476; CHECK-NEXT:    vslidedown.vi v8, v9, 9
1477; CHECK-NEXT:    ret
1478entry:
1479  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.nxv1f16(
1480    <vscale x 1 x half> %0,
1481    <vscale x 1 x half> %1,
1482    i32 9,
1483    i32 %2)
1484
1485  ret <vscale x 1 x half> %a
1486}
1487
1488define <vscale x 1 x half> @intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16(<vscale x 1 x half> %0, <vscale x 1 x half> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
1489; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f16_nxv1f16:
1490; CHECK:       # %bb.0: # %entry
1491; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
1492; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
1493; CHECK-NEXT:    ret
1494entry:
1495  %a = call <vscale x 1 x half> @llvm.riscv.vslidedown.mask.nxv1f16(
1496    <vscale x 1 x half> %0,
1497    <vscale x 1 x half> %1,
1498    i32 9,
1499    <vscale x 1 x i1> %2,
1500    i32 %3)
1501
1502  ret <vscale x 1 x half> %a
1503}
1504
1505declare <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
1506  <vscale x 2 x half>,
1507  <vscale x 2 x half>,
1508  i32,
1509  i32);
1510
1511define <vscale x 2 x half> @intrinsic_vslidedown_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, i32 %3) nounwind {
1512; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f16_nxv2f16:
1513; CHECK:       # %bb.0: # %entry
1514; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1515; CHECK-NEXT:    vslidedown.vx v8, v9, a0
1516; CHECK-NEXT:    ret
1517entry:
1518  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
1519    <vscale x 2 x half> %0,
1520    <vscale x 2 x half> %1,
1521    i32 %2,
1522    i32 %3)
1523
1524  ret <vscale x 2 x half> %a
1525}
1526
1527declare <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
1528  <vscale x 2 x half>,
1529  <vscale x 2 x half>,
1530  i32,
1531  <vscale x 2 x i1>,
1532  i32);
1533
1534define <vscale x 2 x half> @intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
1535; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f16_nxv2f16:
1536; CHECK:       # %bb.0: # %entry
1537; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1538; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
1539; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vslidedown_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x half> %a
}

define <vscale x 2 x half> @intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16(<vscale x 2 x half> %0, <vscale x 2 x half> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f16_nxv2f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x half> @llvm.riscv.vslidedown.mask.nxv2f16(
    <vscale x 2 x half> %0,
    <vscale x 2 x half> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32,
  i32);

define <vscale x 4 x half> @intrinsic_vslidedown_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vslidedown_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x half> %a
}

define <vscale x 4 x half> @intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vslidedown.mask.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32,
  i32);

define <vscale x 8 x half> @intrinsic_vslidedown_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
  <vscale x 8 x half>,
  <vscale x 8 x half>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x half> @intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vslidedown_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 9,
    i32 %2)

  ret <vscale x 8 x half> %a
}

define <vscale x 8 x half> @intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16(<vscale x 8 x half> %0, <vscale x 8 x half> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f16_nxv8f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x half> @llvm.riscv.vslidedown.mask.nxv8f16(
    <vscale x 8 x half> %0,
    <vscale x 8 x half> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32,
  i32);

define <vscale x 16 x half> @intrinsic_vslidedown_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
  <vscale x 16 x half>,
  <vscale x 16 x half>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x half> @intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x half> %a
}

define <vscale x 16 x half> @intrinsic_vslidedown_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 9,
    i32 %2)

  ret <vscale x 16 x half> %a
}

define <vscale x 16 x half> @intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16(<vscale x 16 x half> %0, <vscale x 16 x half> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv16f16_nxv16f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x half> @llvm.riscv.vslidedown.mask.nxv16f16(
    <vscale x 16 x half> %0,
    <vscale x 16 x half> %1,
    i32 9,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x half> %a
}

declare <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32,
  i32);

define <vscale x 1 x float> @intrinsic_vslidedown_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
  <vscale x 1 x float>,
  <vscale x 1 x float>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x float> @intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x float> %a
}

define <vscale x 1 x float> @intrinsic_vslidedown_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 9,
    i32 %2)

  ret <vscale x 1 x float> %a
}

define <vscale x 1 x float> @intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32(<vscale x 1 x float> %0, <vscale x 1 x float> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f32_nxv1f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32(
    <vscale x 1 x float> %0,
    <vscale x 1 x float> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32,
  i32);

define <vscale x 2 x float> @intrinsic_vslidedown_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

define <vscale x 2 x float> @intrinsic_vslidedown_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x float> %a
}

define <vscale x 2 x float> @intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32,
  i32);

define <vscale x 4 x float> @intrinsic_vslidedown_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
  <vscale x 4 x float>,
  <vscale x 4 x float>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x float> @intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x float> %a
}

define <vscale x 4 x float> @intrinsic_vslidedown_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x float> %a
}

define <vscale x 4 x float> @intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32(<vscale x 4 x float> %0, <vscale x 4 x float> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f32_nxv4f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32(
    <vscale x 4 x float> %0,
    <vscale x 4 x float> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32,
  i32);

define <vscale x 8 x float> @intrinsic_vslidedown_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
  <vscale x 8 x float>,
  <vscale x 8 x float>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x float> @intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x float> %a
}

define <vscale x 8 x float> @intrinsic_vslidedown_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 9,
    i32 %2)

  ret <vscale x 8 x float> %a
}

define <vscale x 8 x float> @intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32(<vscale x 8 x float> %0, <vscale x 8 x float> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv8f32_nxv8f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32(
    <vscale x 8 x float> %0,
    <vscale x 8 x float> %1,
    i32 9,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32,
  i32);

define <vscale x 1 x double> @intrinsic_vslidedown_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

define <vscale x 1 x double> @intrinsic_vslidedown_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 9,
    i32 %2)

  ret <vscale x 1 x double> %a
}

define <vscale x 1 x double> @intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v9, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    i32 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32,
  i32);

define <vscale x 2 x double> @intrinsic_vslidedown_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
  <vscale x 2 x double>,
  <vscale x 2 x double>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x double> @intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x double> %a
}

define <vscale x 2 x double> @intrinsic_vslidedown_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 9,
    i32 %2)

  ret <vscale x 2 x double> %a
}

define <vscale x 2 x double> @intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64(<vscale x 2 x double> %0, <vscale x 2 x double> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv2f64_nxv2f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v10, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64(
    <vscale x 2 x double> %0,
    <vscale x 2 x double> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32,
  i32);

define <vscale x 4 x double> @intrinsic_vslidedown_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vx_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}

declare <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
  <vscale x 4 x double>,
  <vscale x 4 x double>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x double> @intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vx_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x double> %a
}

define <vscale x 4 x double> @intrinsic_vslidedown_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_vi_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 9,
    i32 %2)

  ret <vscale x 4 x double> %a
}

define <vscale x 4 x double> @intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64(<vscale x 4 x double> %0, <vscale x 4 x double> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vslidedown_mask_vi_nxv4f64_nxv4f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vslidedown.vi v8, v12, 9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64(
    <vscale x 4 x double> %0,
    <vscale x 4 x double> %1,
    i32 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x double> %a
}