1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
4 // RUN: -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
5
6 #include <riscv_vector.h>
7
8 //
9 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8(
10 // CHECK-RV64-NEXT: entry:
11 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
12 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
13 //
// i8, LMUL=1/8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i8.i64.
vint8mf8_t test_vslidedown_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8mf8(dst, src, offset, vl);
}
18
19 //
20 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4(
21 // CHECK-RV64-NEXT: entry:
22 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
23 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
24 //
// i8, LMUL=1/4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i8.i64.
vint8mf4_t test_vslidedown_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8mf4(dst, src, offset, vl);
}
29
30 //
31 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2(
32 // CHECK-RV64-NEXT: entry:
33 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
34 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
35 //
// i8, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i8.i64.
vint8mf2_t test_vslidedown_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8mf2(dst, src, offset, vl);
}
40
41 //
42 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1(
43 // CHECK-RV64-NEXT: entry:
44 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
45 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
46 //
// i8, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i8.i64.
vint8m1_t test_vslidedown_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
                                  size_t vl) {
  return vslidedown_vx_i8m1(dst, src, offset, vl);
}
51
52 //
53 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2(
54 // CHECK-RV64-NEXT: entry:
55 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
56 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
57 //
// i8, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16i8.i64.
vint8m2_t test_vslidedown_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
                                  size_t vl) {
  return vslidedown_vx_i8m2(dst, src, offset, vl);
}
62
63 //
64 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4(
65 // CHECK-RV64-NEXT: entry:
66 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
67 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
68 //
// i8, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv32i8.i64.
vint8m4_t test_vslidedown_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
                                  size_t vl) {
  return vslidedown_vx_i8m4(dst, src, offset, vl);
}
73
74 //
75 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8(
76 // CHECK-RV64-NEXT: entry:
77 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
78 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
79 //
// i8, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv64i8.i64.
vint8m8_t test_vslidedown_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
                                  size_t vl) {
  return vslidedown_vx_i8m8(dst, src, offset, vl);
}
84
85 //
86 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4(
87 // CHECK-RV64-NEXT: entry:
88 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
89 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
90 //
// i16, LMUL=1/4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i16.i64.
vint16mf4_t test_vslidedown_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_i16mf4(dst, src, offset, vl);
}
95
96 //
97 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2(
98 // CHECK-RV64-NEXT: entry:
99 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
101 //
// i16, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i16.i64.
vint16mf2_t test_vslidedown_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_i16mf2(dst, src, offset, vl);
}
106
107 //
108 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1(
109 // CHECK-RV64-NEXT: entry:
110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
112 //
// i16, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i16.i64.
vint16m1_t test_vslidedown_vx_i16m1(vint16m1_t dst, vint16m1_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i16m1(dst, src, offset, vl);
}
117
118 //
119 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2(
120 // CHECK-RV64-NEXT: entry:
121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
122 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
123 //
// i16, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i16.i64.
vint16m2_t test_vslidedown_vx_i16m2(vint16m2_t dst, vint16m2_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i16m2(dst, src, offset, vl);
}
128
129 //
130 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4(
131 // CHECK-RV64-NEXT: entry:
132 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
133 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
134 //
// i16, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16i16.i64.
vint16m4_t test_vslidedown_vx_i16m4(vint16m4_t dst, vint16m4_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i16m4(dst, src, offset, vl);
}
139
140 //
141 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8(
142 // CHECK-RV64-NEXT: entry:
143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
144 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
145 //
// i16, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv32i16.i64.
vint16m8_t test_vslidedown_vx_i16m8(vint16m8_t dst, vint16m8_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i16m8(dst, src, offset, vl);
}
150
151 //
152 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2(
153 // CHECK-RV64-NEXT: entry:
154 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
155 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
156 //
// i32, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i32.i64.
vint32mf2_t test_vslidedown_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_i32mf2(dst, src, offset, vl);
}
161
162 //
163 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1(
164 // CHECK-RV64-NEXT: entry:
165 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
166 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
167 //
// i32, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i32.i64.
vint32m1_t test_vslidedown_vx_i32m1(vint32m1_t dst, vint32m1_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i32m1(dst, src, offset, vl);
}
172
173 //
174 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2(
175 // CHECK-RV64-NEXT: entry:
176 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
177 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
178 //
// i32, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i32.i64.
vint32m2_t test_vslidedown_vx_i32m2(vint32m2_t dst, vint32m2_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i32m2(dst, src, offset, vl);
}
183
184 //
185 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4(
186 // CHECK-RV64-NEXT: entry:
187 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
188 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
189 //
// i32, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i32.i64.
vint32m4_t test_vslidedown_vx_i32m4(vint32m4_t dst, vint32m4_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i32m4(dst, src, offset, vl);
}
194
195 //
196 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8(
197 // CHECK-RV64-NEXT: entry:
198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
200 //
// i32, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16i32.i64.
vint32m8_t test_vslidedown_vx_i32m8(vint32m8_t dst, vint32m8_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i32m8(dst, src, offset, vl);
}
205
206 //
207 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1(
208 // CHECK-RV64-NEXT: entry:
209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
211 //
// i64, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i64.i64.
vint64m1_t test_vslidedown_vx_i64m1(vint64m1_t dst, vint64m1_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i64m1(dst, src, offset, vl);
}
216
217 //
218 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2(
219 // CHECK-RV64-NEXT: entry:
220 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
221 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
222 //
// i64, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i64.i64.
vint64m2_t test_vslidedown_vx_i64m2(vint64m2_t dst, vint64m2_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i64m2(dst, src, offset, vl);
}
227
228 //
229 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4(
230 // CHECK-RV64-NEXT: entry:
231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
232 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
233 //
// i64, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i64.i64.
vint64m4_t test_vslidedown_vx_i64m4(vint64m4_t dst, vint64m4_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i64m4(dst, src, offset, vl);
}
238
239 //
240 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8(
241 // CHECK-RV64-NEXT: entry:
242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
243 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
244 //
// i64, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i64.i64.
vint64m8_t test_vslidedown_vx_i64m8(vint64m8_t dst, vint64m8_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i64m8(dst, src, offset, vl);
}
249
250 //
251 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8(
252 // CHECK-RV64-NEXT: entry:
253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
254 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
255 //
// u8, LMUL=1/8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i8.i64.
vuint8mf8_t test_vslidedown_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u8mf8(dst, src, offset, vl);
}
260
261 //
262 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4(
263 // CHECK-RV64-NEXT: entry:
264 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
265 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
266 //
// u8, LMUL=1/4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i8.i64.
vuint8mf4_t test_vslidedown_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u8mf4(dst, src, offset, vl);
}
271
272 //
273 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2(
274 // CHECK-RV64-NEXT: entry:
275 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
276 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
277 //
// u8, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i8.i64.
vuint8mf2_t test_vslidedown_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u8mf2(dst, src, offset, vl);
}
282
283 //
284 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1(
285 // CHECK-RV64-NEXT: entry:
286 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
287 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
288 //
// u8, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i8.i64.
vuint8m1_t test_vslidedown_vx_u8m1(vuint8m1_t dst, vuint8m1_t src,
                                   size_t offset, size_t vl) {
  return vslidedown_vx_u8m1(dst, src, offset, vl);
}
293
294 //
295 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2(
296 // CHECK-RV64-NEXT: entry:
297 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
298 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
299 //
// u8, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16i8.i64.
vuint8m2_t test_vslidedown_vx_u8m2(vuint8m2_t dst, vuint8m2_t src,
                                   size_t offset, size_t vl) {
  return vslidedown_vx_u8m2(dst, src, offset, vl);
}
304
305 //
306 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4(
307 // CHECK-RV64-NEXT: entry:
308 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
309 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
310 //
// u8, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv32i8.i64.
vuint8m4_t test_vslidedown_vx_u8m4(vuint8m4_t dst, vuint8m4_t src,
                                   size_t offset, size_t vl) {
  return vslidedown_vx_u8m4(dst, src, offset, vl);
}
315
316 //
317 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8(
318 // CHECK-RV64-NEXT: entry:
319 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
320 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
321 //
// u8, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv64i8.i64.
vuint8m8_t test_vslidedown_vx_u8m8(vuint8m8_t dst, vuint8m8_t src,
                                   size_t offset, size_t vl) {
  return vslidedown_vx_u8m8(dst, src, offset, vl);
}
326
327 //
328 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4(
329 // CHECK-RV64-NEXT: entry:
330 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
331 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
332 //
// u16, LMUL=1/4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i16.i64.
vuint16mf4_t test_vslidedown_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
                                       size_t offset, size_t vl) {
  return vslidedown_vx_u16mf4(dst, src, offset, vl);
}
337
338 //
339 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2(
340 // CHECK-RV64-NEXT: entry:
341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
342 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
343 //
// u16, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i16.i64.
vuint16mf2_t test_vslidedown_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
                                       size_t offset, size_t vl) {
  return vslidedown_vx_u16mf2(dst, src, offset, vl);
}
348
349 //
350 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1(
351 // CHECK-RV64-NEXT: entry:
352 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
353 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
354 //
// u16, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i16.i64.
vuint16m1_t test_vslidedown_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u16m1(dst, src, offset, vl);
}
359
360 //
361 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2(
362 // CHECK-RV64-NEXT: entry:
363 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
364 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
365 //
// u16, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i16.i64.
vuint16m2_t test_vslidedown_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u16m2(dst, src, offset, vl);
}
370
371 //
372 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4(
373 // CHECK-RV64-NEXT: entry:
374 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
375 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
376 //
// u16, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16i16.i64.
vuint16m4_t test_vslidedown_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u16m4(dst, src, offset, vl);
}
381
382 //
383 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8(
384 // CHECK-RV64-NEXT: entry:
385 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
386 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
387 //
// u16, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv32i16.i64.
vuint16m8_t test_vslidedown_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u16m8(dst, src, offset, vl);
}
392
393 //
394 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2(
395 // CHECK-RV64-NEXT: entry:
396 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
397 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
398 //
// u32, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i32.i64.
vuint32mf2_t test_vslidedown_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
                                       size_t offset, size_t vl) {
  return vslidedown_vx_u32mf2(dst, src, offset, vl);
}
403
404 //
405 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1(
406 // CHECK-RV64-NEXT: entry:
407 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
408 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
409 //
// u32, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i32.i64.
vuint32m1_t test_vslidedown_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u32m1(dst, src, offset, vl);
}
414
415 //
416 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2(
417 // CHECK-RV64-NEXT: entry:
418 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
419 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
420 //
// u32, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i32.i64.
vuint32m2_t test_vslidedown_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u32m2(dst, src, offset, vl);
}
425
426 //
427 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4(
428 // CHECK-RV64-NEXT: entry:
429 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
430 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
431 //
// u32, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i32.i64.
vuint32m4_t test_vslidedown_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u32m4(dst, src, offset, vl);
}
436
437 //
438 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8(
439 // CHECK-RV64-NEXT: entry:
440 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
441 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
442 //
// u32, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16i32.i64.
vuint32m8_t test_vslidedown_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u32m8(dst, src, offset, vl);
}
447
448 //
449 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1(
450 // CHECK-RV64-NEXT: entry:
451 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
452 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
453 //
// u64, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1i64.i64.
vuint64m1_t test_vslidedown_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u64m1(dst, src, offset, vl);
}
458
459 //
460 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2(
461 // CHECK-RV64-NEXT: entry:
462 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
463 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
464 //
// u64, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2i64.i64.
vuint64m2_t test_vslidedown_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u64m2(dst, src, offset, vl);
}
469
470 //
471 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4(
472 // CHECK-RV64-NEXT: entry:
473 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
474 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
475 //
// u64, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4i64.i64.
vuint64m4_t test_vslidedown_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u64m4(dst, src, offset, vl);
}
480
481 //
482 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8(
483 // CHECK-RV64-NEXT: entry:
484 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
485 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
486 //
// u64, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8i64.i64.
vuint64m8_t test_vslidedown_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
                                     size_t offset, size_t vl) {
  return vslidedown_vx_u64m8(dst, src, offset, vl);
}
491
492 //
493 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2(
494 // CHECK-RV64-NEXT: entry:
495 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
497 //
// f32, LMUL=1/2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1f32.i64.
vfloat32mf2_t test_vslidedown_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
                                        size_t offset, size_t vl) {
  return vslidedown_vx_f32mf2(dst, src, offset, vl);
}
502
503 //
504 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1(
505 // CHECK-RV64-NEXT: entry:
506 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
507 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
508 //
// f32, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2f32.i64.
vfloat32m1_t test_vslidedown_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f32m1(dst, src, offset, vl);
}
513
514 //
515 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2(
516 // CHECK-RV64-NEXT: entry:
517 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
518 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
519 //
// f32, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4f32.i64.
vfloat32m2_t test_vslidedown_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f32m2(dst, src, offset, vl);
}
524
525 //
526 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4(
527 // CHECK-RV64-NEXT: entry:
528 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
529 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
530 //
// f32, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8f32.i64.
vfloat32m4_t test_vslidedown_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f32m4(dst, src, offset, vl);
}
535
536 //
537 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8(
538 // CHECK-RV64-NEXT: entry:
539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
540 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
541 //
// f32, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv16f32.i64.
vfloat32m8_t test_vslidedown_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f32m8(dst, src, offset, vl);
}
546
547 //
548 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1(
549 // CHECK-RV64-NEXT: entry:
550 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
551 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
552 //
// f64, LMUL=1: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv1f64.i64.
vfloat64m1_t test_vslidedown_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f64m1(dst, src, offset, vl);
}
557
558 //
559 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2(
560 // CHECK-RV64-NEXT: entry:
561 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
562 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
563 //
// f64, LMUL=2: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv2f64.i64.
vfloat64m2_t test_vslidedown_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f64m2(dst, src, offset, vl);
}
568
569 //
570 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4(
571 // CHECK-RV64-NEXT: entry:
572 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
573 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
574 //
// f64, LMUL=4: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv4f64.i64.
vfloat64m4_t test_vslidedown_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f64m4(dst, src, offset, vl);
}
579
580 //
581 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8(
582 // CHECK-RV64-NEXT: entry:
583 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
584 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
585 //
// f64, LMUL=8: per the CHECK lines above, lowers to @llvm.riscv.vslidedown.nxv8f64.i64.
vfloat64m8_t test_vslidedown_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
                                      size_t offset, size_t vl) {
  return vslidedown_vx_f64m8(dst, src, offset, vl);
}
590
591 //
592 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf8_m(
593 // CHECK-RV64-NEXT: entry:
594 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
595 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
596 //
// Masked vslidedown.vx on vint8mf8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i8.i64.
vint8mf8_t test_vslidedown_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
                                      vint8mf8_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i8mf8_m(mask, dst, src, offset, vl);
}
602
603 //
604 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf4_m(
605 // CHECK-RV64-NEXT: entry:
606 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
607 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
608 //
// Masked vslidedown.vx on vint8mf4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i8.i64.
vint8mf4_t test_vslidedown_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
                                      vint8mf4_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i8mf4_m(mask, dst, src, offset, vl);
}
614
615 //
616 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8mf2_m(
617 // CHECK-RV64-NEXT: entry:
618 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
619 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
620 //
// Masked vslidedown.vx on vint8mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i8.i64.
vint8mf2_t test_vslidedown_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
                                      vint8mf2_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i8mf2_m(mask, dst, src, offset, vl);
}
626
627 //
628 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m1_m(
629 // CHECK-RV64-NEXT: entry:
630 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
631 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
632 //
// Masked vslidedown.vx on vint8m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i8.i64.
vint8m1_t test_vslidedown_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8m1_m(mask, dst, src, offset, vl);
}
637
638 //
639 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m2_m(
640 // CHECK-RV64-NEXT: entry:
641 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
642 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
643 //
// Masked vslidedown.vx on vint8m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv16i8.i64.
vint8m2_t test_vslidedown_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8m2_m(mask, dst, src, offset, vl);
}
648
649 //
650 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m4_m(
651 // CHECK-RV64-NEXT: entry:
652 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
653 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
654 //
// Masked vslidedown.vx on vint8m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv32i8.i64.
vint8m4_t test_vslidedown_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8m4_m(mask, dst, src, offset, vl);
}
659
660 //
661 // CHECK-RV64-LABEL: @test_vslidedown_vx_i8m8_m(
662 // CHECK-RV64-NEXT: entry:
663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
664 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
665 //
// Masked vslidedown.vx on vint8m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv64i8.i64.
vint8m8_t test_vslidedown_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
                                    size_t offset, size_t vl) {
  return vslidedown_vx_i8m8_m(mask, dst, src, offset, vl);
}
670
671 //
672 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf4_m(
673 // CHECK-RV64-NEXT: entry:
674 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
675 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
676 //
// Masked vslidedown.vx on vint16mf4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i16.i64.
vint16mf4_t test_vslidedown_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
                                        vint16mf4_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_i16mf4_m(mask, dst, src, offset, vl);
}
682
683 //
684 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16mf2_m(
685 // CHECK-RV64-NEXT: entry:
686 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
687 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
688 //
// Masked vslidedown.vx on vint16mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i16.i64.
vint16mf2_t test_vslidedown_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
                                        vint16mf2_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_i16mf2_m(mask, dst, src, offset, vl);
}
694
695 //
696 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m1_m(
697 // CHECK-RV64-NEXT: entry:
698 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
699 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
700 //
// Masked vslidedown.vx on vint16m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i16.i64.
vint16m1_t test_vslidedown_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
                                      vint16m1_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i16m1_m(mask, dst, src, offset, vl);
}
706
707 //
708 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m2_m(
709 // CHECK-RV64-NEXT: entry:
710 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
711 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
712 //
// Masked vslidedown.vx on vint16m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i16.i64.
vint16m2_t test_vslidedown_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
                                      vint16m2_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i16m2_m(mask, dst, src, offset, vl);
}
718
719 //
720 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m4_m(
721 // CHECK-RV64-NEXT: entry:
722 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
723 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
724 //
// Masked vslidedown.vx on vint16m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv16i16.i64.
vint16m4_t test_vslidedown_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
                                      vint16m4_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i16m4_m(mask, dst, src, offset, vl);
}
730
731 //
732 // CHECK-RV64-LABEL: @test_vslidedown_vx_i16m8_m(
733 // CHECK-RV64-NEXT: entry:
734 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
735 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
736 //
// Masked vslidedown.vx on vint16m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv32i16.i64.
vint16m8_t test_vslidedown_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
                                      vint16m8_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i16m8_m(mask, dst, src, offset, vl);
}
742
743 //
744 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32mf2_m(
745 // CHECK-RV64-NEXT: entry:
746 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
747 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
748 //
// Masked vslidedown.vx on vint32mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i32.i64.
vint32mf2_t test_vslidedown_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
                                        vint32mf2_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_i32mf2_m(mask, dst, src, offset, vl);
}
754
755 //
756 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m1_m(
757 // CHECK-RV64-NEXT: entry:
758 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
759 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
760 //
// Masked vslidedown.vx on vint32m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i32.i64.
vint32m1_t test_vslidedown_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
                                      vint32m1_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i32m1_m(mask, dst, src, offset, vl);
}
766
767 //
768 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m2_m(
769 // CHECK-RV64-NEXT: entry:
770 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
771 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
772 //
// Masked vslidedown.vx on vint32m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i32.i64.
vint32m2_t test_vslidedown_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
                                      vint32m2_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i32m2_m(mask, dst, src, offset, vl);
}
778
779 //
780 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m4_m(
781 // CHECK-RV64-NEXT: entry:
782 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
783 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
784 //
// Masked vslidedown.vx on vint32m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i32.i64.
vint32m4_t test_vslidedown_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
                                      vint32m4_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i32m4_m(mask, dst, src, offset, vl);
}
790
791 //
792 // CHECK-RV64-LABEL: @test_vslidedown_vx_i32m8_m(
793 // CHECK-RV64-NEXT: entry:
794 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
795 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
796 //
// Masked vslidedown.vx on vint32m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv16i32.i64.
vint32m8_t test_vslidedown_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
                                      vint32m8_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i32m8_m(mask, dst, src, offset, vl);
}
802
803 //
804 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m1_m(
805 // CHECK-RV64-NEXT: entry:
806 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
807 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
808 //
// Masked vslidedown.vx on vint64m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i64.i64.
vint64m1_t test_vslidedown_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
                                      vint64m1_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i64m1_m(mask, dst, src, offset, vl);
}
814
815 //
816 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m2_m(
817 // CHECK-RV64-NEXT: entry:
818 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
819 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
820 //
// Masked vslidedown.vx on vint64m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i64.i64.
vint64m2_t test_vslidedown_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
                                      vint64m2_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i64m2_m(mask, dst, src, offset, vl);
}
826
827 //
828 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m4_m(
829 // CHECK-RV64-NEXT: entry:
830 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
831 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
832 //
// Masked vslidedown.vx on vint64m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i64.i64.
vint64m4_t test_vslidedown_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
                                      vint64m4_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i64m4_m(mask, dst, src, offset, vl);
}
838
839 //
840 // CHECK-RV64-LABEL: @test_vslidedown_vx_i64m8_m(
841 // CHECK-RV64-NEXT: entry:
842 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
843 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
844 //
// Masked vslidedown.vx on vint64m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i64.i64.
vint64m8_t test_vslidedown_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
                                      vint64m8_t src, size_t offset,
                                      size_t vl) {
  return vslidedown_vx_i64m8_m(mask, dst, src, offset, vl);
}
850
851 //
852 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf8_m(
853 // CHECK-RV64-NEXT: entry:
854 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslidedown.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
855 // CHECK-RV64-NEXT: ret <vscale x 1 x i8> [[TMP0]]
856 //
// Masked vslidedown.vx on vuint8mf8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i8.i64 (unsigned shares the integer intrinsic).
vuint8mf8_t test_vslidedown_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
                                       vuint8mf8_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u8mf8_m(mask, dst, src, offset, vl);
}
862
863 //
864 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf4_m(
865 // CHECK-RV64-NEXT: entry:
866 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslidedown.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
867 // CHECK-RV64-NEXT: ret <vscale x 2 x i8> [[TMP0]]
868 //
// Masked vslidedown.vx on vuint8mf4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i8.i64.
vuint8mf4_t test_vslidedown_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
                                       vuint8mf4_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u8mf4_m(mask, dst, src, offset, vl);
}
874
875 //
876 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8mf2_m(
877 // CHECK-RV64-NEXT: entry:
878 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslidedown.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
879 // CHECK-RV64-NEXT: ret <vscale x 4 x i8> [[TMP0]]
880 //
// Masked vslidedown.vx on vuint8mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i8.i64.
vuint8mf2_t test_vslidedown_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
                                       vuint8mf2_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u8mf2_m(mask, dst, src, offset, vl);
}
886
887 //
888 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m1_m(
889 // CHECK-RV64-NEXT: entry:
890 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslidedown.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
891 // CHECK-RV64-NEXT: ret <vscale x 8 x i8> [[TMP0]]
892 //
// Masked vslidedown.vx on vuint8m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i8.i64.
vuint8m1_t test_vslidedown_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
                                     vuint8m1_t src, size_t offset, size_t vl) {
  return vslidedown_vx_u8m1_m(mask, dst, src, offset, vl);
}
897
898 //
899 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m2_m(
900 // CHECK-RV64-NEXT: entry:
901 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslidedown.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
902 // CHECK-RV64-NEXT: ret <vscale x 16 x i8> [[TMP0]]
903 //
// Masked vslidedown.vx on vuint8m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv16i8.i64.
vuint8m2_t test_vslidedown_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
                                     vuint8m2_t src, size_t offset, size_t vl) {
  return vslidedown_vx_u8m2_m(mask, dst, src, offset, vl);
}
908
909 //
910 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m4_m(
911 // CHECK-RV64-NEXT: entry:
912 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslidedown.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
913 // CHECK-RV64-NEXT: ret <vscale x 32 x i8> [[TMP0]]
914 //
// Masked vslidedown.vx on vuint8m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv32i8.i64.
vuint8m4_t test_vslidedown_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
                                     vuint8m4_t src, size_t offset, size_t vl) {
  return vslidedown_vx_u8m4_m(mask, dst, src, offset, vl);
}
919
920 //
921 // CHECK-RV64-LABEL: @test_vslidedown_vx_u8m8_m(
922 // CHECK-RV64-NEXT: entry:
923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslidedown.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
924 // CHECK-RV64-NEXT: ret <vscale x 64 x i8> [[TMP0]]
925 //
// Masked vslidedown.vx on vuint8m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv64i8.i64.
vuint8m8_t test_vslidedown_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
                                     vuint8m8_t src, size_t offset, size_t vl) {
  return vslidedown_vx_u8m8_m(mask, dst, src, offset, vl);
}
930
931 //
932 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf4_m(
933 // CHECK-RV64-NEXT: entry:
934 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslidedown.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
935 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
936 //
// Masked vslidedown.vx on vuint16mf4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i16.i64.
vuint16mf4_t test_vslidedown_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
                                         vuint16mf4_t src, size_t offset,
                                         size_t vl) {
  return vslidedown_vx_u16mf4_m(mask, dst, src, offset, vl);
}
942
943 //
944 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16mf2_m(
945 // CHECK-RV64-NEXT: entry:
946 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslidedown.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
947 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
948 //
// Masked vslidedown.vx on vuint16mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i16.i64.
vuint16mf2_t test_vslidedown_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
                                         vuint16mf2_t src, size_t offset,
                                         size_t vl) {
  return vslidedown_vx_u16mf2_m(mask, dst, src, offset, vl);
}
954
955 //
956 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m1_m(
957 // CHECK-RV64-NEXT: entry:
958 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslidedown.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
959 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
960 //
// Masked vslidedown.vx on vuint16m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i16.i64.
vuint16m1_t test_vslidedown_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
                                       vuint16m1_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u16m1_m(mask, dst, src, offset, vl);
}
966
967 //
968 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m2_m(
969 // CHECK-RV64-NEXT: entry:
970 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslidedown.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
971 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
972 //
// Masked vslidedown.vx on vuint16m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i16.i64.
vuint16m2_t test_vslidedown_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
                                       vuint16m2_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u16m2_m(mask, dst, src, offset, vl);
}
978
979 //
980 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m4_m(
981 // CHECK-RV64-NEXT: entry:
982 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslidedown.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
983 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
984 //
// Masked vslidedown.vx on vuint16m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv16i16.i64.
vuint16m4_t test_vslidedown_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
                                       vuint16m4_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u16m4_m(mask, dst, src, offset, vl);
}
990
991 //
992 // CHECK-RV64-LABEL: @test_vslidedown_vx_u16m8_m(
993 // CHECK-RV64-NEXT: entry:
994 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslidedown.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
995 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
996 //
// Masked vslidedown.vx on vuint16m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv32i16.i64.
vuint16m8_t test_vslidedown_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
                                       vuint16m8_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u16m8_m(mask, dst, src, offset, vl);
}
1002
1003 //
1004 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32mf2_m(
1005 // CHECK-RV64-NEXT: entry:
1006 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslidedown.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1007 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1008 //
// Masked vslidedown.vx on vuint32mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i32.i64.
vuint32mf2_t test_vslidedown_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
                                         vuint32mf2_t src, size_t offset,
                                         size_t vl) {
  return vslidedown_vx_u32mf2_m(mask, dst, src, offset, vl);
}
1014
1015 //
1016 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m1_m(
1017 // CHECK-RV64-NEXT: entry:
1018 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslidedown.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1019 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1020 //
// Masked vslidedown.vx on vuint32m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i32.i64.
vuint32m1_t test_vslidedown_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
                                       vuint32m1_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u32m1_m(mask, dst, src, offset, vl);
}
1026
1027 //
1028 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m2_m(
1029 // CHECK-RV64-NEXT: entry:
1030 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslidedown.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1031 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1032 //
// Masked vslidedown.vx on vuint32m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i32.i64.
vuint32m2_t test_vslidedown_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
                                       vuint32m2_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u32m2_m(mask, dst, src, offset, vl);
}
1038
1039 //
1040 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m4_m(
1041 // CHECK-RV64-NEXT: entry:
1042 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslidedown.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1043 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1044 //
// Masked vslidedown.vx on vuint32m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i32.i64.
vuint32m4_t test_vslidedown_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
                                       vuint32m4_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u32m4_m(mask, dst, src, offset, vl);
}
1050
1051 //
1052 // CHECK-RV64-LABEL: @test_vslidedown_vx_u32m8_m(
1053 // CHECK-RV64-NEXT: entry:
1054 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslidedown.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1055 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1056 //
// Masked vslidedown.vx on vuint32m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv16i32.i64.
vuint32m8_t test_vslidedown_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
                                       vuint32m8_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u32m8_m(mask, dst, src, offset, vl);
}
1062
1063 //
1064 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m1_m(
1065 // CHECK-RV64-NEXT: entry:
1066 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslidedown.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1067 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1068 //
// Masked vslidedown.vx on vuint64m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1i64.i64.
vuint64m1_t test_vslidedown_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
                                       vuint64m1_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u64m1_m(mask, dst, src, offset, vl);
}
1074
1075 //
1076 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m2_m(
1077 // CHECK-RV64-NEXT: entry:
1078 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslidedown.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1079 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1080 //
// Masked vslidedown.vx on vuint64m2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2i64.i64.
vuint64m2_t test_vslidedown_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
                                       vuint64m2_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u64m2_m(mask, dst, src, offset, vl);
}
1086
1087 //
1088 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m4_m(
1089 // CHECK-RV64-NEXT: entry:
1090 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslidedown.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1091 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1092 //
// Masked vslidedown.vx on vuint64m4_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv4i64.i64.
vuint64m4_t test_vslidedown_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
                                       vuint64m4_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u64m4_m(mask, dst, src, offset, vl);
}
1098
1099 //
1100 // CHECK-RV64-LABEL: @test_vslidedown_vx_u64m8_m(
1101 // CHECK-RV64-NEXT: entry:
1102 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslidedown.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1103 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1104 //
// Masked vslidedown.vx on vuint64m8_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv8i64.i64.
vuint64m8_t test_vslidedown_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
                                       vuint64m8_t src, size_t offset,
                                       size_t vl) {
  return vslidedown_vx_u64m8_m(mask, dst, src, offset, vl);
}
1110
1111 //
1112 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32mf2_m(
1113 // CHECK-RV64-NEXT: entry:
1114 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslidedown.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1115 // CHECK-RV64-NEXT: ret <vscale x 1 x float> [[TMP0]]
1116 //
// Masked vslidedown.vx on vfloat32mf2_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv1f32.i64.
vfloat32mf2_t test_vslidedown_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
                                          vfloat32mf2_t src, size_t offset,
                                          size_t vl) {
  return vslidedown_vx_f32mf2_m(mask, dst, src, offset, vl);
}
1122
1123 //
1124 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m1_m(
1125 // CHECK-RV64-NEXT: entry:
1126 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslidedown.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1127 // CHECK-RV64-NEXT: ret <vscale x 2 x float> [[TMP0]]
1128 //
// Masked vslidedown.vx on vfloat32m1_t; CHECK expects a call to
// @llvm.riscv.vslidedown.mask.nxv2f32.i64.
vfloat32m1_t test_vslidedown_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
                                        vfloat32m1_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f32m1_m(mask, dst, src, offset, vl);
}
1134
1135 //
1136 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m2_m(
1137 // CHECK-RV64-NEXT: entry:
1138 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslidedown.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1139 // CHECK-RV64-NEXT: ret <vscale x 4 x float> [[TMP0]]
1140 //
// Checks that vslidedown_vx_f32m2_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv4f32.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat32m2_t test_vslidedown_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
                                        vfloat32m2_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f32m2_m(mask, dst, src, offset, vl);
}
1146
1147 //
1148 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m4_m(
1149 // CHECK-RV64-NEXT: entry:
1150 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslidedown.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1151 // CHECK-RV64-NEXT: ret <vscale x 8 x float> [[TMP0]]
1152 //
// Checks that vslidedown_vx_f32m4_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv8f32.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat32m4_t test_vslidedown_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
                                        vfloat32m4_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f32m4_m(mask, dst, src, offset, vl);
}
1158
1159 //
1160 // CHECK-RV64-LABEL: @test_vslidedown_vx_f32m8_m(
1161 // CHECK-RV64-NEXT: entry:
1162 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslidedown.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1163 // CHECK-RV64-NEXT: ret <vscale x 16 x float> [[TMP0]]
1164 //
// Checks that vslidedown_vx_f32m8_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv16f32.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat32m8_t test_vslidedown_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
                                        vfloat32m8_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f32m8_m(mask, dst, src, offset, vl);
}
1170
1171 //
1172 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m1_m(
1173 // CHECK-RV64-NEXT: entry:
1174 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslidedown.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1175 // CHECK-RV64-NEXT: ret <vscale x 1 x double> [[TMP0]]
1176 //
// Checks that vslidedown_vx_f64m1_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv1f64.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat64m1_t test_vslidedown_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
                                        vfloat64m1_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f64m1_m(mask, dst, src, offset, vl);
}
1182
1183 //
1184 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m2_m(
1185 // CHECK-RV64-NEXT: entry:
1186 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslidedown.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1187 // CHECK-RV64-NEXT: ret <vscale x 2 x double> [[TMP0]]
1188 //
// Checks that vslidedown_vx_f64m2_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv2f64.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat64m2_t test_vslidedown_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
                                        vfloat64m2_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f64m2_m(mask, dst, src, offset, vl);
}
1194
1195 //
1196 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m4_m(
1197 // CHECK-RV64-NEXT: entry:
1198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslidedown.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1199 // CHECK-RV64-NEXT: ret <vscale x 4 x double> [[TMP0]]
1200 //
// Checks that vslidedown_vx_f64m4_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv4f64.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat64m4_t test_vslidedown_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
                                        vfloat64m4_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f64m4_m(mask, dst, src, offset, vl);
}
1206
1207 //
1208 // CHECK-RV64-LABEL: @test_vslidedown_vx_f64m8_m(
1209 // CHECK-RV64-NEXT: entry:
1210 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslidedown.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1211 // CHECK-RV64-NEXT: ret <vscale x 8 x double> [[TMP0]]
1212 //
// Checks that vslidedown_vx_f64m8_m(mask, dst, src, offset, vl) lowers to
// @llvm.riscv.vslidedown.mask.nxv8f64.i64 as asserted by the autogenerated
// CHECK-RV64 lines above.
vfloat64m8_t test_vslidedown_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
                                        vfloat64m8_t src, size_t offset,
                                        size_t vl) {
  return vslidedown_vx_f64m8_m(mask, dst, src, offset, vl);
}
1218