// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
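
// The functions below exercise the type-overloaded vslideup() intrinsic for
// every supported element type and LMUL. Per the RVV intrinsics spec,
// vslideup(dst, src, offset, vl) writes element i of src to element i + offset
// of the result while elements below offset keep dst's values; the assertions
// above each function check that the call lowers to the corresponding
// @llvm.riscv.vslideup.<type>.i64 intrinsic (masked variants appear further
// down in this file).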

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
                                     size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
                                     size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
                                     size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
                                      size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

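// The _m variants below additionally take a mask as the first argument; per
// the intrinsics spec, masked-off elements keep the value from dst, and each
// call is expected to lower to the @llvm.riscv.vslideup.mask.<type>.i64
// intrinsic checked above the corresponding function.
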
//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
                                    vint8mf8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
                                    vint8mf4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
                                    vint8mf2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
                                      vint16mf4_t src, size_t offset,
                                      size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
                                      vint16mf2_t src, size_t offset,
                                      size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
                                    vint16m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
                                    vint16m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
                                    vint16m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
                                    vint16m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
                                      vint32mf2_t src, size_t offset,
                                      size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
                                    vint32m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
                                    vint32m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
                                    vint32m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
                                    vint32m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
                                    vint64m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
                                    vint64m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
                                    vint64m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
                                    vint64m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
                                     vuint8mf8_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
                                     vuint8mf4_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
                                     vuint8mf2_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
                                   vuint8m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
                                   vuint8m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
                                   vuint8m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
                                   vuint8m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
                                       vuint16mf4_t src, size_t offset,
                                       size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
                                       vuint16mf2_t src, size_t offset,
                                       size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
                                     vuint16m1_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
                                     vuint16m2_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
                                     vuint16m4_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
                                     vuint16m8_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
                                       vuint32mf2_t src, size_t offset,
                                       size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
                                     vuint32m1_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

//
// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1016 // CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
1017 //
test_vslideup_vx_u32m2_m(vbool16_t mask,vuint32m2_t dst,vuint32m2_t src,size_t offset,size_t vl)1018 vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
1019                                      vuint32m2_t src, size_t offset,
1020                                      size_t vl) {
1021   return vslideup(mask, dst, src, offset, vl);
1022 }
1023 
1024 //
1025 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
1026 // CHECK-RV64-NEXT:  entry:
1027 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1028 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
1029 //
test_vslideup_vx_u32m4_m(vbool8_t mask,vuint32m4_t dst,vuint32m4_t src,size_t offset,size_t vl)1030 vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
1031                                      vuint32m4_t src, size_t offset,
1032                                      size_t vl) {
1033   return vslideup(mask, dst, src, offset, vl);
1034 }
1035 
1036 //
1037 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
1038 // CHECK-RV64-NEXT:  entry:
1039 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1040 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
1041 //
test_vslideup_vx_u32m8_m(vbool4_t mask,vuint32m8_t dst,vuint32m8_t src,size_t offset,size_t vl)1042 vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
1043                                      vuint32m8_t src, size_t offset,
1044                                      size_t vl) {
1045   return vslideup(mask, dst, src, offset, vl);
1046 }
1047 
1048 //
1049 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
1050 // CHECK-RV64-NEXT:  entry:
1051 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1052 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
1053 //
test_vslideup_vx_u64m1_m(vbool64_t mask,vuint64m1_t dst,vuint64m1_t src,size_t offset,size_t vl)1054 vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
1055                                      vuint64m1_t src, size_t offset,
1056                                      size_t vl) {
1057   return vslideup(mask, dst, src, offset, vl);
1058 }
1059 
1060 //
1061 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
1062 // CHECK-RV64-NEXT:  entry:
1063 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1064 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1065 //
test_vslideup_vx_u64m2_m(vbool32_t mask,vuint64m2_t dst,vuint64m2_t src,size_t offset,size_t vl)1066 vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
1067                                      vuint64m2_t src, size_t offset,
1068                                      size_t vl) {
1069   return vslideup(mask, dst, src, offset, vl);
1070 }
1071 
1072 //
1073 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
1074 // CHECK-RV64-NEXT:  entry:
1075 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1076 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1077 //
test_vslideup_vx_u64m4_m(vbool16_t mask,vuint64m4_t dst,vuint64m4_t src,size_t offset,size_t vl)1078 vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
1079                                      vuint64m4_t src, size_t offset,
1080                                      size_t vl) {
1081   return vslideup(mask, dst, src, offset, vl);
1082 }
1083 
1084 //
1085 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
1086 // CHECK-RV64-NEXT:  entry:
1087 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1088 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1089 //
test_vslideup_vx_u64m8_m(vbool8_t mask,vuint64m8_t dst,vuint64m8_t src,size_t offset,size_t vl)1090 vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
1091                                      vuint64m8_t src, size_t offset,
1092                                      size_t vl) {
1093   return vslideup(mask, dst, src, offset, vl);
1094 }
1095 
1096 //
1097 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
1098 // CHECK-RV64-NEXT:  entry:
1099 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1100 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
1101 //
test_vslideup_vx_f32mf2_m(vbool64_t mask,vfloat32mf2_t dst,vfloat32mf2_t src,size_t offset,size_t vl)1102 vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
1103                                         vfloat32mf2_t src, size_t offset,
1104                                         size_t vl) {
1105   return vslideup(mask, dst, src, offset, vl);
1106 }
1107 
1108 //
1109 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
1110 // CHECK-RV64-NEXT:  entry:
1111 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1112 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
1113 //
test_vslideup_vx_f32m1_m(vbool32_t mask,vfloat32m1_t dst,vfloat32m1_t src,size_t offset,size_t vl)1114 vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
1115                                       vfloat32m1_t src, size_t offset,
1116                                       size_t vl) {
1117   return vslideup(mask, dst, src, offset, vl);
1118 }
1119 
1120 //
1121 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
1122 // CHECK-RV64-NEXT:  entry:
1123 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1124 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
1125 //
test_vslideup_vx_f32m2_m(vbool16_t mask,vfloat32m2_t dst,vfloat32m2_t src,size_t offset,size_t vl)1126 vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
1127                                       vfloat32m2_t src, size_t offset,
1128                                       size_t vl) {
1129   return vslideup(mask, dst, src, offset, vl);
1130 }
1131 
1132 //
1133 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
1134 // CHECK-RV64-NEXT:  entry:
1135 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1136 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
1137 //
test_vslideup_vx_f32m4_m(vbool8_t mask,vfloat32m4_t dst,vfloat32m4_t src,size_t offset,size_t vl)1138 vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
1139                                       vfloat32m4_t src, size_t offset,
1140                                       size_t vl) {
1141   return vslideup(mask, dst, src, offset, vl);
1142 }
1143 
1144 //
1145 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
1146 // CHECK-RV64-NEXT:  entry:
1147 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1148 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
1149 //
test_vslideup_vx_f32m8_m(vbool4_t mask,vfloat32m8_t dst,vfloat32m8_t src,size_t offset,size_t vl)1150 vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
1151                                       vfloat32m8_t src, size_t offset,
1152                                       size_t vl) {
1153   return vslideup(mask, dst, src, offset, vl);
1154 }
1155 
1156 //
1157 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
1158 // CHECK-RV64-NEXT:  entry:
1159 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1160 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
1161 //
test_vslideup_vx_f64m1_m(vbool64_t mask,vfloat64m1_t dst,vfloat64m1_t src,size_t offset,size_t vl)1162 vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
1163                                       vfloat64m1_t src, size_t offset,
1164                                       size_t vl) {
1165   return vslideup(mask, dst, src, offset, vl);
1166 }
1167 
1168 //
1169 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
1170 // CHECK-RV64-NEXT:  entry:
1171 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1172 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
1173 //
test_vslideup_vx_f64m2_m(vbool32_t mask,vfloat64m2_t dst,vfloat64m2_t src,size_t offset,size_t vl)1174 vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
1175                                       vfloat64m2_t src, size_t offset,
1176                                       size_t vl) {
1177   return vslideup(mask, dst, src, offset, vl);
1178 }
1179 
1180 //
1181 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
1182 // CHECK-RV64-NEXT:  entry:
1183 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1184 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
1185 //
test_vslideup_vx_f64m4_m(vbool16_t mask,vfloat64m4_t dst,vfloat64m4_t src,size_t offset,size_t vl)1186 vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
1187                                       vfloat64m4_t src, size_t offset,
1188                                       size_t vl) {
1189   return vslideup(mask, dst, src, offset, vl);
1190 }
1191 
1192 //
1193 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
1194 // CHECK-RV64-NEXT:  entry:
1195 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1196 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
1197 //
test_vslideup_vx_f64m8_m(vbool8_t mask,vfloat64m8_t dst,vfloat64m8_t src,size_t offset,size_t vl)1198 vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
1199                                       vfloat64m8_t src, size_t offset,
1200                                       size_t vl) {
1201   return vslideup(mask, dst, src, offset, vl);
1202 }
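
// Illustrative use of the overloaded masked form exercised above; the names
// `m` (vbool32_t), `merge`, `vec` (both vuint32m1_t), and `avl` are
// hypothetical and not part of this test:
//
//   vuint32m1_t r = vslideup(m, merge, vec, 4, avl);
//
// As the CHECK lines show, the masked overloads lower to the
// @llvm.riscv.vslideup.mask.* intrinsics, with `dst` passed through as the
// merge operand.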