// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

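// Note: the tests below call the type-overloaded vslideup() intrinsic. The
// unmasked form vslideup(dst, src, offset, vl) is checked to lower to the
// @llvm.riscv.vslideup.* IR intrinsic with dst as the merge operand, and the
// "_m" variants additionally take a leading vbool mask and are checked to
// lower to @llvm.riscv.vslideup.mask.*.
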
// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8(vint8mf8_t dst, vint8mf8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4(vint8mf4_t dst, vint8mf4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2(vint8mf2_t dst, vint8mf2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1(vint8m1_t dst, vint8m1_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2(vint8m2_t dst, vint8m2_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4(vint8m4_t dst, vint8m4_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8(vint8m8_t dst, vint8m8_t src, size_t offset,
                                size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4(vint16mf4_t dst, vint16mf4_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2(vint16mf2_t dst, vint16mf2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1(vint16m1_t dst, vint16m1_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2(vint16m2_t dst, vint16m2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4(vint16m4_t dst, vint16m4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8(vint16m8_t dst, vint16m8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2(vint32mf2_t dst, vint32mf2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1(vint32m1_t dst, vint32m1_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2(vint32m2_t dst, vint32m2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4(vint32m4_t dst, vint32m4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8(vint32m8_t dst, vint32m8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1(vint64m1_t dst, vint64m1_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2(vint64m2_t dst, vint64m2_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4(vint64m4_t dst, vint64m4_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8(vint64m8_t dst, vint64m8_t src, size_t offset,
                                  size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8(vuint8mf8_t dst, vuint8mf8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4(vuint8mf4_t dst, vuint8mf4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2(vuint8mf2_t dst, vuint8mf2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1(vuint8m1_t dst, vuint8m1_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2(vuint8m2_t dst, vuint8m2_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4(vuint8m4_t dst, vuint8m4_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8(vuint8m8_t dst, vuint8m8_t src, size_t offset,
                                 size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4(vuint16mf4_t dst, vuint16mf4_t src,
                                     size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2(vuint16mf2_t dst, vuint16mf2_t src,
                                     size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1(vuint16m1_t dst, vuint16m1_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2(vuint16m2_t dst, vuint16m2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4(vuint16m4_t dst, vuint16m4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8(vuint16m8_t dst, vuint16m8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2(vuint32mf2_t dst, vuint32mf2_t src,
                                     size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1(vuint32m1_t dst, vuint32m1_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2(vuint32m2_t dst, vuint32m2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vslideup_vx_u32m4(vuint32m4_t dst, vuint32m4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vslideup_vx_u32m8(vuint32m8_t dst, vuint32m8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vslideup_vx_u64m1(vuint64m1_t dst, vuint64m1_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vslideup_vx_u64m2(vuint64m2_t dst, vuint64m2_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vslideup_vx_u64m4(vuint64m4_t dst, vuint64m4_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vslideup_vx_u64m8(vuint64m8_t dst, vuint64m8_t src,
                                   size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vslideup_vx_f32mf2(vfloat32mf2_t dst, vfloat32mf2_t src,
                                      size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vslideup_vx_f32m1(vfloat32m1_t dst, vfloat32m1_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vslideup_vx_f32m2(vfloat32m2_t dst, vfloat32m2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vslideup_vx_f32m4(vfloat32m4_t dst, vfloat32m4_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vslideup_vx_f32m8(vfloat32m8_t dst, vfloat32m8_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vslideup_vx_f64m1(vfloat64m1_t dst, vfloat64m1_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vslideup_vx_f64m2(vfloat64m2_t dst, vfloat64m2_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vslideup_vx_f64m4(vfloat64m4_t dst, vfloat64m4_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vslideup_vx_f64m8(vfloat64m8_t dst, vfloat64m8_t src,
                                    size_t offset, size_t vl) {
  return vslideup(dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vslideup_vx_i8mf8_m(vbool64_t mask, vint8mf8_t dst,
                                    vint8mf8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vslideup_vx_i8mf4_m(vbool32_t mask, vint8mf4_t dst,
                                    vint8mf4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vslideup_vx_i8mf2_m(vbool16_t mask, vint8mf2_t dst,
                                    vint8mf2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vslideup_vx_i8m1_m(vbool8_t mask, vint8m1_t dst, vint8m1_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vslideup_vx_i8m2_m(vbool4_t mask, vint8m2_t dst, vint8m2_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vslideup_vx_i8m4_m(vbool2_t mask, vint8m4_t dst, vint8m4_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vslideup_vx_i8m8_m(vbool1_t mask, vint8m8_t dst, vint8m8_t src,
                                  size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vslideup_vx_i16mf4_m(vbool64_t mask, vint16mf4_t dst,
                                      vint16mf4_t src, size_t offset,
                                      size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vslideup_vx_i16mf2_m(vbool32_t mask, vint16mf2_t dst,
                                      vint16mf2_t src, size_t offset,
                                      size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vslideup_vx_i16m1_m(vbool16_t mask, vint16m1_t dst,
                                    vint16m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vslideup_vx_i16m2_m(vbool8_t mask, vint16m2_t dst,
                                    vint16m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vslideup_vx_i16m4_m(vbool4_t mask, vint16m4_t dst,
                                    vint16m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vslideup_vx_i16m8_m(vbool2_t mask, vint16m8_t dst,
                                    vint16m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vslideup_vx_i32mf2_m(vbool64_t mask, vint32mf2_t dst,
                                      vint32mf2_t src, size_t offset,
                                      size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vslideup_vx_i32m1_m(vbool32_t mask, vint32m1_t dst,
                                    vint32m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vslideup_vx_i32m2_m(vbool16_t mask, vint32m2_t dst,
                                    vint32m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vslideup_vx_i32m4_m(vbool8_t mask, vint32m4_t dst,
                                    vint32m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vslideup_vx_i32m8_m(vbool4_t mask, vint32m8_t dst,
                                    vint32m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vslideup_vx_i64m1_m(vbool64_t mask, vint64m1_t dst,
                                    vint64m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vslideup_vx_i64m2_m(vbool32_t mask, vint64m2_t dst,
                                    vint64m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vslideup_vx_i64m4_m(vbool16_t mask, vint64m4_t dst,
                                    vint64m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vslideup_vx_i64m8_m(vbool8_t mask, vint64m8_t dst,
                                    vint64m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vslideup.mask.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vslideup_vx_u8mf8_m(vbool64_t mask, vuint8mf8_t dst,
                                     vuint8mf8_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vslideup.mask.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vslideup_vx_u8mf4_m(vbool32_t mask, vuint8mf4_t dst,
                                     vuint8mf4_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vslideup.mask.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vslideup_vx_u8mf2_m(vbool16_t mask, vuint8mf2_t dst,
                                     vuint8mf2_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vslideup.mask.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vslideup_vx_u8m1_m(vbool8_t mask, vuint8m1_t dst,
                                   vuint8m1_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vslideup.mask.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vslideup_vx_u8m2_m(vbool4_t mask, vuint8m2_t dst,
                                   vuint8m2_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vslideup.mask.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vslideup_vx_u8m4_m(vbool2_t mask, vuint8m4_t dst,
                                   vuint8m4_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u8m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vslideup.mask.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], <vscale x 64 x i8> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 64 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vslideup_vx_u8m8_m(vbool1_t mask, vuint8m8_t dst,
                                   vuint8m8_t src, size_t offset, size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vslideup.mask.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vslideup_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t dst,
                                       vuint16mf4_t src, size_t offset,
                                       size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vslideup.mask.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vslideup_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t dst,
                                       vuint16mf2_t src, size_t offset,
                                       size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vslideup.mask.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vslideup_vx_u16m1_m(vbool16_t mask, vuint16m1_t dst,
                                     vuint16m1_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vslideup.mask.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vslideup_vx_u16m2_m(vbool8_t mask, vuint16m2_t dst,
                                     vuint16m2_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vslideup.mask.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vslideup_vx_u16m4_m(vbool4_t mask, vuint16m4_t dst,
                                     vuint16m4_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vslideup.mask.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], <vscale x 32 x i16> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vslideup_vx_u16m8_m(vbool2_t mask, vuint16m8_t dst,
                                     vuint16m8_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vslideup.mask.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vslideup_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t dst,
                                       vuint32mf2_t src, size_t offset,
                                       size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vslideup.mask.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vslideup_vx_u32m1_m(vbool32_t mask, vuint32m1_t dst,
                                     vuint32m1_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vslideup.mask.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vslideup_vx_u32m2_m(vbool16_t mask, vuint32m2_t dst,
                                     vuint32m2_t src, size_t offset,
                                     size_t vl) {
  return vslideup(mask, dst, src, offset, vl);
}

// CHECK-RV64-LABEL: @test_vslideup_vx_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vslideup.mask.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
936 // CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
937 //
test_vslideup_vx_u32m4_m(vbool8_t mask,vuint32m4_t dst,vuint32m4_t src,size_t offset,size_t vl)938 vuint32m4_t test_vslideup_vx_u32m4_m(vbool8_t mask, vuint32m4_t dst,
939                                      vuint32m4_t src, size_t offset,
940                                      size_t vl) {
941   return vslideup(mask, dst, src, offset, vl);
942 }
943 
944 // CHECK-RV64-LABEL: @test_vslideup_vx_u32m8_m(
945 // CHECK-RV64-NEXT:  entry:
946 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vslideup.mask.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], <vscale x 16 x i32> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
947 // CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
948 //
test_vslideup_vx_u32m8_m(vbool4_t mask,vuint32m8_t dst,vuint32m8_t src,size_t offset,size_t vl)949 vuint32m8_t test_vslideup_vx_u32m8_m(vbool4_t mask, vuint32m8_t dst,
950                                      vuint32m8_t src, size_t offset,
951                                      size_t vl) {
952   return vslideup(mask, dst, src, offset, vl);
953 }
954 
955 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m1_m(
956 // CHECK-RV64-NEXT:  entry:
957 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vslideup.mask.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], <vscale x 1 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
958 // CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
959 //
test_vslideup_vx_u64m1_m(vbool64_t mask,vuint64m1_t dst,vuint64m1_t src,size_t offset,size_t vl)960 vuint64m1_t test_vslideup_vx_u64m1_m(vbool64_t mask, vuint64m1_t dst,
961                                      vuint64m1_t src, size_t offset,
962                                      size_t vl) {
963   return vslideup(mask, dst, src, offset, vl);
964 }
965 
966 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m2_m(
967 // CHECK-RV64-NEXT:  entry:
968 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vslideup.mask.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], <vscale x 2 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
969 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
970 //
test_vslideup_vx_u64m2_m(vbool32_t mask,vuint64m2_t dst,vuint64m2_t src,size_t offset,size_t vl)971 vuint64m2_t test_vslideup_vx_u64m2_m(vbool32_t mask, vuint64m2_t dst,
972                                      vuint64m2_t src, size_t offset,
973                                      size_t vl) {
974   return vslideup(mask, dst, src, offset, vl);
975 }
976 
977 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m4_m(
978 // CHECK-RV64-NEXT:  entry:
979 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vslideup.mask.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], <vscale x 4 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
980 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
981 //
test_vslideup_vx_u64m4_m(vbool16_t mask,vuint64m4_t dst,vuint64m4_t src,size_t offset,size_t vl)982 vuint64m4_t test_vslideup_vx_u64m4_m(vbool16_t mask, vuint64m4_t dst,
983                                      vuint64m4_t src, size_t offset,
984                                      size_t vl) {
985   return vslideup(mask, dst, src, offset, vl);
986 }
987 
988 // CHECK-RV64-LABEL: @test_vslideup_vx_u64m8_m(
989 // CHECK-RV64-NEXT:  entry:
990 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vslideup.mask.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], <vscale x 8 x i64> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
991 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
992 //
test_vslideup_vx_u64m8_m(vbool8_t mask,vuint64m8_t dst,vuint64m8_t src,size_t offset,size_t vl)993 vuint64m8_t test_vslideup_vx_u64m8_m(vbool8_t mask, vuint64m8_t dst,
994                                      vuint64m8_t src, size_t offset,
995                                      size_t vl) {
996   return vslideup(mask, dst, src, offset, vl);
997 }
998 
999 // CHECK-RV64-LABEL: @test_vslideup_vx_f32mf2_m(
1000 // CHECK-RV64-NEXT:  entry:
1001 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vslideup.mask.nxv1f32.i64(<vscale x 1 x float> [[DST:%.*]], <vscale x 1 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1002 // CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
1003 //
test_vslideup_vx_f32mf2_m(vbool64_t mask,vfloat32mf2_t dst,vfloat32mf2_t src,size_t offset,size_t vl)1004 vfloat32mf2_t test_vslideup_vx_f32mf2_m(vbool64_t mask, vfloat32mf2_t dst,
1005                                         vfloat32mf2_t src, size_t offset,
1006                                         size_t vl) {
1007   return vslideup(mask, dst, src, offset, vl);
1008 }
1009 
1010 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m1_m(
1011 // CHECK-RV64-NEXT:  entry:
1012 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vslideup.mask.nxv2f32.i64(<vscale x 2 x float> [[DST:%.*]], <vscale x 2 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1013 // CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
1014 //
test_vslideup_vx_f32m1_m(vbool32_t mask,vfloat32m1_t dst,vfloat32m1_t src,size_t offset,size_t vl)1015 vfloat32m1_t test_vslideup_vx_f32m1_m(vbool32_t mask, vfloat32m1_t dst,
1016                                       vfloat32m1_t src, size_t offset,
1017                                       size_t vl) {
1018   return vslideup(mask, dst, src, offset, vl);
1019 }
1020 
1021 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m2_m(
1022 // CHECK-RV64-NEXT:  entry:
1023 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vslideup.mask.nxv4f32.i64(<vscale x 4 x float> [[DST:%.*]], <vscale x 4 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1024 // CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
1025 //
test_vslideup_vx_f32m2_m(vbool16_t mask,vfloat32m2_t dst,vfloat32m2_t src,size_t offset,size_t vl)1026 vfloat32m2_t test_vslideup_vx_f32m2_m(vbool16_t mask, vfloat32m2_t dst,
1027                                       vfloat32m2_t src, size_t offset,
1028                                       size_t vl) {
1029   return vslideup(mask, dst, src, offset, vl);
1030 }
1031 
1032 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m4_m(
1033 // CHECK-RV64-NEXT:  entry:
1034 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vslideup.mask.nxv8f32.i64(<vscale x 8 x float> [[DST:%.*]], <vscale x 8 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1035 // CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
1036 //
test_vslideup_vx_f32m4_m(vbool8_t mask,vfloat32m4_t dst,vfloat32m4_t src,size_t offset,size_t vl)1037 vfloat32m4_t test_vslideup_vx_f32m4_m(vbool8_t mask, vfloat32m4_t dst,
1038                                       vfloat32m4_t src, size_t offset,
1039                                       size_t vl) {
1040   return vslideup(mask, dst, src, offset, vl);
1041 }
1042 
1043 // CHECK-RV64-LABEL: @test_vslideup_vx_f32m8_m(
1044 // CHECK-RV64-NEXT:  entry:
1045 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vslideup.mask.nxv16f32.i64(<vscale x 16 x float> [[DST:%.*]], <vscale x 16 x float> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1046 // CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
1047 //
test_vslideup_vx_f32m8_m(vbool4_t mask,vfloat32m8_t dst,vfloat32m8_t src,size_t offset,size_t vl)1048 vfloat32m8_t test_vslideup_vx_f32m8_m(vbool4_t mask, vfloat32m8_t dst,
1049                                       vfloat32m8_t src, size_t offset,
1050                                       size_t vl) {
1051   return vslideup(mask, dst, src, offset, vl);
1052 }
1053 
1054 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m1_m(
1055 // CHECK-RV64-NEXT:  entry:
1056 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vslideup.mask.nxv1f64.i64(<vscale x 1 x double> [[DST:%.*]], <vscale x 1 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1057 // CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
1058 //
test_vslideup_vx_f64m1_m(vbool64_t mask,vfloat64m1_t dst,vfloat64m1_t src,size_t offset,size_t vl)1059 vfloat64m1_t test_vslideup_vx_f64m1_m(vbool64_t mask, vfloat64m1_t dst,
1060                                       vfloat64m1_t src, size_t offset,
1061                                       size_t vl) {
1062   return vslideup(mask, dst, src, offset, vl);
1063 }
1064 
1065 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m2_m(
1066 // CHECK-RV64-NEXT:  entry:
1067 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vslideup.mask.nxv2f64.i64(<vscale x 2 x double> [[DST:%.*]], <vscale x 2 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1068 // CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
1069 //
test_vslideup_vx_f64m2_m(vbool32_t mask,vfloat64m2_t dst,vfloat64m2_t src,size_t offset,size_t vl)1070 vfloat64m2_t test_vslideup_vx_f64m2_m(vbool32_t mask, vfloat64m2_t dst,
1071                                       vfloat64m2_t src, size_t offset,
1072                                       size_t vl) {
1073   return vslideup(mask, dst, src, offset, vl);
1074 }
1075 
1076 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m4_m(
1077 // CHECK-RV64-NEXT:  entry:
1078 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vslideup.mask.nxv4f64.i64(<vscale x 4 x double> [[DST:%.*]], <vscale x 4 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1079 // CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
1080 //
test_vslideup_vx_f64m4_m(vbool16_t mask,vfloat64m4_t dst,vfloat64m4_t src,size_t offset,size_t vl)1081 vfloat64m4_t test_vslideup_vx_f64m4_m(vbool16_t mask, vfloat64m4_t dst,
1082                                       vfloat64m4_t src, size_t offset,
1083                                       size_t vl) {
1084   return vslideup(mask, dst, src, offset, vl);
1085 }
1086 
1087 // CHECK-RV64-LABEL: @test_vslideup_vx_f64m8_m(
1088 // CHECK-RV64-NEXT:  entry:
1089 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vslideup.mask.nxv8f64.i64(<vscale x 8 x double> [[DST:%.*]], <vscale x 8 x double> [[SRC:%.*]], i64 [[OFFSET:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1090 // CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
1091 //
test_vslideup_vx_f64m8_m(vbool8_t mask,vfloat64m8_t dst,vfloat64m8_t src,size_t offset,size_t vl)1092 vfloat64m8_t test_vslideup_vx_f64m8_m(vbool8_t mask, vfloat64m8_t dst,
1093                                       vfloat64m8_t src, size_t offset,
1094                                       size_t vl) {
1095   return vslideup(mask, dst, src, offset, vl);
1096 }
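
// The checks above only verify the IR emitted for each overloaded call. As a
// brief, hedged usage sketch (not autogenerated and not checked by FileCheck),
// the hypothetical helper below shows how the masked overload might be used:
// roughly, where the mask is set, result elements at indices >= offset take
// values from src shifted up by offset, while lower elements keep dst's
// values. The helper name is illustrative only.
static inline vuint32m1_t
example_masked_slideup_u32m1(vbool32_t mask, vuint32m1_t dst, vuint32m1_t src,
                             size_t offset, size_t vl) {
  // Same overloaded intrinsic exercised by test_vslideup_vx_u32m1_m above.
  return vslideup(mask, dst, src, offset, vl);
}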