// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +f -target-feature +d -target-feature +experimental-v \
// RUN:   -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
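
// This file exercises the vmv intrinsics: vmv.v.v (vector-vector move),
// vmv.v.x (scalar broadcast), vmv.x.s (read of element 0), and vmv.s.x
// (write to element 0) across the supported element types and LMULs, and
// checks that each call lowers to the matching @llvm.riscv.vmv.* intrinsic.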

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_v_v_i8mf8(vint8mf8_t src, size_t vl) {
  return vmv_v_v_i8mf8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_v_x_i8mf8(int8_t src, size_t vl) {
  return vmv_v_x_i8mf8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_v_v_i8mf4(vint8mf4_t src, size_t vl) {
  return vmv_v_v_i8mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_v_x_i8mf4(int8_t src, size_t vl) {
  return vmv_v_x_i8mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_v_v_i8mf2(vint8mf2_t src, size_t vl) {
  return vmv_v_v_i8mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_v_x_i8mf2(int8_t src, size_t vl) {
  return vmv_v_x_i8mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_v_v_i8m1(vint8m1_t src, size_t vl) {
  return vmv_v_v_i8m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_v_x_i8m1(int8_t src, size_t vl) {
  return vmv_v_x_i8m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_v_v_i8m2(vint8m2_t src, size_t vl) {
  return vmv_v_v_i8m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_v_x_i8m2(int8_t src, size_t vl) {
  return vmv_v_x_i8m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_v_v_i8m4(vint8m4_t src, size_t vl) {
  return vmv_v_v_i8m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_v_x_i8m4(int8_t src, size_t vl) {
  return vmv_v_x_i8m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_v_v_i8m8(vint8m8_t src, size_t vl) {
  return vmv_v_v_i8m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_v_x_i8m8(int8_t src, size_t vl) {
  return vmv_v_x_i8m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_v_v_i16mf4(vint16mf4_t src, size_t vl) {
  return vmv_v_v_i16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_v_x_i16mf4(int16_t src, size_t vl) {
  return vmv_v_x_i16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_v_v_i16mf2(vint16mf2_t src, size_t vl) {
  return vmv_v_v_i16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_v_x_i16mf2(int16_t src, size_t vl) {
  return vmv_v_x_i16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_v_v_i16m1(vint16m1_t src, size_t vl) {
  return vmv_v_v_i16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_v_x_i16m1(int16_t src, size_t vl) {
  return vmv_v_x_i16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_v_v_i16m2(vint16m2_t src, size_t vl) {
  return vmv_v_v_i16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_v_x_i16m2(int16_t src, size_t vl) {
  return vmv_v_x_i16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_v_v_i16m4(vint16m4_t src, size_t vl) {
  return vmv_v_v_i16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_v_x_i16m4(int16_t src, size_t vl) {
  return vmv_v_x_i16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_v_v_i16m8(vint16m8_t src, size_t vl) {
  return vmv_v_v_i16m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_v_x_i16m8(int16_t src, size_t vl) {
  return vmv_v_x_i16m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_v_v_i32mf2(vint32mf2_t src, size_t vl) {
  return vmv_v_v_i32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_v_x_i32mf2(int32_t src, size_t vl) {
  return vmv_v_x_i32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_v_v_i32m1(vint32m1_t src, size_t vl) {
  return vmv_v_v_i32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_v_x_i32m1(int32_t src, size_t vl) {
  return vmv_v_x_i32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_v_v_i32m2(vint32m2_t src, size_t vl) {
  return vmv_v_v_i32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_v_x_i32m2(int32_t src, size_t vl) {
  return vmv_v_x_i32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_v_v_i32m4(vint32m4_t src, size_t vl) {
  return vmv_v_v_i32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_v_x_i32m4(int32_t src, size_t vl) {
  return vmv_v_x_i32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_v_v_i32m8(vint32m8_t src, size_t vl) {
  return vmv_v_v_i32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_v_x_i32m8(int32_t src, size_t vl) {
  return vmv_v_x_i32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_v_v_i64m1(vint64m1_t src, size_t vl) {
  return vmv_v_v_i64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_v_x_i64m1(int64_t src, size_t vl) {
  return vmv_v_x_i64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmv_v_v_i64m2(vint64m2_t src, size_t vl) {
  return vmv_v_v_i64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vmv_v_x_i64m2(int64_t src, size_t vl) {
  return vmv_v_x_i64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmv_v_v_i64m4(vint64m4_t src, size_t vl) {
  return vmv_v_v_i64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vmv_v_x_i64m4(int64_t src, size_t vl) {
  return vmv_v_x_i64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmv_v_v_i64m8(vint64m8_t src, size_t vl) {
  return vmv_v_v_i64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vmv_v_x_i64m8(int64_t src, size_t vl) {
  return vmv_v_x_i64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.v.nxv1i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_v_v_u8mf8(vuint8mf8_t src, size_t vl) {
  return vmv_v_v_u8mf8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.v.x.nxv1i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_v_x_u8mf8(uint8_t src, size_t vl) {
  return vmv_v_x_u8mf8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.v.nxv2i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_v_v_u8mf4(vuint8mf4_t src, size_t vl) {
  return vmv_v_v_u8mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.v.x.nxv2i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_v_x_u8mf4(uint8_t src, size_t vl) {
  return vmv_v_x_u8mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.v.nxv4i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_v_v_u8mf2(vuint8mf2_t src, size_t vl) {
  return vmv_v_v_u8mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.v.x.nxv4i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_v_x_u8mf2(uint8_t src, size_t vl) {
  return vmv_v_x_u8mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.v.nxv8i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_v_v_u8m1(vuint8m1_t src, size_t vl) {
  return vmv_v_v_u8m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.v.x.nxv8i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_v_x_u8m1(uint8_t src, size_t vl) {
  return vmv_v_x_u8m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.v.nxv16i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_v_v_u8m2(vuint8m2_t src, size_t vl) {
  return vmv_v_v_u8m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.v.x.nxv16i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_v_x_u8m2(uint8_t src, size_t vl) {
  return vmv_v_x_u8m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.v.nxv32i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_v_v_u8m4(vuint8m4_t src, size_t vl) {
  return vmv_v_v_u8m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.v.x.nxv32i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_v_x_u8m4(uint8_t src, size_t vl) {
  return vmv_v_x_u8m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.v.nxv64i8.i64(<vscale x 64 x i8> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_v_v_u8m8(vuint8m8_t src, size_t vl) {
  return vmv_v_v_u8m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.v.x.nxv64i8.i64(i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_v_x_u8m8(uint8_t src, size_t vl) {
  return vmv_v_x_u8m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.v.nxv1i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_v_v_u16mf4(vuint16mf4_t src, size_t vl) {
  return vmv_v_v_u16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.v.x.nxv1i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_v_x_u16mf4(uint16_t src, size_t vl) {
  return vmv_v_x_u16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.v.nxv2i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_v_v_u16mf2(vuint16mf2_t src, size_t vl) {
  return vmv_v_v_u16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.v.x.nxv2i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_v_x_u16mf2(uint16_t src, size_t vl) {
  return vmv_v_x_u16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.v.nxv4i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_v_v_u16m1(vuint16m1_t src, size_t vl) {
  return vmv_v_v_u16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.v.x.nxv4i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_v_x_u16m1(uint16_t src, size_t vl) {
  return vmv_v_x_u16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.v.nxv8i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_v_v_u16m2(vuint16m2_t src, size_t vl) {
  return vmv_v_v_u16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.v.x.nxv8i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_v_x_u16m2(uint16_t src, size_t vl) {
  return vmv_v_x_u16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.v.nxv16i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_v_v_u16m4(vuint16m4_t src, size_t vl) {
  return vmv_v_v_u16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.v.x.nxv16i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_v_x_u16m4(uint16_t src, size_t vl) {
  return vmv_v_x_u16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.v.nxv32i16.i64(<vscale x 32 x i16> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_v_v_u16m8(vuint16m8_t src, size_t vl) {
  return vmv_v_v_u16m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.v.x.nxv32i16.i64(i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_v_x_u16m8(uint16_t src, size_t vl) {
  return vmv_v_x_u16m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.v.nxv1i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_v_v_u32mf2(vuint32mf2_t src, size_t vl) {
  return vmv_v_v_u32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.v.x.nxv1i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_v_x_u32mf2(uint32_t src, size_t vl) {
  return vmv_v_x_u32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.v.nxv2i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_v_v_u32m1(vuint32m1_t src, size_t vl) {
  return vmv_v_v_u32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.v.x.nxv2i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_v_x_u32m1(uint32_t src, size_t vl) {
  return vmv_v_x_u32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.v.nxv4i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_v_v_u32m2(vuint32m2_t src, size_t vl) {
  return vmv_v_v_u32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.v.x.nxv4i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_v_x_u32m2(uint32_t src, size_t vl) {
  return vmv_v_x_u32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.v.nxv8i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_v_v_u32m4(vuint32m4_t src, size_t vl) {
  return vmv_v_v_u32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.v.x.nxv8i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_v_x_u32m4(uint32_t src, size_t vl) {
  return vmv_v_x_u32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.v.nxv16i32.i64(<vscale x 16 x i32> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_v_v_u32m8(vuint32m8_t src, size_t vl) {
  return vmv_v_v_u32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.v.x.nxv16i32.i64(i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_v_x_u32m8(uint32_t src, size_t vl) {
  return vmv_v_x_u32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.v.nxv1i64.i64(<vscale x 1 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_v_v_u64m1(vuint64m1_t src, size_t vl) {
  return vmv_v_v_u64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.v.x.nxv1i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_v_x_u64m1(uint64_t src, size_t vl) {
  return vmv_v_x_u64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.v.nxv2i64.i64(<vscale x 2 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_v_v_u64m2(vuint64m2_t src, size_t vl) {
  return vmv_v_v_u64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.v.x.nxv2i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_v_x_u64m2(uint64_t src, size_t vl) {
  return vmv_v_x_u64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.v.nxv4i64.i64(<vscale x 4 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_v_v_u64m4(vuint64m4_t src, size_t vl) {
  return vmv_v_v_u64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.v.x.nxv4i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_v_x_u64m4(uint64_t src, size_t vl) {
  return vmv_v_x_u64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.v.nxv8i64.i64(<vscale x 8 x i64> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_v_v_u64m8(vuint64m8_t src, size_t vl) {
  return vmv_v_v_u64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_x_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.v.x.nxv8i64.i64(i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_v_x_u64m8(uint64_t src, size_t vl) {
  return vmv_v_x_u64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x float> @llvm.riscv.vmv.v.v.nxv1f32.i64(<vscale x 1 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x float> [[TMP0]]
//
vfloat32mf2_t test_vmv_v_v_f32mf2(vfloat32mf2_t src, size_t vl) {
  return vmv_v_v_f32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x float> @llvm.riscv.vmv.v.v.nxv2f32.i64(<vscale x 2 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x float> [[TMP0]]
//
vfloat32m1_t test_vmv_v_v_f32m1(vfloat32m1_t src, size_t vl) {
  return vmv_v_v_f32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x float> @llvm.riscv.vmv.v.v.nxv4f32.i64(<vscale x 4 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x float> [[TMP0]]
//
vfloat32m2_t test_vmv_v_v_f32m2(vfloat32m2_t src, size_t vl) {
  return vmv_v_v_f32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x float> @llvm.riscv.vmv.v.v.nxv8f32.i64(<vscale x 8 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x float> [[TMP0]]
//
vfloat32m4_t test_vmv_v_v_f32m4(vfloat32m4_t src, size_t vl) {
  return vmv_v_v_f32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x float> @llvm.riscv.vmv.v.v.nxv16f32.i64(<vscale x 16 x float> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x float> [[TMP0]]
//
vfloat32m8_t test_vmv_v_v_f32m8(vfloat32m8_t src, size_t vl) {
  return vmv_v_v_f32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x double> @llvm.riscv.vmv.v.v.nxv1f64.i64(<vscale x 1 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x double> [[TMP0]]
//
vfloat64m1_t test_vmv_v_v_f64m1(vfloat64m1_t src, size_t vl) {
  return vmv_v_v_f64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x double> @llvm.riscv.vmv.v.v.nxv2f64.i64(<vscale x 2 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x double> [[TMP0]]
//
vfloat64m2_t test_vmv_v_v_f64m2(vfloat64m2_t src, size_t vl) {
  return vmv_v_v_f64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x double> @llvm.riscv.vmv.v.v.nxv4f64.i64(<vscale x 4 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x double> [[TMP0]]
//
vfloat64m4_t test_vmv_v_v_f64m4(vfloat64m4_t src, size_t vl) {
  return vmv_v_v_f64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_v_v_f64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x double> @llvm.riscv.vmv.v.v.nxv8f64.i64(<vscale x 8 x double> [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x double> [[TMP0]]
//
vfloat64m8_t test_vmv_v_v_f64m8(vfloat64m8_t src, size_t vl) {
  return vmv_v_v_f64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf8_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf8_i8(vint8mf8_t src) { return vmv_x_s_i8mf8_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vint8mf8_t test_vmv_s_x_i8mf8(vint8mf8_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8mf8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf4_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf4_i8(vint8mf4_t src) { return vmv_x_s_i8mf4_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vint8mf4_t test_vmv_s_x_i8mf4(vint8mf4_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8mf4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8mf2_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8mf2_i8(vint8mf2_t src) { return vmv_x_s_i8mf2_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vint8mf2_t test_vmv_s_x_i8mf2(vint8mf2_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8mf2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m1_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m1_i8(vint8m1_t src) { return vmv_x_s_i8m1_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vint8m1_t test_vmv_s_x_i8m1(vint8m1_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m2_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m2_i8(vint8m2_t src) { return vmv_x_s_i8m2_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vint8m2_t test_vmv_s_x_i8m2(vint8m2_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m4_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m4_i8(vint8m4_t src) { return vmv_x_s_i8m4_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vint8m4_t test_vmv_s_x_i8m4(vint8m4_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i8m8_i8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
int8_t test_vmv_x_s_i8m8_i8(vint8m8_t src) { return vmv_x_s_i8m8_i8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vint8m8_t test_vmv_s_x_i8m8(vint8m8_t dst, int8_t src, size_t vl) {
  return vmv_s_x_i8m8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16mf4_i16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16mf4_i16(vint16mf4_t src) {
  return vmv_x_s_i16mf4_i16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vmv_s_x_i16mf4(vint16mf4_t dst, int16_t src, size_t vl) {
  return vmv_s_x_i16mf4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16mf2_i16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16mf2_i16(vint16mf2_t src) {
  return vmv_x_s_i16mf2_i16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vmv_s_x_i16mf2(vint16mf2_t dst, int16_t src, size_t vl) {
  return vmv_s_x_i16mf2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m1_i16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m1_i16(vint16m1_t src) {
  return vmv_x_s_i16m1_i16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vmv_s_x_i16m1(vint16m1_t dst, int16_t src, size_t vl) {
  return vmv_s_x_i16m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m2_i16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m2_i16(vint16m2_t src) {
  return vmv_x_s_i16m2_i16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vmv_s_x_i16m2(vint16m2_t dst, int16_t src, size_t vl) {
  return vmv_s_x_i16m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m4_i16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m4_i16(vint16m4_t src) {
  return vmv_x_s_i16m4_i16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vmv_s_x_i16m4(vint16m4_t dst, int16_t src, size_t vl) {
  return vmv_s_x_i16m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i16m8_i16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
int16_t test_vmv_x_s_i16m8_i16(vint16m8_t src) {
  return vmv_x_s_i16m8_i16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vmv_s_x_i16m8(vint16m8_t dst, int16_t src, size_t vl) {
  return vmv_s_x_i16m8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32mf2_i32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32mf2_i32(vint32mf2_t src) {
  return vmv_x_s_i32mf2_i32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vmv_s_x_i32mf2(vint32mf2_t dst, int32_t src, size_t vl) {
  return vmv_s_x_i32mf2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m1_i32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m1_i32(vint32m1_t src) {
  return vmv_x_s_i32m1_i32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vmv_s_x_i32m1(vint32m1_t dst, int32_t src, size_t vl) {
  return vmv_s_x_i32m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m2_i32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m2_i32(vint32m2_t src) {
  return vmv_x_s_i32m2_i32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vmv_s_x_i32m2(vint32m2_t dst, int32_t src, size_t vl) {
  return vmv_s_x_i32m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m4_i32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m4_i32(vint32m4_t src) {
  return vmv_x_s_i32m4_i32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vmv_s_x_i32m4(vint32m4_t dst, int32_t src, size_t vl) {
  return vmv_s_x_i32m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i32m8_i32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
int32_t test_vmv_x_s_i32m8_i32(vint32m8_t src) {
  return vmv_x_s_i32m8_i32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vmv_s_x_i32m8(vint32m8_t dst, int32_t src, size_t vl) {
  return vmv_s_x_i32m8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_i64m1_i64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
//
int64_t test_vmv_x_s_i64m1_i64(vint64m1_t src) {
  return vmv_x_s_i64m1_i64(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vmv_s_x_i64m1(vint64m1_t dst, int64_t src, size_t vl) {
  return vmv_s_x_i64m1(dst, src, vl);
}

//
1345 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m2_i64(
1346 // CHECK-RV64-NEXT:  entry:
1347 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
1348 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
1349 //
test_vmv_x_s_i64m2_i64(vint64m2_t src)1350 int64_t test_vmv_x_s_i64m2_i64(vint64m2_t src) {
1351   return vmv_x_s_i64m2_i64(src);
1352 }
1353 
1354 //
1355 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m2(
1356 // CHECK-RV64-NEXT:  entry:
1357 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
1358 // CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
1359 //
test_vmv_s_x_i64m2(vint64m2_t dst,int64_t src,size_t vl)1360 vint64m2_t test_vmv_s_x_i64m2(vint64m2_t dst, int64_t src, size_t vl) {
1361   return vmv_s_x_i64m2(dst, src, vl);
1362 }
1363 
1364 //
1365 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m4_i64(
1366 // CHECK-RV64-NEXT:  entry:
1367 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
1368 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
1369 //
test_vmv_x_s_i64m4_i64(vint64m4_t src)1370 int64_t test_vmv_x_s_i64m4_i64(vint64m4_t src) {
1371   return vmv_x_s_i64m4_i64(src);
1372 }
1373 
1374 //
1375 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m4(
1376 // CHECK-RV64-NEXT:  entry:
1377 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
1378 // CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
1379 //
test_vmv_s_x_i64m4(vint64m4_t dst,int64_t src,size_t vl)1380 vint64m4_t test_vmv_s_x_i64m4(vint64m4_t dst, int64_t src, size_t vl) {
1381   return vmv_s_x_i64m4(dst, src, vl);
1382 }
1383 
1384 //
1385 // CHECK-RV64-LABEL: @test_vmv_x_s_i64m8_i64(
1386 // CHECK-RV64-NEXT:  entry:
1387 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
1388 // CHECK-RV64-NEXT:    ret i64 [[TMP0]]
1389 //
test_vmv_x_s_i64m8_i64(vint64m8_t src)1390 int64_t test_vmv_x_s_i64m8_i64(vint64m8_t src) {
1391   return vmv_x_s_i64m8_i64(src);
1392 }
1393 
1394 //
1395 // CHECK-RV64-LABEL: @test_vmv_s_x_i64m8(
1396 // CHECK-RV64-NEXT:  entry:
1397 // CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
1398 // CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
1399 //
test_vmv_s_x_i64m8(vint64m8_t dst,int64_t src,size_t vl)1400 vint64m8_t test_vmv_s_x_i64m8(vint64m8_t dst, int64_t src, size_t vl) {
1401   return vmv_s_x_i64m8(dst, src, vl);
1402 }
1403 
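// NOTE: Illustrative sketch, not autogenerated and not checked by FileCheck.
// It shows how the two intrinsic families above pair up: vmv_s_x writes a
// scalar into element 0 of dst (the remaining elements are carried over from
// dst, and nothing is written when vl == 0), while vmv_x_s reads element 0
// back as a scalar regardless of vl. The helper name below is hypothetical
// and uses only intrinsics exercised in this file; being static inline and
// unused, it emits no IR and so leaves the CHECK lines unaffected.
static inline int32_t example_lane0_roundtrip(vint32m1_t v, int32_t x,
                                              size_t vl) {
  vint32m1_t t = vmv_s_x_i32m1(v, x, vl); // element 0 := x (when vl > 0)
  return vmv_x_s_i32m1_i32(t);            // reads element 0 back, ignoring vl
}
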
//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8mf8_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv1i8(<vscale x 1 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8mf8_u8(vuint8mf8_t src) { return vmv_x_s_u8mf8_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i8> @llvm.riscv.vmv.s.x.nxv1i8.i64(<vscale x 1 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i8> [[TMP0]]
//
vuint8mf8_t test_vmv_s_x_u8mf8(vuint8mf8_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8mf8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8mf4_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv2i8(<vscale x 2 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8mf4_u8(vuint8mf4_t src) { return vmv_x_s_u8mf4_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i8> @llvm.riscv.vmv.s.x.nxv2i8.i64(<vscale x 2 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i8> [[TMP0]]
//
vuint8mf4_t test_vmv_s_x_u8mf4(vuint8mf4_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8mf4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8mf2_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv4i8(<vscale x 4 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8mf2_u8(vuint8mf2_t src) { return vmv_x_s_u8mf2_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i8> @llvm.riscv.vmv.s.x.nxv4i8.i64(<vscale x 4 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i8> [[TMP0]]
//
vuint8mf2_t test_vmv_s_x_u8mf2(vuint8mf2_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8mf2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m1_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv8i8(<vscale x 8 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m1_u8(vuint8m1_t src) { return vmv_x_s_u8m1_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i8> @llvm.riscv.vmv.s.x.nxv8i8.i64(<vscale x 8 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i8> [[TMP0]]
//
vuint8m1_t test_vmv_s_x_u8m1(vuint8m1_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m2_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv16i8(<vscale x 16 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m2_u8(vuint8m2_t src) { return vmv_x_s_u8m2_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i8> @llvm.riscv.vmv.s.x.nxv16i8.i64(<vscale x 16 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i8> [[TMP0]]
//
vuint8m2_t test_vmv_s_x_u8m2(vuint8m2_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m4_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv32i8(<vscale x 32 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m4_u8(vuint8m4_t src) { return vmv_x_s_u8m4_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i8> @llvm.riscv.vmv.s.x.nxv32i8.i64(<vscale x 32 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i8> [[TMP0]]
//
vuint8m4_t test_vmv_s_x_u8m4(vuint8m4_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u8m8_u8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i8 @llvm.riscv.vmv.x.s.nxv64i8(<vscale x 64 x i8> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i8 [[TMP0]]
//
uint8_t test_vmv_x_s_u8m8_u8(vuint8m8_t src) { return vmv_x_s_u8m8_u8(src); }

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u8m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 64 x i8> @llvm.riscv.vmv.s.x.nxv64i8.i64(<vscale x 64 x i8> [[DST:%.*]], i8 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 64 x i8> [[TMP0]]
//
vuint8m8_t test_vmv_s_x_u8m8(vuint8m8_t dst, uint8_t src, size_t vl) {
  return vmv_s_x_u8m8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u16mf4_u16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv1i16(<vscale x 1 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16mf4_u16(vuint16mf4_t src) {
  return vmv_x_s_u16mf4_u16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vmv.s.x.nxv1i16.i64(<vscale x 1 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vmv_s_x_u16mf4(vuint16mf4_t dst, uint16_t src, size_t vl) {
  return vmv_s_x_u16mf4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u16mf2_u16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv2i16(<vscale x 2 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16mf2_u16(vuint16mf2_t src) {
  return vmv_x_s_u16mf2_u16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vmv.s.x.nxv2i16.i64(<vscale x 2 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vmv_s_x_u16mf2(vuint16mf2_t dst, uint16_t src, size_t vl) {
  return vmv_s_x_u16mf2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m1_u16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv4i16(<vscale x 4 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m1_u16(vuint16m1_t src) {
  return vmv_x_s_u16m1_u16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vmv.s.x.nxv4i16.i64(<vscale x 4 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vmv_s_x_u16m1(vuint16m1_t dst, uint16_t src, size_t vl) {
  return vmv_s_x_u16m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m2_u16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv8i16(<vscale x 8 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m2_u16(vuint16m2_t src) {
  return vmv_x_s_u16m2_u16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vmv.s.x.nxv8i16.i64(<vscale x 8 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vmv_s_x_u16m2(vuint16m2_t dst, uint16_t src, size_t vl) {
  return vmv_s_x_u16m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m4_u16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv16i16(<vscale x 16 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m4_u16(vuint16m4_t src) {
  return vmv_x_s_u16m4_u16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vmv.s.x.nxv16i16.i64(<vscale x 16 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vmv_s_x_u16m4(vuint16m4_t dst, uint16_t src, size_t vl) {
  return vmv_s_x_u16m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u16m8_u16(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i16 @llvm.riscv.vmv.x.s.nxv32i16(<vscale x 32 x i16> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i16 [[TMP0]]
//
uint16_t test_vmv_x_s_u16m8_u16(vuint16m8_t src) {
  return vmv_x_s_u16m8_u16(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vmv.s.x.nxv32i16.i64(<vscale x 32 x i16> [[DST:%.*]], i16 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vmv_s_x_u16m8(vuint16m8_t dst, uint16_t src, size_t vl) {
  return vmv_s_x_u16m8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u32mf2_u32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv1i32(<vscale x 1 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32mf2_u32(vuint32mf2_t src) {
  return vmv_x_s_u32mf2_u32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vmv.s.x.nxv1i32.i64(<vscale x 1 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vmv_s_x_u32mf2(vuint32mf2_t dst, uint32_t src, size_t vl) {
  return vmv_s_x_u32mf2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m1_u32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv2i32(<vscale x 2 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m1_u32(vuint32m1_t src) {
  return vmv_x_s_u32m1_u32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vmv.s.x.nxv2i32.i64(<vscale x 2 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vmv_s_x_u32m1(vuint32m1_t dst, uint32_t src, size_t vl) {
  return vmv_s_x_u32m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m2_u32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv4i32(<vscale x 4 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m2_u32(vuint32m2_t src) {
  return vmv_x_s_u32m2_u32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vmv.s.x.nxv4i32.i64(<vscale x 4 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vmv_s_x_u32m2(vuint32m2_t dst, uint32_t src, size_t vl) {
  return vmv_s_x_u32m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m4_u32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv8i32(<vscale x 8 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m4_u32(vuint32m4_t src) {
  return vmv_x_s_u32m4_u32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vmv.s.x.nxv8i32.i64(<vscale x 8 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vmv_s_x_u32m4(vuint32m4_t dst, uint32_t src, size_t vl) {
  return vmv_s_x_u32m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u32m8_u32(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i32 @llvm.riscv.vmv.x.s.nxv16i32(<vscale x 16 x i32> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i32 [[TMP0]]
//
uint32_t test_vmv_x_s_u32m8_u32(vuint32m8_t src) {
  return vmv_x_s_u32m8_u32(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vmv.s.x.nxv16i32.i64(<vscale x 16 x i32> [[DST:%.*]], i32 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vmv_s_x_u32m8(vuint32m8_t dst, uint32_t src, size_t vl) {
  return vmv_s_x_u32m8(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m1_u64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv1i64(<vscale x 1 x i64> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m1_u64(vuint64m1_t src) {
  return vmv_x_s_u64m1_u64(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vmv.s.x.nxv1i64.i64(<vscale x 1 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vmv_s_x_u64m1(vuint64m1_t dst, uint64_t src, size_t vl) {
  return vmv_s_x_u64m1(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m2_u64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv2i64(<vscale x 2 x i64> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m2_u64(vuint64m2_t src) {
  return vmv_x_s_u64m2_u64(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vmv.s.x.nxv2i64.i64(<vscale x 2 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vmv_s_x_u64m2(vuint64m2_t dst, uint64_t src, size_t vl) {
  return vmv_s_x_u64m2(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m4_u64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv4i64(<vscale x 4 x i64> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m4_u64(vuint64m4_t src) {
  return vmv_x_s_u64m4_u64(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vmv.s.x.nxv4i64.i64(<vscale x 4 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vmv_s_x_u64m4(vuint64m4_t dst, uint64_t src, size_t vl) {
  return vmv_s_x_u64m4(dst, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vmv_x_s_u64m8_u64(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call i64 @llvm.riscv.vmv.x.s.nxv8i64(<vscale x 8 x i64> [[SRC:%.*]])
// CHECK-RV64-NEXT:    ret i64 [[TMP0]]
//
uint64_t test_vmv_x_s_u64m8_u64(vuint64m8_t src) {
  return vmv_x_s_u64m8_u64(src);
}

//
// CHECK-RV64-LABEL: @test_vmv_s_x_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vmv.s.x.nxv8i64.i64(<vscale x 8 x i64> [[DST:%.*]], i64 [[SRC:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vmv_s_x_u64m8(vuint64m8_t dst, uint64_t src, size_t vl) {
  return vmv_s_x_u64m8(dst, src, vl);
}
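
// NOTE: Illustrative sketch, not autogenerated and not checked by FileCheck.
// As the assertions above show, the unsigned variants lower to the same
// @llvm.riscv.vmv.x.s / @llvm.riscv.vmv.s.x intrinsics as the signed ones;
// signedness only changes the C-level types. The helper name below is
// hypothetical and uses only intrinsics exercised in this file.
static inline uint64_t example_lane0_roundtrip_u64(vuint64m1_t v, uint64_t x,
                                                   size_t vl) {
  vuint64m1_t t = vmv_s_x_u64m1(v, x, vl); // element 0 := x (when vl > 0)
  return vmv_x_s_u64m1_u64(t);             // reads element 0 back, ignoring vl
}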