// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

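// Note: vwcvt_x_x_v(src, vl) / vwcvtu_x_x_v(src, vl) are the widening integer
// conversions, equivalent to vwadd.vx / vwaddu.vx with a zero scalar operand.
// The CHECK lines below verify that each intrinsic is emitted as a call to the
// corresponding llvm.riscv.vwadd / llvm.riscv.vwaddu intrinsic with "i{8,16,32} 0".
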
//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4 (vint8mf8_t src, size_t vl) {
  return vwcvt_x_x_v_i16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2 (vint8mf4_t src, size_t vl) {
  return vwcvt_x_x_v_i16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1 (vint8mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2 (vint8m1_t src, size_t vl) {
  return vwcvt_x_x_v_i16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4 (vint8m2_t src, size_t vl) {
  return vwcvt_x_x_v_i16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8 (vint8m4_t src, size_t vl) {
  return vwcvt_x_x_v_i16m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4 (vuint8mf8_t src, size_t vl) {
  return vwcvtu_x_x_v_u16mf4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2 (vuint8mf4_t src, size_t vl) {
  return vwcvtu_x_x_v_u16mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1 (vuint8mf2_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2 (vuint8m1_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4 (vuint8m2_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[SRC:%.*]], i8 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8 (vuint8m4_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2 (vint16mf4_t src, size_t vl) {
  return vwcvt_x_x_v_i32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1 (vint16mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2 (vint16m1_t src, size_t vl) {
  return vwcvt_x_x_v_i32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4 (vint16m2_t src, size_t vl) {
  return vwcvt_x_x_v_i32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8 (vint16m4_t src, size_t vl) {
  return vwcvt_x_x_v_i32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2 (vuint16mf4_t src, size_t vl) {
  return vwcvtu_x_x_v_u32mf2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1 (vuint16mf2_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2 (vuint16m1_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4 (vuint16m2_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[SRC:%.*]], i16 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8 (vuint16m4_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1 (vint32mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2 (vint32m1_t src, size_t vl) {
  return vwcvt_x_x_v_i64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4 (vint32m2_t src, size_t vl) {
  return vwcvt_x_x_v_i64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8 (vint32m4_t src, size_t vl) {
  return vwcvt_x_x_v_i64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1 (vuint32mf2_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m1(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2 (vuint32m1_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m2(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4 (vuint32m2_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m4(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[SRC:%.*]], i32 0, i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8 (vuint32m4_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m8(src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwcvt_x_x_v_i16mf4_m (vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t src, size_t vl) {
  return vwcvt_x_x_v_i16mf4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vint16mf2_t test_vwcvt_x_x_v_i16mf2_m (vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t src, size_t vl) {
  return vwcvt_x_x_v_i16mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vint16m1_t test_vwcvt_x_x_v_i16m1_m (vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i16m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vint16m2_t test_vwcvt_x_x_v_i16m2_m (vbool8_t mask, vint16m2_t maskedoff, vint8m1_t src, size_t vl) {
  return vwcvt_x_x_v_i16m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vint16m4_t test_vwcvt_x_x_v_i16m4_m (vbool4_t mask, vint16m4_t maskedoff, vint8m2_t src, size_t vl) {
  return vwcvt_x_x_v_i16m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwcvt_x_x_v_i16m8_m (vbool2_t mask, vint16m8_t maskedoff, vint8m4_t src, size_t vl) {
  return vwcvt_x_x_v_i16m8_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[SRC:%.*]], i8 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwcvtu_x_x_v_u16mf4_m (vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t src, size_t vl) {
  return vwcvtu_x_x_v_u16mf4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[SRC:%.*]], i8 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwcvtu_x_x_v_u16mf2_m (vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t src, size_t vl) {
  return vwcvtu_x_x_v_u16mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[SRC:%.*]], i8 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwcvtu_x_x_v_u16m1_m (vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[SRC:%.*]], i8 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwcvtu_x_x_v_u16m2_m (vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[SRC:%.*]], i8 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwcvtu_x_x_v_u16m4_m (vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u16m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[SRC:%.*]], i8 0, <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwcvtu_x_x_v_u16m8_m (vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t src, size_t vl) {
  return vwcvtu_x_x_v_u16m8_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwcvt_x_x_v_i32mf2_m (vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t src, size_t vl) {
  return vwcvt_x_x_v_i32mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwcvt_x_x_v_i32m1_m (vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i32m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwcvt_x_x_v_i32m2_m (vbool16_t mask, vint32m2_t maskedoff, vint16m1_t src, size_t vl) {
  return vwcvt_x_x_v_i32m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwcvt_x_x_v_i32m4_m (vbool8_t mask, vint32m4_t maskedoff, vint16m2_t src, size_t vl) {
  return vwcvt_x_x_v_i32m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwcvt_x_x_v_i32m8_m (vbool4_t mask, vint32m8_t maskedoff, vint16m4_t src, size_t vl) {
  return vwcvt_x_x_v_i32m8_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32mf2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[SRC:%.*]], i16 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwcvtu_x_x_v_u32mf2_m (vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t src, size_t vl) {
  return vwcvtu_x_x_v_u32mf2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[SRC:%.*]], i16 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwcvtu_x_x_v_u32m1_m (vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[SRC:%.*]], i16 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwcvtu_x_x_v_u32m2_m (vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[SRC:%.*]], i16 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwcvtu_x_x_v_u32m4_m (vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u32m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[SRC:%.*]], i16 0, <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwcvtu_x_x_v_u32m8_m (vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t src, size_t vl) {
  return vwcvtu_x_x_v_u32m8_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwcvt_x_x_v_i64m1_m (vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t src, size_t vl) {
  return vwcvt_x_x_v_i64m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwcvt_x_x_v_i64m2_m (vbool32_t mask, vint64m2_t maskedoff, vint32m1_t src, size_t vl) {
  return vwcvt_x_x_v_i64m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwcvt_x_x_v_i64m4_m (vbool16_t mask, vint64m4_t maskedoff, vint32m2_t src, size_t vl) {
  return vwcvt_x_x_v_i64m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvt_x_x_v_i64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwcvt_x_x_v_i64m8_m (vbool8_t mask, vint64m8_t maskedoff, vint32m4_t src, size_t vl) {
  return vwcvt_x_x_v_i64m8_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m1_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[SRC:%.*]], i32 0, <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwcvtu_x_x_v_u64m1_m (vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m1_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m2_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[SRC:%.*]], i32 0, <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwcvtu_x_x_v_u64m2_m (vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m2_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m4_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[SRC:%.*]], i32 0, <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwcvtu_x_x_v_u64m4_m (vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m4_m(mask, maskedoff, src, vl);
}

//
// CHECK-RV64-LABEL: @test_vwcvtu_x_x_v_u64m8_m(
// CHECK-RV64-NEXT:  entry:
// CHECK-RV64-NEXT:    [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[SRC:%.*]], i32 0, <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT:    ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwcvtu_x_x_v_u64m8_m (vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t src, size_t vl) {
  return vwcvtu_x_x_v_u64m8_m(mask, maskedoff, src, vl);
}