// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>
6
7 //
8 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
9 // CHECK-RV64-NEXT: entry:
10 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
12 //
test_vwadd_vv_i16mf4(vint8mf8_t op1,vint8mf8_t op2,size_t vl)13 vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
14 return vwadd_vv_i16mf4(op1, op2, vl);
15 }
16
17 //
18 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
19 // CHECK-RV64-NEXT: entry:
20 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
21 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
22 //
test_vwadd_vx_i16mf4(vint8mf8_t op1,int8_t op2,size_t vl)23 vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
24 return vwadd_vx_i16mf4(op1, op2, vl);
25 }
26
27 //
28 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
29 // CHECK-RV64-NEXT: entry:
30 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
31 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
32 //
test_vwadd_wv_i16mf4(vint16mf4_t op1,vint8mf8_t op2,size_t vl)33 vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
34 return vwadd_wv_i16mf4(op1, op2, vl);
35 }
36
37 //
38 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
39 // CHECK-RV64-NEXT: entry:
40 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
41 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
42 //
test_vwadd_wx_i16mf4(vint16mf4_t op1,int8_t op2,size_t vl)43 vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
44 return vwadd_wx_i16mf4(op1, op2, vl);
45 }
46
47 //
48 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
49 // CHECK-RV64-NEXT: entry:
50 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
51 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
52 //
test_vwadd_vv_i16mf2(vint8mf4_t op1,vint8mf4_t op2,size_t vl)53 vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
54 return vwadd_vv_i16mf2(op1, op2, vl);
55 }
56
57 //
58 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
59 // CHECK-RV64-NEXT: entry:
60 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
61 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
62 //
test_vwadd_vx_i16mf2(vint8mf4_t op1,int8_t op2,size_t vl)63 vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
64 return vwadd_vx_i16mf2(op1, op2, vl);
65 }
66
67 //
68 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
69 // CHECK-RV64-NEXT: entry:
70 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
71 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
72 //
test_vwadd_wv_i16mf2(vint16mf2_t op1,vint8mf4_t op2,size_t vl)73 vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
74 return vwadd_wv_i16mf2(op1, op2, vl);
75 }
76
77 //
78 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
79 // CHECK-RV64-NEXT: entry:
80 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
81 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
82 //
test_vwadd_wx_i16mf2(vint16mf2_t op1,int8_t op2,size_t vl)83 vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
84 return vwadd_wx_i16mf2(op1, op2, vl);
85 }
86
87 //
88 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
89 // CHECK-RV64-NEXT: entry:
90 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
92 //
test_vwadd_vv_i16m1(vint8mf2_t op1,vint8mf2_t op2,size_t vl)93 vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
94 return vwadd_vv_i16m1(op1, op2, vl);
95 }
96
97 //
98 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
99 // CHECK-RV64-NEXT: entry:
100 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
101 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
102 //
test_vwadd_vx_i16m1(vint8mf2_t op1,int8_t op2,size_t vl)103 vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
104 return vwadd_vx_i16m1(op1, op2, vl);
105 }
106
107 //
108 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
109 // CHECK-RV64-NEXT: entry:
110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
111 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
112 //
test_vwadd_wv_i16m1(vint16m1_t op1,vint8mf2_t op2,size_t vl)113 vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
114 return vwadd_wv_i16m1(op1, op2, vl);
115 }
116
117 //
118 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
119 // CHECK-RV64-NEXT: entry:
120 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
122 //
test_vwadd_wx_i16m1(vint16m1_t op1,int8_t op2,size_t vl)123 vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
124 return vwadd_wx_i16m1(op1, op2, vl);
125 }
126
127 //
128 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
129 // CHECK-RV64-NEXT: entry:
130 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
131 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
132 //
test_vwadd_vv_i16m2(vint8m1_t op1,vint8m1_t op2,size_t vl)133 vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
134 return vwadd_vv_i16m2(op1, op2, vl);
135 }
136
137 //
138 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
139 // CHECK-RV64-NEXT: entry:
140 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
141 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
142 //
test_vwadd_vx_i16m2(vint8m1_t op1,int8_t op2,size_t vl)143 vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
144 return vwadd_vx_i16m2(op1, op2, vl);
145 }
146
147 //
148 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
149 // CHECK-RV64-NEXT: entry:
150 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
151 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
152 //
test_vwadd_wv_i16m2(vint16m2_t op1,vint8m1_t op2,size_t vl)153 vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
154 return vwadd_wv_i16m2(op1, op2, vl);
155 }
156
157 //
158 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
159 // CHECK-RV64-NEXT: entry:
160 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
161 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
162 //
test_vwadd_wx_i16m2(vint16m2_t op1,int8_t op2,size_t vl)163 vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
164 return vwadd_wx_i16m2(op1, op2, vl);
165 }
166
167 //
168 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
169 // CHECK-RV64-NEXT: entry:
170 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
171 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
172 //
test_vwadd_vv_i16m4(vint8m2_t op1,vint8m2_t op2,size_t vl)173 vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
174 return vwadd_vv_i16m4(op1, op2, vl);
175 }
176
177 //
178 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
179 // CHECK-RV64-NEXT: entry:
180 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
181 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
182 //
test_vwadd_vx_i16m4(vint8m2_t op1,int8_t op2,size_t vl)183 vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
184 return vwadd_vx_i16m4(op1, op2, vl);
185 }
186
187 //
188 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
189 // CHECK-RV64-NEXT: entry:
190 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
191 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
192 //
test_vwadd_wv_i16m4(vint16m4_t op1,vint8m2_t op2,size_t vl)193 vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
194 return vwadd_wv_i16m4(op1, op2, vl);
195 }
196
197 //
198 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
199 // CHECK-RV64-NEXT: entry:
200 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
201 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
202 //
test_vwadd_wx_i16m4(vint16m4_t op1,int8_t op2,size_t vl)203 vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
204 return vwadd_wx_i16m4(op1, op2, vl);
205 }
206
207 //
208 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
209 // CHECK-RV64-NEXT: entry:
210 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
211 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
212 //
test_vwadd_vv_i16m8(vint8m4_t op1,vint8m4_t op2,size_t vl)213 vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
214 return vwadd_vv_i16m8(op1, op2, vl);
215 }
216
217 //
218 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
219 // CHECK-RV64-NEXT: entry:
220 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
221 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
222 //
test_vwadd_vx_i16m8(vint8m4_t op1,int8_t op2,size_t vl)223 vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
224 return vwadd_vx_i16m8(op1, op2, vl);
225 }
226
227 //
228 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
229 // CHECK-RV64-NEXT: entry:
230 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
231 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
232 //
test_vwadd_wv_i16m8(vint16m8_t op1,vint8m4_t op2,size_t vl)233 vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
234 return vwadd_wv_i16m8(op1, op2, vl);
235 }
236
237 //
238 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
239 // CHECK-RV64-NEXT: entry:
240 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
241 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
242 //
test_vwadd_wx_i16m8(vint16m8_t op1,int8_t op2,size_t vl)243 vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
244 return vwadd_wx_i16m8(op1, op2, vl);
245 }
246
247 //
248 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
249 // CHECK-RV64-NEXT: entry:
250 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
251 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
252 //
test_vwadd_vv_i32mf2(vint16mf4_t op1,vint16mf4_t op2,size_t vl)253 vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
254 return vwadd_vv_i32mf2(op1, op2, vl);
255 }
256
257 //
258 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
259 // CHECK-RV64-NEXT: entry:
260 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
261 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
262 //
test_vwadd_vx_i32mf2(vint16mf4_t op1,int16_t op2,size_t vl)263 vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
264 return vwadd_vx_i32mf2(op1, op2, vl);
265 }
266
267 //
268 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
269 // CHECK-RV64-NEXT: entry:
270 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
271 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
272 //
test_vwadd_wv_i32mf2(vint32mf2_t op1,vint16mf4_t op2,size_t vl)273 vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
274 return vwadd_wv_i32mf2(op1, op2, vl);
275 }
276
277 //
278 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
279 // CHECK-RV64-NEXT: entry:
280 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
281 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
282 //
test_vwadd_wx_i32mf2(vint32mf2_t op1,int16_t op2,size_t vl)283 vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
284 return vwadd_wx_i32mf2(op1, op2, vl);
285 }
286
287 //
288 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
289 // CHECK-RV64-NEXT: entry:
290 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
291 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
292 //
test_vwadd_vv_i32m1(vint16mf2_t op1,vint16mf2_t op2,size_t vl)293 vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
294 return vwadd_vv_i32m1(op1, op2, vl);
295 }
296
297 //
298 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
299 // CHECK-RV64-NEXT: entry:
300 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
301 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
302 //
test_vwadd_vx_i32m1(vint16mf2_t op1,int16_t op2,size_t vl)303 vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
304 return vwadd_vx_i32m1(op1, op2, vl);
305 }
306
307 //
308 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
309 // CHECK-RV64-NEXT: entry:
310 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
311 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
312 //
test_vwadd_wv_i32m1(vint32m1_t op1,vint16mf2_t op2,size_t vl)313 vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
314 return vwadd_wv_i32m1(op1, op2, vl);
315 }
316
317 //
318 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
319 // CHECK-RV64-NEXT: entry:
320 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
321 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
322 //
test_vwadd_wx_i32m1(vint32m1_t op1,int16_t op2,size_t vl)323 vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
324 return vwadd_wx_i32m1(op1, op2, vl);
325 }
326
327 //
328 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
329 // CHECK-RV64-NEXT: entry:
330 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
331 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
332 //
test_vwadd_vv_i32m2(vint16m1_t op1,vint16m1_t op2,size_t vl)333 vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
334 return vwadd_vv_i32m2(op1, op2, vl);
335 }
336
337 //
338 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
339 // CHECK-RV64-NEXT: entry:
340 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
341 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
342 //
test_vwadd_vx_i32m2(vint16m1_t op1,int16_t op2,size_t vl)343 vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
344 return vwadd_vx_i32m2(op1, op2, vl);
345 }
346
347 //
348 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
349 // CHECK-RV64-NEXT: entry:
350 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
351 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
352 //
test_vwadd_wv_i32m2(vint32m2_t op1,vint16m1_t op2,size_t vl)353 vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
354 return vwadd_wv_i32m2(op1, op2, vl);
355 }
356
357 //
358 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
359 // CHECK-RV64-NEXT: entry:
360 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
361 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
362 //
test_vwadd_wx_i32m2(vint32m2_t op1,int16_t op2,size_t vl)363 vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
364 return vwadd_wx_i32m2(op1, op2, vl);
365 }
366
367 //
368 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
369 // CHECK-RV64-NEXT: entry:
370 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
371 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
372 //
test_vwadd_vv_i32m4(vint16m2_t op1,vint16m2_t op2,size_t vl)373 vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
374 return vwadd_vv_i32m4(op1, op2, vl);
375 }
376
377 //
378 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
379 // CHECK-RV64-NEXT: entry:
380 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
381 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
382 //
test_vwadd_vx_i32m4(vint16m2_t op1,int16_t op2,size_t vl)383 vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
384 return vwadd_vx_i32m4(op1, op2, vl);
385 }
386
387 //
388 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
389 // CHECK-RV64-NEXT: entry:
390 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
391 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
392 //
test_vwadd_wv_i32m4(vint32m4_t op1,vint16m2_t op2,size_t vl)393 vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
394 return vwadd_wv_i32m4(op1, op2, vl);
395 }
396
397 //
398 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
399 // CHECK-RV64-NEXT: entry:
400 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
401 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
402 //
test_vwadd_wx_i32m4(vint32m4_t op1,int16_t op2,size_t vl)403 vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
404 return vwadd_wx_i32m4(op1, op2, vl);
405 }
406
407 //
408 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
409 // CHECK-RV64-NEXT: entry:
410 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
411 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
412 //
test_vwadd_vv_i32m8(vint16m4_t op1,vint16m4_t op2,size_t vl)413 vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
414 return vwadd_vv_i32m8(op1, op2, vl);
415 }
416
417 //
418 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
419 // CHECK-RV64-NEXT: entry:
420 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
421 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
422 //
test_vwadd_vx_i32m8(vint16m4_t op1,int16_t op2,size_t vl)423 vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
424 return vwadd_vx_i32m8(op1, op2, vl);
425 }
426
427 //
428 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
429 // CHECK-RV64-NEXT: entry:
430 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
431 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
432 //
test_vwadd_wv_i32m8(vint32m8_t op1,vint16m4_t op2,size_t vl)433 vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
434 return vwadd_wv_i32m8(op1, op2, vl);
435 }
436
437 //
438 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
439 // CHECK-RV64-NEXT: entry:
440 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
441 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
442 //
test_vwadd_wx_i32m8(vint32m8_t op1,int16_t op2,size_t vl)443 vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
444 return vwadd_wx_i32m8(op1, op2, vl);
445 }
446
447 //
448 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
449 // CHECK-RV64-NEXT: entry:
450 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
452 //
test_vwadd_vv_i64m1(vint32mf2_t op1,vint32mf2_t op2,size_t vl)453 vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
454 return vwadd_vv_i64m1(op1, op2, vl);
455 }
456
457 //
458 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
459 // CHECK-RV64-NEXT: entry:
460 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
461 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
462 //
test_vwadd_vx_i64m1(vint32mf2_t op1,int32_t op2,size_t vl)463 vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
464 return vwadd_vx_i64m1(op1, op2, vl);
465 }
466
467 //
468 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
469 // CHECK-RV64-NEXT: entry:
470 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
471 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
472 //
test_vwadd_wv_i64m1(vint64m1_t op1,vint32mf2_t op2,size_t vl)473 vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
474 return vwadd_wv_i64m1(op1, op2, vl);
475 }
476
477 //
478 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
479 // CHECK-RV64-NEXT: entry:
480 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
481 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
482 //
test_vwadd_wx_i64m1(vint64m1_t op1,int32_t op2,size_t vl)483 vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
484 return vwadd_wx_i64m1(op1, op2, vl);
485 }
486
487 //
488 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
489 // CHECK-RV64-NEXT: entry:
490 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
491 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
492 //
test_vwadd_vv_i64m2(vint32m1_t op1,vint32m1_t op2,size_t vl)493 vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
494 return vwadd_vv_i64m2(op1, op2, vl);
495 }
496
497 //
498 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
499 // CHECK-RV64-NEXT: entry:
500 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
501 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
502 //
test_vwadd_vx_i64m2(vint32m1_t op1,int32_t op2,size_t vl)503 vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
504 return vwadd_vx_i64m2(op1, op2, vl);
505 }
506
507 //
508 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
509 // CHECK-RV64-NEXT: entry:
510 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
511 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
512 //
test_vwadd_wv_i64m2(vint64m2_t op1,vint32m1_t op2,size_t vl)513 vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
514 return vwadd_wv_i64m2(op1, op2, vl);
515 }
516
517 //
518 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
519 // CHECK-RV64-NEXT: entry:
520 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
521 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
522 //
test_vwadd_wx_i64m2(vint64m2_t op1,int32_t op2,size_t vl)523 vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
524 return vwadd_wx_i64m2(op1, op2, vl);
525 }
526
527 //
528 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
529 // CHECK-RV64-NEXT: entry:
530 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
531 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
532 //
test_vwadd_vv_i64m4(vint32m2_t op1,vint32m2_t op2,size_t vl)533 vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
534 return vwadd_vv_i64m4(op1, op2, vl);
535 }
536
537 //
538 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
539 // CHECK-RV64-NEXT: entry:
540 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
541 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
542 //
test_vwadd_vx_i64m4(vint32m2_t op1,int32_t op2,size_t vl)543 vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
544 return vwadd_vx_i64m4(op1, op2, vl);
545 }
546
547 //
548 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
549 // CHECK-RV64-NEXT: entry:
550 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
551 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
552 //
test_vwadd_wv_i64m4(vint64m4_t op1,vint32m2_t op2,size_t vl)553 vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
554 return vwadd_wv_i64m4(op1, op2, vl);
555 }
556
557 //
558 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
559 // CHECK-RV64-NEXT: entry:
560 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
561 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
562 //
test_vwadd_wx_i64m4(vint64m4_t op1,int32_t op2,size_t vl)563 vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
564 return vwadd_wx_i64m4(op1, op2, vl);
565 }
566
567 //
568 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
569 // CHECK-RV64-NEXT: entry:
570 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
571 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
572 //
test_vwadd_vv_i64m8(vint32m4_t op1,vint32m4_t op2,size_t vl)573 vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
574 return vwadd_vv_i64m8(op1, op2, vl);
575 }
576
577 //
578 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
579 // CHECK-RV64-NEXT: entry:
580 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
581 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
582 //
test_vwadd_vx_i64m8(vint32m4_t op1,int32_t op2,size_t vl)583 vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
584 return vwadd_vx_i64m8(op1, op2, vl);
585 }
586
587 //
588 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
589 // CHECK-RV64-NEXT: entry:
590 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
591 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
592 //
test_vwadd_wv_i64m8(vint64m8_t op1,vint32m4_t op2,size_t vl)593 vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
594 return vwadd_wv_i64m8(op1, op2, vl);
595 }
596
597 //
598 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
599 // CHECK-RV64-NEXT: entry:
600 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
601 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
602 //
test_vwadd_wx_i64m8(vint64m8_t op1,int32_t op2,size_t vl)603 vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
604 return vwadd_wx_i64m8(op1, op2, vl);
605 }
606
607 //
608 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
609 // CHECK-RV64-NEXT: entry:
610 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
611 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
612 //
test_vwaddu_vv_u16mf4(vuint8mf8_t op1,vuint8mf8_t op2,size_t vl)613 vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2,
614 size_t vl) {
615 return vwaddu_vv_u16mf4(op1, op2, vl);
616 }
617
618 //
619 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
620 // CHECK-RV64-NEXT: entry:
621 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
622 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
623 //
// vwaddu.vx: unsigned widening add, vector + scalar (u8mf8 + u8 -> u16mf4).
vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16mf4(op1, op2, vl);
}
627
628 //
629 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
630 // CHECK-RV64-NEXT: entry:
631 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
632 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
633 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u16mf4 + u8mf8 -> u16mf4).
vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2,
                                   size_t vl) {
  return vwaddu_wv_u16mf4(op1, op2, vl);
}
638
639 //
640 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
641 // CHECK-RV64-NEXT: entry:
642 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
643 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
644 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u16mf4 + u8 -> u16mf4).
vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16mf4(op1, op2, vl);
}
648
649 //
650 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
651 // CHECK-RV64-NEXT: entry:
652 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
653 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
654 //
// vwaddu.vv: unsigned widening add, vector + vector (u8mf4 + u8mf4 -> u16mf2).
vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2,
                                   size_t vl) {
  return vwaddu_vv_u16mf2(op1, op2, vl);
}
659
660 //
661 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
662 // CHECK-RV64-NEXT: entry:
663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
664 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
665 //
// vwaddu.vx: unsigned widening add, vector + scalar (u8mf4 + u8 -> u16mf2).
vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16mf2(op1, op2, vl);
}
669
670 //
671 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
672 // CHECK-RV64-NEXT: entry:
673 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
674 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
675 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u16mf2 + u8mf4 -> u16mf2).
vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2,
                                   size_t vl) {
  return vwaddu_wv_u16mf2(op1, op2, vl);
}
680
681 //
682 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
683 // CHECK-RV64-NEXT: entry:
684 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
685 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
686 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u16mf2 + u8 -> u16mf2).
vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16mf2(op1, op2, vl);
}
690
691 //
692 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
693 // CHECK-RV64-NEXT: entry:
694 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
695 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
696 //
// vwaddu.vv: unsigned widening add, vector + vector (u8mf2 + u8mf2 -> u16m1).
vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vwaddu_vv_u16m1(op1, op2, vl);
}
700
701 //
702 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
703 // CHECK-RV64-NEXT: entry:
704 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
705 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
706 //
// vwaddu.vx: unsigned widening add, vector + scalar (u8mf2 + u8 -> u16m1).
vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m1(op1, op2, vl);
}
710
711 //
712 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
713 // CHECK-RV64-NEXT: entry:
714 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
715 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
716 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u16m1 + u8mf2 -> u16m1).
vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
  return vwaddu_wv_u16m1(op1, op2, vl);
}
720
721 //
722 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
723 // CHECK-RV64-NEXT: entry:
724 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
725 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
726 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u16m1 + u8 -> u16m1).
vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m1(op1, op2, vl);
}
730
731 //
732 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
733 // CHECK-RV64-NEXT: entry:
734 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
735 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
736 //
// vwaddu.vv: unsigned widening add, vector + vector (u8m1 + u8m1 -> u16m2).
vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_vv_u16m2(op1, op2, vl);
}
740
741 //
742 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
743 // CHECK-RV64-NEXT: entry:
744 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
745 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
746 //
// vwaddu.vx: unsigned widening add, vector + scalar (u8m1 + u8 -> u16m2).
vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m2(op1, op2, vl);
}
750
751 //
752 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
753 // CHECK-RV64-NEXT: entry:
754 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
755 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
756 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u16m2 + u8m1 -> u16m2).
vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_wv_u16m2(op1, op2, vl);
}
760
761 //
762 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
763 // CHECK-RV64-NEXT: entry:
764 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
765 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
766 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u16m2 + u8 -> u16m2).
vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m2(op1, op2, vl);
}
770
771 //
772 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
773 // CHECK-RV64-NEXT: entry:
774 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
775 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
776 //
// vwaddu.vv: unsigned widening add, vector + vector (u8m2 + u8m2 -> u16m4).
vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_vv_u16m4(op1, op2, vl);
}
780
781 //
782 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
783 // CHECK-RV64-NEXT: entry:
784 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
785 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
786 //
// vwaddu.vx: unsigned widening add, vector + scalar (u8m2 + u8 -> u16m4).
vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m4(op1, op2, vl);
}
790
791 //
792 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
793 // CHECK-RV64-NEXT: entry:
794 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
795 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
796 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u16m4 + u8m2 -> u16m4).
vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_wv_u16m4(op1, op2, vl);
}
800
801 //
802 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
803 // CHECK-RV64-NEXT: entry:
804 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
805 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
806 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u16m4 + u8 -> u16m4).
vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m4(op1, op2, vl);
}
810
811 //
812 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
813 // CHECK-RV64-NEXT: entry:
814 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
815 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
816 //
// vwaddu.vv: unsigned widening add, vector + vector (u8m4 + u8m4 -> u16m8).
vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_vv_u16m8(op1, op2, vl);
}
820
821 //
822 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
823 // CHECK-RV64-NEXT: entry:
824 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
825 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
826 //
// vwaddu.vx: unsigned widening add, vector + scalar (u8m4 + u8 -> u16m8).
vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m8(op1, op2, vl);
}
830
831 //
832 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
833 // CHECK-RV64-NEXT: entry:
834 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
835 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
836 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u16m8 + u8m4 -> u16m8).
vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_wv_u16m8(op1, op2, vl);
}
840
841 //
842 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
843 // CHECK-RV64-NEXT: entry:
844 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
845 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
846 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u16m8 + u8 -> u16m8).
vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m8(op1, op2, vl);
}
850
851 //
852 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
853 // CHECK-RV64-NEXT: entry:
854 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
855 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
856 //
// vwaddu.vv: unsigned widening add, vector + vector (u16mf4 + u16mf4 -> u32mf2).
vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2,
                                   size_t vl) {
  return vwaddu_vv_u32mf2(op1, op2, vl);
}
861
862 //
863 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
864 // CHECK-RV64-NEXT: entry:
865 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
866 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
867 //
// vwaddu.vx: unsigned widening add, vector + scalar (u16mf4 + u16 -> u32mf2).
vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32mf2(op1, op2, vl);
}
871
872 //
873 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
874 // CHECK-RV64-NEXT: entry:
875 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
876 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
877 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u32mf2 + u16mf4 -> u32mf2).
vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2,
                                   size_t vl) {
  return vwaddu_wv_u32mf2(op1, op2, vl);
}
882
883 //
884 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
885 // CHECK-RV64-NEXT: entry:
886 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
887 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
888 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u32mf2 + u16 -> u32mf2).
vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32mf2(op1, op2, vl);
}
892
893 //
894 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
895 // CHECK-RV64-NEXT: entry:
896 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
897 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
898 //
// vwaddu.vv: unsigned widening add, vector + vector (u16mf2 + u16mf2 -> u32m1).
vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2,
                                 size_t vl) {
  return vwaddu_vv_u32m1(op1, op2, vl);
}
903
904 //
905 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
906 // CHECK-RV64-NEXT: entry:
907 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
908 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
909 //
// vwaddu.vx: unsigned widening add, vector + scalar (u16mf2 + u16 -> u32m1).
vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m1(op1, op2, vl);
}
913
914 //
915 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
916 // CHECK-RV64-NEXT: entry:
917 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
918 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
919 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u32m1 + u16mf2 -> u32m1).
vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
  return vwaddu_wv_u32m1(op1, op2, vl);
}
923
924 //
925 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
926 // CHECK-RV64-NEXT: entry:
927 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
928 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
929 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u32m1 + u16 -> u32m1).
vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m1(op1, op2, vl);
}
933
934 //
935 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
936 // CHECK-RV64-NEXT: entry:
937 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
938 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
939 //
// vwaddu.vv: unsigned widening add, vector + vector (u16m1 + u16m1 -> u32m2).
vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vwaddu_vv_u32m2(op1, op2, vl);
}
943
944 //
945 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
946 // CHECK-RV64-NEXT: entry:
947 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
948 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
949 //
// vwaddu.vx: unsigned widening add, vector + scalar (u16m1 + u16 -> u32m2).
vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m2(op1, op2, vl);
}
953
954 //
955 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
956 // CHECK-RV64-NEXT: entry:
957 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
958 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
959 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u32m2 + u16m1 -> u32m2).
vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
  return vwaddu_wv_u32m2(op1, op2, vl);
}
963
964 //
965 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
966 // CHECK-RV64-NEXT: entry:
967 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
968 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
969 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u32m2 + u16 -> u32m2).
vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m2(op1, op2, vl);
}
973
974 //
975 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
976 // CHECK-RV64-NEXT: entry:
977 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
978 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
979 //
// vwaddu.vv: unsigned widening add, vector + vector (u16m2 + u16m2 -> u32m4).
vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vwaddu_vv_u32m4(op1, op2, vl);
}
983
984 //
985 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
986 // CHECK-RV64-NEXT: entry:
987 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
988 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
989 //
// vwaddu.vx: unsigned widening add, vector + scalar (u16m2 + u16 -> u32m4).
vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m4(op1, op2, vl);
}
993
994 //
995 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
996 // CHECK-RV64-NEXT: entry:
997 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
998 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
999 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u32m4 + u16m2 -> u32m4).
vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
  return vwaddu_wv_u32m4(op1, op2, vl);
}
1003
1004 //
1005 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
1006 // CHECK-RV64-NEXT: entry:
1007 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1008 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1009 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u32m4 + u16 -> u32m4).
vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m4(op1, op2, vl);
}
1013
1014 //
1015 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
1016 // CHECK-RV64-NEXT: entry:
1017 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1018 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1019 //
// vwaddu.vv: unsigned widening add, vector + vector (u16m4 + u16m4 -> u32m8).
vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vwaddu_vv_u32m8(op1, op2, vl);
}
1023
1024 //
1025 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
1026 // CHECK-RV64-NEXT: entry:
1027 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1028 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1029 //
// vwaddu.vx: unsigned widening add, vector + scalar (u16m4 + u16 -> u32m8).
vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m8(op1, op2, vl);
}
1033
1034 //
1035 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
1036 // CHECK-RV64-NEXT: entry:
1037 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1038 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1039 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u32m8 + u16m4 -> u32m8).
vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
  return vwaddu_wv_u32m8(op1, op2, vl);
}
1043
1044 //
1045 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
1046 // CHECK-RV64-NEXT: entry:
1047 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
1048 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1049 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u32m8 + u16 -> u32m8).
vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m8(op1, op2, vl);
}
1053
1054 //
1055 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
1056 // CHECK-RV64-NEXT: entry:
1057 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1058 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1059 //
// vwaddu.vv: unsigned widening add, vector + vector (u32mf2 + u32mf2 -> u64m1).
vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2,
                                 size_t vl) {
  return vwaddu_vv_u64m1(op1, op2, vl);
}
1064
1065 //
1066 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
1067 // CHECK-RV64-NEXT: entry:
1068 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1069 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1070 //
// vwaddu.vx: unsigned widening add, vector + scalar (u32mf2 + u32 -> u64m1).
vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m1(op1, op2, vl);
}
1074
1075 //
1076 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
1077 // CHECK-RV64-NEXT: entry:
1078 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1079 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1080 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u64m1 + u32mf2 -> u64m1).
vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
  return vwaddu_wv_u64m1(op1, op2, vl);
}
1084
1085 //
1086 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
1087 // CHECK-RV64-NEXT: entry:
1088 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1089 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1090 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u64m1 + u32 -> u64m1).
vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m1(op1, op2, vl);
}
1094
1095 //
1096 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
1097 // CHECK-RV64-NEXT: entry:
1098 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1099 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1100 //
// vwaddu.vv: unsigned widening add, vector + vector (u32m1 + u32m1 -> u64m2).
vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vwaddu_vv_u64m2(op1, op2, vl);
}
1104
1105 //
1106 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
1107 // CHECK-RV64-NEXT: entry:
1108 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1109 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1110 //
// vwaddu.vx: unsigned widening add, vector + scalar (u32m1 + u32 -> u64m2).
vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m2(op1, op2, vl);
}
1114
1115 //
1116 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
1117 // CHECK-RV64-NEXT: entry:
1118 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1119 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1120 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u64m2 + u32m1 -> u64m2).
vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
  return vwaddu_wv_u64m2(op1, op2, vl);
}
1124
1125 //
1126 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
1127 // CHECK-RV64-NEXT: entry:
1128 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1129 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1130 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u64m2 + u32 -> u64m2).
vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m2(op1, op2, vl);
}
1134
1135 //
1136 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
1137 // CHECK-RV64-NEXT: entry:
1138 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1139 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1140 //
// vwaddu.vv: unsigned widening add, vector + vector (u32m2 + u32m2 -> u64m4).
vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vwaddu_vv_u64m4(op1, op2, vl);
}
1144
1145 //
1146 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
1147 // CHECK-RV64-NEXT: entry:
1148 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1149 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1150 //
// vwaddu.vx: unsigned widening add, vector + scalar (u32m2 + u32 -> u64m4).
vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m4(op1, op2, vl);
}
1154
1155 //
1156 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
1157 // CHECK-RV64-NEXT: entry:
1158 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1159 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1160 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u64m4 + u32m2 -> u64m4).
vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
  return vwaddu_wv_u64m4(op1, op2, vl);
}
1164
1165 //
1166 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
1167 // CHECK-RV64-NEXT: entry:
1168 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1169 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1170 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u64m4 + u32 -> u64m4).
vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m4(op1, op2, vl);
}
1174
1175 //
1176 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
1177 // CHECK-RV64-NEXT: entry:
1178 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1179 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1180 //
// vwaddu.vv: unsigned widening add, vector + vector (u32m4 + u32m4 -> u64m8).
vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vwaddu_vv_u64m8(op1, op2, vl);
}
1184
1185 //
1186 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
1187 // CHECK-RV64-NEXT: entry:
1188 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1189 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1190 //
// vwaddu.vx: unsigned widening add, vector + scalar (u32m4 + u32 -> u64m8).
vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m8(op1, op2, vl);
}
1194
1195 //
1196 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
1197 // CHECK-RV64-NEXT: entry:
1198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1199 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1200 //
// vwaddu.wv: unsigned widening add, wide vector + narrow vector (u64m8 + u32m4 -> u64m8).
vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
  return vwaddu_wv_u64m8(op1, op2, vl);
}
1204
1205 //
1206 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
1207 // CHECK-RV64-NEXT: entry:
1208 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
1209 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1210 //
// vwaddu.wx: unsigned widening add, wide vector + narrow scalar (u64m8 + u32 -> u64m8).
vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m8(op1, op2, vl);
}
1214
1215 //
1216 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
1217 // CHECK-RV64-NEXT: entry:
1218 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1219 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1220 //
// Masked vwadd.vv (i8mf8 + i8mf8 -> i16mf4): mask and maskedoff are forwarded
// to the vwadd.mask intrinsic, which the CHECK lines above verify.
vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                   vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vwadd_vv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
1225
1226 //
1227 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
1228 // CHECK-RV64-NEXT: entry:
1229 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1230 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1231 //
// Masked vwadd.vx (i8mf8 + i8 scalar -> i16mf4): mask and maskedoff are
// forwarded to the vwadd.mask intrinsic, which the CHECK lines above verify.
vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                   vint8mf8_t op1, int8_t op2, size_t vl) {
  return vwadd_vx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
1236
1237 //
1238 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
1239 // CHECK-RV64-NEXT: entry:
1240 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1241 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1242 //
// Masked vwadd.wv: wide i16mf4 vector + narrow i8mf8 vector (CHECKs above).
vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                   vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
  return vwadd_wv_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
1247
1248 //
1249 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
1250 // CHECK-RV64-NEXT: entry:
1251 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1252 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1253 //
// Masked vwadd.wx: wide i16mf4 vector + int8_t scalar (CHECKs above).
vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff,
                                   vint16mf4_t op1, int8_t op2, size_t vl) {
  return vwadd_wx_i16mf4_m(mask, maskedoff, op1, op2, vl);
}
1258
1259 //
1260 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
1261 // CHECK-RV64-NEXT: entry:
1262 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1263 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1264 //
// Masked vwadd.vv: i8mf4 + i8mf4 -> i16mf2 (CHECKs above).
vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                   vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vwadd_vv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
1269
1270 //
1271 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
1272 // CHECK-RV64-NEXT: entry:
1273 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1274 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1275 //
// Masked vwadd.vx: i8mf4 vector + int8_t scalar -> i16mf2 (CHECKs above).
vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                   vint8mf4_t op1, int8_t op2, size_t vl) {
  return vwadd_vx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
1280
1281 //
1282 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
1283 // CHECK-RV64-NEXT: entry:
1284 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1285 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1286 //
// Masked vwadd.wv: wide i16mf2 vector + narrow i8mf4 vector (CHECKs above).
vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                   vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
  return vwadd_wv_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
1291
1292 //
1293 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
1294 // CHECK-RV64-NEXT: entry:
1295 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1296 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1297 //
// Masked vwadd.wx: wide i16mf2 vector + int8_t scalar (CHECKs above).
vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff,
                                   vint16mf2_t op1, int8_t op2, size_t vl) {
  return vwadd_wx_i16mf2_m(mask, maskedoff, op1, op2, vl);
}
1302
1303 //
1304 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
1305 // CHECK-RV64-NEXT: entry:
1306 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1307 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1308 //
// Masked vwadd.vv: i8mf2 + i8mf2 -> i16m1 (CHECKs above).
vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vwadd_vv_i16m1_m(mask, maskedoff, op1, op2, vl);
}
1313
1314 //
1315 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
1316 // CHECK-RV64-NEXT: entry:
1317 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1318 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1319 //
// Masked vwadd.vx: i8mf2 vector + int8_t scalar -> i16m1 (CHECKs above).
vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                 vint8mf2_t op1, int8_t op2, size_t vl) {
  return vwadd_vx_i16m1_m(mask, maskedoff, op1, op2, vl);
}
1324
1325 //
1326 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
1327 // CHECK-RV64-NEXT: entry:
1328 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1329 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1330 //
// Masked vwadd.wv: wide i16m1 vector + narrow i8mf2 vector (CHECKs above).
vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                 vint16m1_t op1, vint8mf2_t op2, size_t vl) {
  return vwadd_wv_i16m1_m(mask, maskedoff, op1, op2, vl);
}
1335
1336 //
1337 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
1338 // CHECK-RV64-NEXT: entry:
1339 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1340 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1341 //
// Masked vwadd.wx: wide i16m1 vector + int8_t scalar (CHECKs above).
vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff,
                                 vint16m1_t op1, int8_t op2, size_t vl) {
  return vwadd_wx_i16m1_m(mask, maskedoff, op1, op2, vl);
}
1346
1347 //
1348 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
1349 // CHECK-RV64-NEXT: entry:
1350 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1351 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1352 //
// Masked vwadd.vv: i8m1 + i8m1 -> i16m2 (CHECKs above).
vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                 vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vwadd_vv_i16m2_m(mask, maskedoff, op1, op2, vl);
}
1357
1358 //
1359 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
1360 // CHECK-RV64-NEXT: entry:
1361 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1362 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1363 //
// Masked vwadd.vx: i8m1 vector + int8_t scalar -> i16m2 (CHECKs above).
vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                 vint8m1_t op1, int8_t op2, size_t vl) {
  return vwadd_vx_i16m2_m(mask, maskedoff, op1, op2, vl);
}
1368
1369 //
1370 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
1371 // CHECK-RV64-NEXT: entry:
1372 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1373 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1374 //
// Masked vwadd.wv: wide i16m2 vector + narrow i8m1 vector (CHECKs above).
vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                 vint16m2_t op1, vint8m1_t op2, size_t vl) {
  return vwadd_wv_i16m2_m(mask, maskedoff, op1, op2, vl);
}
1379
1380 //
1381 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
1382 // CHECK-RV64-NEXT: entry:
1383 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1384 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1385 //
// Masked vwadd.wx: wide i16m2 vector + int8_t scalar (CHECKs above).
vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff,
                                 vint16m2_t op1, int8_t op2, size_t vl) {
  return vwadd_wx_i16m2_m(mask, maskedoff, op1, op2, vl);
}
1390
1391 //
1392 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
1393 // CHECK-RV64-NEXT: entry:
1394 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1395 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1396 //
// Masked vwadd.vv: i8m2 + i8m2 -> i16m4 (CHECKs above).
vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                 vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vwadd_vv_i16m4_m(mask, maskedoff, op1, op2, vl);
}
1401
1402 //
1403 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
1404 // CHECK-RV64-NEXT: entry:
1405 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1406 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1407 //
// Masked vwadd.vx: i8m2 vector + int8_t scalar -> i16m4 (CHECKs above).
vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                 vint8m2_t op1, int8_t op2, size_t vl) {
  return vwadd_vx_i16m4_m(mask, maskedoff, op1, op2, vl);
}
1412
1413 //
1414 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
1415 // CHECK-RV64-NEXT: entry:
1416 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1417 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1418 //
// Masked vwadd.wv: wide i16m4 vector + narrow i8m2 vector (CHECKs above).
vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                 vint16m4_t op1, vint8m2_t op2, size_t vl) {
  return vwadd_wv_i16m4_m(mask, maskedoff, op1, op2, vl);
}
1423
1424 //
1425 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
1426 // CHECK-RV64-NEXT: entry:
1427 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1428 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1429 //
// Masked vwadd.wx: wide i16m4 vector + int8_t scalar (CHECKs above).
vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff,
                                 vint16m4_t op1, int8_t op2, size_t vl) {
  return vwadd_wx_i16m4_m(mask, maskedoff, op1, op2, vl);
}
1434
1435 //
1436 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
1437 // CHECK-RV64-NEXT: entry:
1438 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1439 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1440 //
// Masked vwadd.vv: i8m4 + i8m4 -> i16m8 (CHECKs above).
vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                 vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vwadd_vv_i16m8_m(mask, maskedoff, op1, op2, vl);
}
1445
1446 //
1447 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
1448 // CHECK-RV64-NEXT: entry:
1449 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1450 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1451 //
// Masked vwadd.vx: i8m4 vector + int8_t scalar -> i16m8 (CHECKs above).
vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                 vint8m4_t op1, int8_t op2, size_t vl) {
  return vwadd_vx_i16m8_m(mask, maskedoff, op1, op2, vl);
}
1456
1457 //
1458 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
1459 // CHECK-RV64-NEXT: entry:
1460 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1461 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1462 //
// Masked vwadd.wv: wide i16m8 vector + narrow i8m4 vector (CHECKs above).
vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                 vint16m8_t op1, vint8m4_t op2, size_t vl) {
  return vwadd_wv_i16m8_m(mask, maskedoff, op1, op2, vl);
}
1467
1468 //
1469 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
1470 // CHECK-RV64-NEXT: entry:
1471 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1472 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1473 //
// Masked vwadd.wx: wide i16m8 vector + int8_t scalar (CHECKs above).
vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff,
                                 vint16m8_t op1, int8_t op2, size_t vl) {
  return vwadd_wx_i16m8_m(mask, maskedoff, op1, op2, vl);
}
1478
1479 //
1480 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
1481 // CHECK-RV64-NEXT: entry:
1482 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1483 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1484 //
// Masked vwadd.vv: i16mf4 + i16mf4 -> i32mf2 (CHECKs above).
vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                   vint16mf4_t op1, vint16mf4_t op2,
                                   size_t vl) {
  return vwadd_vv_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
1490
1491 //
1492 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
1493 // CHECK-RV64-NEXT: entry:
1494 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1495 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1496 //
// Masked vwadd.vx: i16mf4 vector + int16_t scalar -> i32mf2 (CHECKs above).
vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                   vint16mf4_t op1, int16_t op2, size_t vl) {
  return vwadd_vx_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
1501
1502 //
1503 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
1504 // CHECK-RV64-NEXT: entry:
1505 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1506 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1507 //
// Masked vwadd.wv: wide i32mf2 vector + narrow i16mf4 vector (CHECKs above).
vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                   vint32mf2_t op1, vint16mf4_t op2,
                                   size_t vl) {
  return vwadd_wv_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
1513
1514 //
1515 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
1516 // CHECK-RV64-NEXT: entry:
1517 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1518 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1519 //
// Masked vwadd.wx: wide i32mf2 vector + int16_t scalar (CHECKs above).
vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff,
                                   vint32mf2_t op1, int16_t op2, size_t vl) {
  return vwadd_wx_i32mf2_m(mask, maskedoff, op1, op2, vl);
}
1524
1525 //
1526 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
1527 // CHECK-RV64-NEXT: entry:
1528 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1529 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1530 //
// Masked vwadd.vv: i16mf2 + i16mf2 -> i32m1 (CHECKs above).
vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                 vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vwadd_vv_i32m1_m(mask, maskedoff, op1, op2, vl);
}
1535
1536 //
1537 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
1538 // CHECK-RV64-NEXT: entry:
1539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1540 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1541 //
// Masked vwadd.vx: i16mf2 vector + int16_t scalar -> i32m1 (CHECKs above).
vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                 vint16mf2_t op1, int16_t op2, size_t vl) {
  return vwadd_vx_i32m1_m(mask, maskedoff, op1, op2, vl);
}
1546
1547 //
1548 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
1549 // CHECK-RV64-NEXT: entry:
1550 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1551 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1552 //
// Masked vwadd.wv: wide i32m1 vector + narrow i16mf2 vector (CHECKs above).
vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                 vint32m1_t op1, vint16mf2_t op2, size_t vl) {
  return vwadd_wv_i32m1_m(mask, maskedoff, op1, op2, vl);
}
1557
1558 //
1559 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
1560 // CHECK-RV64-NEXT: entry:
1561 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1562 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1563 //
// Masked vwadd.wx: wide i32m1 vector + int16_t scalar (CHECKs above).
vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff,
                                 vint32m1_t op1, int16_t op2, size_t vl) {
  return vwadd_wx_i32m1_m(mask, maskedoff, op1, op2, vl);
}
1568
1569 //
1570 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
1571 // CHECK-RV64-NEXT: entry:
1572 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1573 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1574 //
// Masked vwadd.vv: i16m1 + i16m1 -> i32m2 (CHECKs above).
vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                 vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vwadd_vv_i32m2_m(mask, maskedoff, op1, op2, vl);
}
1579
1580 //
1581 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
1582 // CHECK-RV64-NEXT: entry:
1583 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1584 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1585 //
// Masked vwadd.vx: i16m1 vector + int16_t scalar -> i32m2 (CHECKs above).
vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                 vint16m1_t op1, int16_t op2, size_t vl) {
  return vwadd_vx_i32m2_m(mask, maskedoff, op1, op2, vl);
}
1590
1591 //
1592 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
1593 // CHECK-RV64-NEXT: entry:
1594 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1595 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1596 //
// Masked vwadd.wv: wide i32m2 vector + narrow i16m1 vector (CHECKs above).
vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                 vint32m2_t op1, vint16m1_t op2, size_t vl) {
  return vwadd_wv_i32m2_m(mask, maskedoff, op1, op2, vl);
}
1601
1602 //
1603 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
1604 // CHECK-RV64-NEXT: entry:
1605 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1606 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1607 //
// Masked vwadd.wx: wide i32m2 vector + int16_t scalar (CHECKs above).
vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff,
                                 vint32m2_t op1, int16_t op2, size_t vl) {
  return vwadd_wx_i32m2_m(mask, maskedoff, op1, op2, vl);
}
1612
1613 //
1614 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
1615 // CHECK-RV64-NEXT: entry:
1616 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1617 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1618 //
// Masked vwadd.vv: i16m2 + i16m2 -> i32m4 (CHECKs above).
vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                 vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vwadd_vv_i32m4_m(mask, maskedoff, op1, op2, vl);
}
1623
1624 //
1625 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
1626 // CHECK-RV64-NEXT: entry:
1627 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1628 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1629 //
// Masked vwadd.vx: i16m2 vector + int16_t scalar -> i32m4 (CHECKs above).
vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                 vint16m2_t op1, int16_t op2, size_t vl) {
  return vwadd_vx_i32m4_m(mask, maskedoff, op1, op2, vl);
}
1634
1635 //
1636 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
1637 // CHECK-RV64-NEXT: entry:
1638 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1639 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1640 //
// Masked vwadd.wv: wide i32m4 vector + narrow i16m2 vector (CHECKs above).
vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                 vint32m4_t op1, vint16m2_t op2, size_t vl) {
  return vwadd_wv_i32m4_m(mask, maskedoff, op1, op2, vl);
}
1645
1646 //
1647 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
1648 // CHECK-RV64-NEXT: entry:
1649 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1650 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1651 //
// Masked vwadd.wx: wide i32m4 vector + int16_t scalar (CHECKs above).
vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff,
                                 vint32m4_t op1, int16_t op2, size_t vl) {
  return vwadd_wx_i32m4_m(mask, maskedoff, op1, op2, vl);
}
1656
1657 //
1658 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
1659 // CHECK-RV64-NEXT: entry:
1660 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1661 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1662 //
// Masked vwadd.vv: i16m4 + i16m4 -> i32m8 (CHECKs above).
vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vwadd_vv_i32m8_m(mask, maskedoff, op1, op2, vl);
}
1667
1668 //
1669 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
1670 // CHECK-RV64-NEXT: entry:
1671 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1672 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1673 //
// Masked vwadd.vx: i16m4 vector + int16_t scalar -> i32m8 (CHECKs above).
vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint16m4_t op1, int16_t op2, size_t vl) {
  return vwadd_vx_i32m8_m(mask, maskedoff, op1, op2, vl);
}
1678
1679 //
1680 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
1681 // CHECK-RV64-NEXT: entry:
1682 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1683 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1684 //
// Masked vwadd.wv: wide i32m8 vector + narrow i16m4 vector (CHECKs above).
vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint32m8_t op1, vint16m4_t op2, size_t vl) {
  return vwadd_wv_i32m8_m(mask, maskedoff, op1, op2, vl);
}
1689
1690 //
1691 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
1692 // CHECK-RV64-NEXT: entry:
1693 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1694 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1695 //
// Masked vwadd.wx: wide i32m8 vector + int16_t scalar (CHECKs above).
vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff,
                                 vint32m8_t op1, int16_t op2, size_t vl) {
  return vwadd_wx_i32m8_m(mask, maskedoff, op1, op2, vl);
}
1700
1701 //
1702 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
1703 // CHECK-RV64-NEXT: entry:
1704 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1705 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1706 //
// Masked vwadd.vv: i32mf2 + i32mf2 -> i64m1 (CHECKs above).
vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vwadd_vv_i64m1_m(mask, maskedoff, op1, op2, vl);
}
1711
1712 //
1713 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
1714 // CHECK-RV64-NEXT: entry:
1715 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1716 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1717 //
// Masked vwadd.vx: i32mf2 vector + int32_t scalar -> i64m1 (CHECKs above).
vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint32mf2_t op1, int32_t op2, size_t vl) {
  return vwadd_vx_i64m1_m(mask, maskedoff, op1, op2, vl);
}
1722
1723 //
1724 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
1725 // CHECK-RV64-NEXT: entry:
1726 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1727 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1728 //
// Masked vwadd.wv: wide i64m1 vector + narrow i32mf2 vector (CHECKs above).
vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, vint32mf2_t op2, size_t vl) {
  return vwadd_wv_i64m1_m(mask, maskedoff, op1, op2, vl);
}
1733
1734 //
1735 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
1736 // CHECK-RV64-NEXT: entry:
1737 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1738 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1739 //
// Masked widening signed add (.w form), wide-vector + scalar: i64m1 + int32_t -> i64m1.
vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff,
                                 vint64m1_t op1, int32_t op2, size_t vl) {
  return vwadd_wx_i64m1_m(mask, maskedoff, op1, op2, vl);
}
1744
1745 //
1746 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
1747 // CHECK-RV64-NEXT: entry:
1748 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1749 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1750 //
// Masked widening signed add, vector-vector: i32m1 + i32m1 -> i64m2.
vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vwadd_vv_i64m2_m(mask, maskedoff, op1, op2, vl);
}
1755
1756 //
1757 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
1758 // CHECK-RV64-NEXT: entry:
1759 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1760 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1761 //
// Masked widening signed add, vector-scalar: i32m1 + int32_t -> i64m2.
vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint32m1_t op1, int32_t op2, size_t vl) {
  return vwadd_vx_i64m2_m(mask, maskedoff, op1, op2, vl);
}
1766
1767 //
1768 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
1769 // CHECK-RV64-NEXT: entry:
1770 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1771 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1772 //
// Masked widening signed add (.w form), wide-vector + vector: i64m2 + i32m1 -> i64m2.
vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, vint32m1_t op2, size_t vl) {
  return vwadd_wv_i64m2_m(mask, maskedoff, op1, op2, vl);
}
1777
1778 //
1779 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
1780 // CHECK-RV64-NEXT: entry:
1781 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1782 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1783 //
// Masked widening signed add (.w form), wide-vector + scalar: i64m2 + int32_t -> i64m2.
vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff,
                                 vint64m2_t op1, int32_t op2, size_t vl) {
  return vwadd_wx_i64m2_m(mask, maskedoff, op1, op2, vl);
}
1788
1789 //
1790 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
1791 // CHECK-RV64-NEXT: entry:
1792 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1793 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1794 //
// Masked widening signed add, vector-vector: i32m2 + i32m2 -> i64m4.
vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vwadd_vv_i64m4_m(mask, maskedoff, op1, op2, vl);
}
1799
1800 //
1801 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
1802 // CHECK-RV64-NEXT: entry:
1803 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1804 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1805 //
// Masked widening signed add, vector-scalar: i32m2 + int32_t -> i64m4.
vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint32m2_t op1, int32_t op2, size_t vl) {
  return vwadd_vx_i64m4_m(mask, maskedoff, op1, op2, vl);
}
1810
1811 //
1812 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
1813 // CHECK-RV64-NEXT: entry:
1814 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1815 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1816 //
// Masked widening signed add (.w form), wide-vector + vector: i64m4 + i32m2 -> i64m4.
vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, vint32m2_t op2, size_t vl) {
  return vwadd_wv_i64m4_m(mask, maskedoff, op1, op2, vl);
}
1821
1822 //
1823 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
1824 // CHECK-RV64-NEXT: entry:
1825 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1826 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1827 //
// Masked widening signed add (.w form), wide-vector + scalar: i64m4 + int32_t -> i64m4.
vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff,
                                 vint64m4_t op1, int32_t op2, size_t vl) {
  return vwadd_wx_i64m4_m(mask, maskedoff, op1, op2, vl);
}
1832
1833 //
1834 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
1835 // CHECK-RV64-NEXT: entry:
1836 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1837 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1838 //
// Masked widening signed add, vector-vector: i32m4 + i32m4 -> i64m8.
vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vwadd_vv_i64m8_m(mask, maskedoff, op1, op2, vl);
}
1843
1844 //
1845 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
1846 // CHECK-RV64-NEXT: entry:
1847 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1848 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1849 //
// Masked widening signed add, vector-scalar: i32m4 + int32_t -> i64m8.
vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint32m4_t op1, int32_t op2, size_t vl) {
  return vwadd_vx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
1854
1855 //
1856 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
1857 // CHECK-RV64-NEXT: entry:
1858 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1859 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1860 //
// Masked widening signed add (.w form), wide-vector + vector: i64m8 + i32m4 -> i64m8.
vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, vint32m4_t op2, size_t vl) {
  return vwadd_wv_i64m8_m(mask, maskedoff, op1, op2, vl);
}
1865
1866 //
1867 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
1868 // CHECK-RV64-NEXT: entry:
1869 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1870 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1871 //
// Masked widening signed add (.w form), wide-vector + scalar: i64m8 + int32_t -> i64m8.
vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff,
                                 vint64m8_t op1, int32_t op2, size_t vl) {
  return vwadd_wx_i64m8_m(mask, maskedoff, op1, op2, vl);
}
1876
1877 //
1878 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
1879 // CHECK-RV64-NEXT: entry:
1880 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1881 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1882 //
// Masked widening unsigned add, vector-vector: u8mf8 + u8mf8 -> u16mf4.
vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                     vuint8mf8_t op1, vuint8mf8_t op2,
                                     size_t vl) {
  return vwaddu_vv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
1888
1889 //
1890 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
1891 // CHECK-RV64-NEXT: entry:
1892 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1893 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1894 //
// Masked widening unsigned add, vector-scalar: u8mf8 + uint8_t -> u16mf4.
vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                     vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
1899
1900 //
1901 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
1902 // CHECK-RV64-NEXT: entry:
1903 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1904 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1905 //
// Masked widening unsigned add (.w form), wide-vector + vector: u16mf4 + u8mf8 -> u16mf4.
vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                     vuint16mf4_t op1, vuint8mf8_t op2,
                                     size_t vl) {
  return vwaddu_wv_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
1911
1912 //
1913 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
1914 // CHECK-RV64-NEXT: entry:
1915 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1916 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1917 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u16mf4 + uint8_t -> u16mf4.
vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff,
                                     vuint16mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16mf4_m(mask, maskedoff, op1, op2, vl);
}
1922
1923 //
1924 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
1925 // CHECK-RV64-NEXT: entry:
1926 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1927 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1928 //
// Masked widening unsigned add, vector-vector: u8mf4 + u8mf4 -> u16mf2.
vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                     vuint8mf4_t op1, vuint8mf4_t op2,
                                     size_t vl) {
  return vwaddu_vv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
1934
1935 //
1936 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
1937 // CHECK-RV64-NEXT: entry:
1938 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1939 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1940 //
// Masked widening unsigned add, vector-scalar: u8mf4 + uint8_t -> u16mf2.
vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                     vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
1945
1946 //
1947 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
1948 // CHECK-RV64-NEXT: entry:
1949 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1950 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1951 //
// Masked widening unsigned add (.w form), wide-vector + vector: u16mf2 + u8mf4 -> u16mf2.
vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                     vuint16mf2_t op1, vuint8mf4_t op2,
                                     size_t vl) {
  return vwaddu_wv_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
1957
1958 //
1959 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
1960 // CHECK-RV64-NEXT: entry:
1961 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1962 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1963 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u16mf2 + uint8_t -> u16mf2.
vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff,
                                     vuint16mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16mf2_m(mask, maskedoff, op1, op2, vl);
}
1968
1969 //
1970 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
1971 // CHECK-RV64-NEXT: entry:
1972 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1973 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1974 //
// Masked widening unsigned add, vector-vector: u8mf2 + u8mf2 -> u16m1.
vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                   vuint8mf2_t op1, vuint8mf2_t op2,
                                   size_t vl) {
  return vwaddu_vv_u16m1_m(mask, maskedoff, op1, op2, vl);
}
1980
1981 //
1982 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
1983 // CHECK-RV64-NEXT: entry:
1984 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1985 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1986 //
// Masked widening unsigned add, vector-scalar: u8mf2 + uint8_t -> u16m1.
vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                   vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m1_m(mask, maskedoff, op1, op2, vl);
}
1991
1992 //
1993 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
1994 // CHECK-RV64-NEXT: entry:
1995 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1996 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1997 //
// Masked widening unsigned add (.w form), wide-vector + vector: u16m1 + u8mf2 -> u16m1.
vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                   vuint16m1_t op1, vuint8mf2_t op2,
                                   size_t vl) {
  return vwaddu_wv_u16m1_m(mask, maskedoff, op1, op2, vl);
}
2003
2004 //
2005 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
2006 // CHECK-RV64-NEXT: entry:
2007 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2008 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2009 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u16m1 + uint8_t -> u16m1.
vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff,
                                   vuint16m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m1_m(mask, maskedoff, op1, op2, vl);
}
2014
2015 //
2016 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
2017 // CHECK-RV64-NEXT: entry:
2018 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2019 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2020 //
// Masked widening unsigned add, vector-vector: u8m1 + u8m1 -> u16m2.
vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                   vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_vv_u16m2_m(mask, maskedoff, op1, op2, vl);
}
2025
2026 //
2027 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
2028 // CHECK-RV64-NEXT: entry:
2029 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2030 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2031 //
// Masked widening unsigned add, vector-scalar: u8m1 + uint8_t -> u16m2.
vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                   vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m2_m(mask, maskedoff, op1, op2, vl);
}
2036
2037 //
2038 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
2039 // CHECK-RV64-NEXT: entry:
2040 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2041 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2042 //
// Masked widening unsigned add (.w form), wide-vector + vector: u16m2 + u8m1 -> u16m2.
vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                   vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_wv_u16m2_m(mask, maskedoff, op1, op2, vl);
}
2047
2048 //
2049 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
2050 // CHECK-RV64-NEXT: entry:
2051 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2052 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2053 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u16m2 + uint8_t -> u16m2.
vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff,
                                   vuint16m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m2_m(mask, maskedoff, op1, op2, vl);
}
2058
2059 //
2060 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
2061 // CHECK-RV64-NEXT: entry:
2062 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2063 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2064 //
// Masked widening unsigned add, vector-vector: u8m2 + u8m2 -> u16m4.
vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                   vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_vv_u16m4_m(mask, maskedoff, op1, op2, vl);
}
2069
2070 //
2071 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
2072 // CHECK-RV64-NEXT: entry:
2073 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2074 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2075 //
// Masked widening unsigned add, vector-scalar: u8m2 + uint8_t -> u16m4.
vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                   vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
2080
2081 //
2082 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
2083 // CHECK-RV64-NEXT: entry:
2084 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2085 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2086 //
// Masked widening unsigned add (.w form), wide-vector + vector: u16m4 + u8m2 -> u16m4.
vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                   vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_wv_u16m4_m(mask, maskedoff, op1, op2, vl);
}
2091
2092 //
2093 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
2094 // CHECK-RV64-NEXT: entry:
2095 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2096 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2097 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u16m4 + uint8_t -> u16m4.
vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff,
                                   vuint16m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m4_m(mask, maskedoff, op1, op2, vl);
}
2102
2103 //
2104 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
2105 // CHECK-RV64-NEXT: entry:
2106 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2107 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2108 //
// Masked widening unsigned add, vector-vector: u8m4 + u8m4 -> u16m8.
vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                                   vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_vv_u16m8_m(mask, maskedoff, op1, op2, vl);
}
2113
2114 //
2115 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
2116 // CHECK-RV64-NEXT: entry:
2117 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2118 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2119 //
// Masked widening unsigned add, vector-scalar: u8m4 + uint8_t -> u16m8.
vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                                   vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx_u16m8_m(mask, maskedoff, op1, op2, vl);
}
2124
2125 //
2126 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
2127 // CHECK-RV64-NEXT: entry:
2128 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2129 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2130 //
// Masked widening unsigned add (.w form), wide-vector + vector: u16m8 + u8m4 -> u16m8.
vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                                   vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_wv_u16m8_m(mask, maskedoff, op1, op2, vl);
}
2135
2136 //
2137 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
2138 // CHECK-RV64-NEXT: entry:
2139 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2140 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2141 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u16m8 + uint8_t -> u16m8.
vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff,
                                   vuint16m8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx_u16m8_m(mask, maskedoff, op1, op2, vl);
}
2146
2147 //
2148 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
2149 // CHECK-RV64-NEXT: entry:
2150 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2151 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2152 //
// Masked widening unsigned add, vector-vector: u16mf4 + u16mf4 -> u32mf2.
vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                     vuint16mf4_t op1, vuint16mf4_t op2,
                                     size_t vl) {
  return vwaddu_vv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
2158
2159 //
2160 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
2161 // CHECK-RV64-NEXT: entry:
2162 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2163 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2164 //
// Masked widening unsigned add, vector-scalar: u16mf4 + uint16_t -> u32mf2.
vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                     vuint16mf4_t op1, uint16_t op2,
                                     size_t vl) {
  return vwaddu_vx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
2170
2171 //
2172 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
2173 // CHECK-RV64-NEXT: entry:
2174 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2175 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2176 //
// Masked widening unsigned add (.w form), wide-vector + vector: u32mf2 + u16mf4 -> u32mf2.
vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                     vuint32mf2_t op1, vuint16mf4_t op2,
                                     size_t vl) {
  return vwaddu_wv_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
2182
2183 //
2184 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
2185 // CHECK-RV64-NEXT: entry:
2186 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2187 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2188 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u32mf2 + uint16_t -> u32mf2.
vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff,
                                     vuint32mf2_t op1, uint16_t op2,
                                     size_t vl) {
  return vwaddu_wx_u32mf2_m(mask, maskedoff, op1, op2, vl);
}
2194
2195 //
2196 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
2197 // CHECK-RV64-NEXT: entry:
2198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2199 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2200 //
// Masked widening unsigned add, vector-vector: u16mf2 + u16mf2 -> u32m1.
vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                   vuint16mf2_t op1, vuint16mf2_t op2,
                                   size_t vl) {
  return vwaddu_vv_u32m1_m(mask, maskedoff, op1, op2, vl);
}
2206
2207 //
2208 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
2209 // CHECK-RV64-NEXT: entry:
2210 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2211 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2212 //
// Masked widening unsigned add, vector-scalar: u16mf2 + uint16_t -> u32m1.
vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                   vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m1_m(mask, maskedoff, op1, op2, vl);
}
2217
2218 //
2219 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
2220 // CHECK-RV64-NEXT: entry:
2221 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2222 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2223 //
// Masked widening unsigned add (.w form), wide-vector + vector: u32m1 + u16mf2 -> u32m1.
vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                   vuint32m1_t op1, vuint16mf2_t op2,
                                   size_t vl) {
  return vwaddu_wv_u32m1_m(mask, maskedoff, op1, op2, vl);
}
2229
2230 //
2231 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
2232 // CHECK-RV64-NEXT: entry:
2233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2234 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2235 //
// Masked widening unsigned add (.w form), wide-vector + scalar: u32m1 + uint16_t -> u32m1.
vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff,
                                   vuint32m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m1_m(mask, maskedoff, op1, op2, vl);
}
2240
2241 //
2242 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
2243 // CHECK-RV64-NEXT: entry:
2244 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2245 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2246 //
// Masked vwaddu.vv: widening unsigned add, u16m1 vector + u16m1 vector -> u32m2.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                   vuint16m1_t op1, vuint16m1_t op2,
                                   size_t vl) {
  return vwaddu_vv_u32m2_m(mask, maskedoff, op1, op2, vl);
}
2252
2253 //
2254 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
2255 // CHECK-RV64-NEXT: entry:
2256 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2257 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2258 //
// Masked vwaddu.vx: widening unsigned add, u16m1 vector + u16 scalar -> u32m2.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                   vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
2263
2264 //
2265 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
2266 // CHECK-RV64-NEXT: entry:
2267 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2268 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2269 //
// Masked vwaddu.wv: widening unsigned add, u32m2 (already-wide) vector +
// u16m1 vector -> u32m2. CHECK lines above pin the expected masked IR call.
vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                   vuint32m2_t op1, vuint16m1_t op2,
                                   size_t vl) {
  return vwaddu_wv_u32m2_m(mask, maskedoff, op1, op2, vl);
}
2275
2276 //
2277 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
2278 // CHECK-RV64-NEXT: entry:
2279 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2280 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2281 //
// Masked vwaddu.wx: widening unsigned add, u32m2 (already-wide) vector +
// u16 scalar -> u32m2. CHECK lines above pin the expected masked IR call.
vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff,
                                   vuint32m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m2_m(mask, maskedoff, op1, op2, vl);
}
2286
2287 //
2288 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
2289 // CHECK-RV64-NEXT: entry:
2290 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2291 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2292 //
// Masked vwaddu.vv: widening unsigned add, u16m2 vector + u16m2 vector -> u32m4.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                   vuint16m2_t op1, vuint16m2_t op2,
                                   size_t vl) {
  return vwaddu_vv_u32m4_m(mask, maskedoff, op1, op2, vl);
}
2298
2299 //
2300 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
2301 // CHECK-RV64-NEXT: entry:
2302 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2303 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2304 //
// Masked vwaddu.vx: widening unsigned add, u16m2 vector + u16 scalar -> u32m4.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                   vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m4_m(mask, maskedoff, op1, op2, vl);
}
2309
2310 //
2311 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
2312 // CHECK-RV64-NEXT: entry:
2313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2314 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2315 //
// Masked vwaddu.wv: widening unsigned add, u32m4 (already-wide) vector +
// u16m2 vector -> u32m4. CHECK lines above pin the expected masked IR call.
vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                   vuint32m4_t op1, vuint16m2_t op2,
                                   size_t vl) {
  return vwaddu_wv_u32m4_m(mask, maskedoff, op1, op2, vl);
}
2321
2322 //
2323 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
2324 // CHECK-RV64-NEXT: entry:
2325 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2326 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2327 //
// Masked vwaddu.wx: widening unsigned add, u32m4 (already-wide) vector +
// u16 scalar -> u32m4. CHECK lines above pin the expected masked IR call.
vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff,
                                   vuint32m4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m4_m(mask, maskedoff, op1, op2, vl);
}
2332
2333 //
2334 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
2335 // CHECK-RV64-NEXT: entry:
2336 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2337 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2338 //
// Masked vwaddu.vv: widening unsigned add, u16m4 vector + u16m4 vector -> u32m8.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                                   vuint16m4_t op1, vuint16m4_t op2,
                                   size_t vl) {
  return vwaddu_vv_u32m8_m(mask, maskedoff, op1, op2, vl);
}
2344
2345 //
2346 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
2347 // CHECK-RV64-NEXT: entry:
2348 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2349 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2350 //
// Masked vwaddu.vx: widening unsigned add, u16m4 vector + u16 scalar -> u32m8.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                                   vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx_u32m8_m(mask, maskedoff, op1, op2, vl);
}
2355
2356 //
2357 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
2358 // CHECK-RV64-NEXT: entry:
2359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2360 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2361 //
// Masked vwaddu.wv: widening unsigned add, u32m8 (already-wide) vector +
// u16m4 vector -> u32m8. CHECK lines above pin the expected masked IR call.
vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                                   vuint32m8_t op1, vuint16m4_t op2,
                                   size_t vl) {
  return vwaddu_wv_u32m8_m(mask, maskedoff, op1, op2, vl);
}
2367
2368 //
2369 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
2370 // CHECK-RV64-NEXT: entry:
2371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2372 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2373 //
// Masked vwaddu.wx: widening unsigned add, u32m8 (already-wide) vector +
// u16 scalar -> u32m8. CHECK lines above pin the expected masked IR call.
vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff,
                                   vuint32m8_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx_u32m8_m(mask, maskedoff, op1, op2, vl);
}
2378
2379 //
2380 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
2381 // CHECK-RV64-NEXT: entry:
2382 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2383 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2384 //
// Masked vwaddu.vv: widening unsigned add, u32mf2 vector + u32mf2 vector -> u64m1.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                                   vuint32mf2_t op1, vuint32mf2_t op2,
                                   size_t vl) {
  return vwaddu_vv_u64m1_m(mask, maskedoff, op1, op2, vl);
}
2390
2391 //
2392 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
2393 // CHECK-RV64-NEXT: entry:
2394 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2395 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2396 //
// Masked vwaddu.vx: widening unsigned add, u32mf2 vector + u32 scalar -> u64m1.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                                   vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
2401
2402 //
2403 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
2404 // CHECK-RV64-NEXT: entry:
2405 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2406 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2407 //
// Masked vwaddu.wv: widening unsigned add, u64m1 (already-wide) vector +
// u32mf2 vector -> u64m1. CHECK lines above pin the expected masked IR call.
vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                                   vuint64m1_t op1, vuint32mf2_t op2,
                                   size_t vl) {
  return vwaddu_wv_u64m1_m(mask, maskedoff, op1, op2, vl);
}
2413
2414 //
2415 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
2416 // CHECK-RV64-NEXT: entry:
2417 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2418 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2419 //
// Masked vwaddu.wx: widening unsigned add, u64m1 (already-wide) vector +
// u32 scalar -> u64m1. CHECK lines above pin the expected masked IR call.
vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff,
                                   vuint64m1_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m1_m(mask, maskedoff, op1, op2, vl);
}
2424
2425 //
2426 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
2427 // CHECK-RV64-NEXT: entry:
2428 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2429 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2430 //
// Masked vwaddu.vv: widening unsigned add, u32m1 vector + u32m1 vector -> u64m2.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                                   vuint32m1_t op1, vuint32m1_t op2,
                                   size_t vl) {
  return vwaddu_vv_u64m2_m(mask, maskedoff, op1, op2, vl);
}
2436
2437 //
2438 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
2439 // CHECK-RV64-NEXT: entry:
2440 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2441 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2442 //
// Masked vwaddu.vx: widening unsigned add, u32m1 vector + u32 scalar -> u64m2.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                                   vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m2_m(mask, maskedoff, op1, op2, vl);
}
2447
2448 //
2449 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
2450 // CHECK-RV64-NEXT: entry:
2451 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2452 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2453 //
// Masked vwaddu.wv: widening unsigned add, u64m2 (already-wide) vector +
// u32m1 vector -> u64m2. CHECK lines above pin the expected masked IR call.
vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                                   vuint64m2_t op1, vuint32m1_t op2,
                                   size_t vl) {
  return vwaddu_wv_u64m2_m(mask, maskedoff, op1, op2, vl);
}
2459
2460 //
2461 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
2462 // CHECK-RV64-NEXT: entry:
2463 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2464 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2465 //
// Masked vwaddu.wx: widening unsigned add, u64m2 (already-wide) vector +
// u32 scalar -> u64m2. CHECK lines above pin the expected masked IR call.
vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff,
                                   vuint64m2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m2_m(mask, maskedoff, op1, op2, vl);
}
2470
2471 //
2472 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
2473 // CHECK-RV64-NEXT: entry:
2474 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2475 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2476 //
// Masked vwaddu.vv: widening unsigned add, u32m2 vector + u32m2 vector -> u64m4.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                                   vuint32m2_t op1, vuint32m2_t op2,
                                   size_t vl) {
  return vwaddu_vv_u64m4_m(mask, maskedoff, op1, op2, vl);
}
2482
2483 //
2484 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
2485 // CHECK-RV64-NEXT: entry:
2486 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2487 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2488 //
// Masked vwaddu.vx: widening unsigned add, u32m2 vector + u32 scalar -> u64m4.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                                   vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m4_m(mask, maskedoff, op1, op2, vl);
}
2493
2494 //
2495 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
2496 // CHECK-RV64-NEXT: entry:
2497 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2498 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2499 //
// Masked vwaddu.wv: widening unsigned add, u64m4 (already-wide) vector +
// u32m2 vector -> u64m4. CHECK lines above pin the expected masked IR call.
vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                                   vuint64m4_t op1, vuint32m2_t op2,
                                   size_t vl) {
  return vwaddu_wv_u64m4_m(mask, maskedoff, op1, op2, vl);
}
2505
2506 //
2507 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
2508 // CHECK-RV64-NEXT: entry:
2509 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2510 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2511 //
// Masked vwaddu.wx: widening unsigned add, u64m4 (already-wide) vector +
// u32 scalar -> u64m4. CHECK lines above pin the expected masked IR call.
vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff,
                                   vuint64m4_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m4_m(mask, maskedoff, op1, op2, vl);
}
2516
2517 //
2518 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
2519 // CHECK-RV64-NEXT: entry:
2520 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2521 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2522 //
// Masked vwaddu.vv: widening unsigned add, u32m4 vector + u32m4 vector -> u64m8.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                   vuint32m4_t op1, vuint32m4_t op2,
                                   size_t vl) {
  return vwaddu_vv_u64m8_m(mask, maskedoff, op1, op2, vl);
}
2528
2529 //
2530 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
2531 // CHECK-RV64-NEXT: entry:
2532 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2533 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2534 //
// Masked vwaddu.vx: widening unsigned add, u32m4 vector + u32 scalar -> u64m8.
// CHECK lines above pin the expected masked IR intrinsic call.
vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                   vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
2539
2540 //
2541 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
2542 // CHECK-RV64-NEXT: entry:
2543 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2544 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2545 //
// Masked vwaddu.wv: widening unsigned add, u64m8 (already-wide) vector +
// u32m4 vector -> u64m8. CHECK lines above pin the expected masked IR call.
vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                   vuint64m8_t op1, vuint32m4_t op2,
                                   size_t vl) {
  return vwaddu_wv_u64m8_m(mask, maskedoff, op1, op2, vl);
}
2551
2552 //
2553 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
2554 // CHECK-RV64-NEXT: entry:
2555 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2556 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2557 //
// Masked vwaddu.wx: widening unsigned add, u64m8 (already-wide) vector +
// u32 scalar -> u64m8. CHECK lines above pin the expected masked IR call.
vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff,
                                   vuint64m8_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx_u64m8_m(mask, maskedoff, op1, op2, vl);
}
2562