// NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
// REQUIRES: riscv-registered-target
// RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s

#include <riscv_vector.h>

7 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4(
8 // CHECK-RV64-NEXT: entry:
9 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
10 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
11 //
test_vwadd_vv_i16mf4(vint8mf8_t op1,vint8mf8_t op2,size_t vl)12 vint16mf4_t test_vwadd_vv_i16mf4(vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
13 return vwadd_vv(op1, op2, vl);
14 }
15
16 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4(
17 // CHECK-RV64-NEXT: entry:
18 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
19 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
20 //
test_vwadd_vx_i16mf4(vint8mf8_t op1,int8_t op2,size_t vl)21 vint16mf4_t test_vwadd_vx_i16mf4(vint8mf8_t op1, int8_t op2, size_t vl) {
22 return vwadd_vx(op1, op2, vl);
23 }
24
25 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4(
26 // CHECK-RV64-NEXT: entry:
27 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
28 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
29 //
test_vwadd_wv_i16mf4(vint16mf4_t op1,vint8mf8_t op2,size_t vl)30 vint16mf4_t test_vwadd_wv_i16mf4(vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
31 return vwadd_wv(op1, op2, vl);
32 }
33
34 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4(
35 // CHECK-RV64-NEXT: entry:
36 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
37 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
38 //
test_vwadd_wx_i16mf4(vint16mf4_t op1,int8_t op2,size_t vl)39 vint16mf4_t test_vwadd_wx_i16mf4(vint16mf4_t op1, int8_t op2, size_t vl) {
40 return vwadd_wx(op1, op2, vl);
41 }
42
43 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2(
44 // CHECK-RV64-NEXT: entry:
45 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
46 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
47 //
test_vwadd_vv_i16mf2(vint8mf4_t op1,vint8mf4_t op2,size_t vl)48 vint16mf2_t test_vwadd_vv_i16mf2(vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
49 return vwadd_vv(op1, op2, vl);
50 }
51
52 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2(
53 // CHECK-RV64-NEXT: entry:
54 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
55 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
56 //
test_vwadd_vx_i16mf2(vint8mf4_t op1,int8_t op2,size_t vl)57 vint16mf2_t test_vwadd_vx_i16mf2(vint8mf4_t op1, int8_t op2, size_t vl) {
58 return vwadd_vx(op1, op2, vl);
59 }
60
61 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2(
62 // CHECK-RV64-NEXT: entry:
63 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
64 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
65 //
test_vwadd_wv_i16mf2(vint16mf2_t op1,vint8mf4_t op2,size_t vl)66 vint16mf2_t test_vwadd_wv_i16mf2(vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
67 return vwadd_wv(op1, op2, vl);
68 }
69
70 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2(
71 // CHECK-RV64-NEXT: entry:
72 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
73 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
74 //
test_vwadd_wx_i16mf2(vint16mf2_t op1,int8_t op2,size_t vl)75 vint16mf2_t test_vwadd_wx_i16mf2(vint16mf2_t op1, int8_t op2, size_t vl) {
76 return vwadd_wx(op1, op2, vl);
77 }
78
79 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1(
80 // CHECK-RV64-NEXT: entry:
81 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
82 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
83 //
test_vwadd_vv_i16m1(vint8mf2_t op1,vint8mf2_t op2,size_t vl)84 vint16m1_t test_vwadd_vv_i16m1(vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
85 return vwadd_vv(op1, op2, vl);
86 }
87
88 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1(
89 // CHECK-RV64-NEXT: entry:
90 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
91 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
92 //
test_vwadd_vx_i16m1(vint8mf2_t op1,int8_t op2,size_t vl)93 vint16m1_t test_vwadd_vx_i16m1(vint8mf2_t op1, int8_t op2, size_t vl) {
94 return vwadd_vx(op1, op2, vl);
95 }
96
97 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1(
98 // CHECK-RV64-NEXT: entry:
99 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
101 //
test_vwadd_wv_i16m1(vint16m1_t op1,vint8mf2_t op2,size_t vl)102 vint16m1_t test_vwadd_wv_i16m1(vint16m1_t op1, vint8mf2_t op2, size_t vl) {
103 return vwadd_wv(op1, op2, vl);
104 }
105
106 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1(
107 // CHECK-RV64-NEXT: entry:
108 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
109 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
110 //
test_vwadd_wx_i16m1(vint16m1_t op1,int8_t op2,size_t vl)111 vint16m1_t test_vwadd_wx_i16m1(vint16m1_t op1, int8_t op2, size_t vl) {
112 return vwadd_wx(op1, op2, vl);
113 }
114
115 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2(
116 // CHECK-RV64-NEXT: entry:
117 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
118 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
119 //
test_vwadd_vv_i16m2(vint8m1_t op1,vint8m1_t op2,size_t vl)120 vint16m2_t test_vwadd_vv_i16m2(vint8m1_t op1, vint8m1_t op2, size_t vl) {
121 return vwadd_vv(op1, op2, vl);
122 }
123
124 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2(
125 // CHECK-RV64-NEXT: entry:
126 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
127 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
128 //
test_vwadd_vx_i16m2(vint8m1_t op1,int8_t op2,size_t vl)129 vint16m2_t test_vwadd_vx_i16m2(vint8m1_t op1, int8_t op2, size_t vl) {
130 return vwadd_vx(op1, op2, vl);
131 }
132
133 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2(
134 // CHECK-RV64-NEXT: entry:
135 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
136 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
137 //
test_vwadd_wv_i16m2(vint16m2_t op1,vint8m1_t op2,size_t vl)138 vint16m2_t test_vwadd_wv_i16m2(vint16m2_t op1, vint8m1_t op2, size_t vl) {
139 return vwadd_wv(op1, op2, vl);
140 }
141
142 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2(
143 // CHECK-RV64-NEXT: entry:
144 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
145 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
146 //
test_vwadd_wx_i16m2(vint16m2_t op1,int8_t op2,size_t vl)147 vint16m2_t test_vwadd_wx_i16m2(vint16m2_t op1, int8_t op2, size_t vl) {
148 return vwadd_wx(op1, op2, vl);
149 }
150
151 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4(
152 // CHECK-RV64-NEXT: entry:
153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
154 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
155 //
test_vwadd_vv_i16m4(vint8m2_t op1,vint8m2_t op2,size_t vl)156 vint16m4_t test_vwadd_vv_i16m4(vint8m2_t op1, vint8m2_t op2, size_t vl) {
157 return vwadd_vv(op1, op2, vl);
158 }
159
160 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4(
161 // CHECK-RV64-NEXT: entry:
162 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
163 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
164 //
test_vwadd_vx_i16m4(vint8m2_t op1,int8_t op2,size_t vl)165 vint16m4_t test_vwadd_vx_i16m4(vint8m2_t op1, int8_t op2, size_t vl) {
166 return vwadd_vx(op1, op2, vl);
167 }
168
169 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4(
170 // CHECK-RV64-NEXT: entry:
171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
172 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
173 //
test_vwadd_wv_i16m4(vint16m4_t op1,vint8m2_t op2,size_t vl)174 vint16m4_t test_vwadd_wv_i16m4(vint16m4_t op1, vint8m2_t op2, size_t vl) {
175 return vwadd_wv(op1, op2, vl);
176 }
177
178 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4(
179 // CHECK-RV64-NEXT: entry:
180 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
181 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
182 //
test_vwadd_wx_i16m4(vint16m4_t op1,int8_t op2,size_t vl)183 vint16m4_t test_vwadd_wx_i16m4(vint16m4_t op1, int8_t op2, size_t vl) {
184 return vwadd_wx(op1, op2, vl);
185 }
186
187 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8(
188 // CHECK-RV64-NEXT: entry:
189 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
190 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
191 //
test_vwadd_vv_i16m8(vint8m4_t op1,vint8m4_t op2,size_t vl)192 vint16m8_t test_vwadd_vv_i16m8(vint8m4_t op1, vint8m4_t op2, size_t vl) {
193 return vwadd_vv(op1, op2, vl);
194 }
195
196 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8(
197 // CHECK-RV64-NEXT: entry:
198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
199 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
200 //
test_vwadd_vx_i16m8(vint8m4_t op1,int8_t op2,size_t vl)201 vint16m8_t test_vwadd_vx_i16m8(vint8m4_t op1, int8_t op2, size_t vl) {
202 return vwadd_vx(op1, op2, vl);
203 }
204
205 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8(
206 // CHECK-RV64-NEXT: entry:
207 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
208 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
209 //
test_vwadd_wv_i16m8(vint16m8_t op1,vint8m4_t op2,size_t vl)210 vint16m8_t test_vwadd_wv_i16m8(vint16m8_t op1, vint8m4_t op2, size_t vl) {
211 return vwadd_wv(op1, op2, vl);
212 }
213
214 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8(
215 // CHECK-RV64-NEXT: entry:
216 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
217 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
218 //
test_vwadd_wx_i16m8(vint16m8_t op1,int8_t op2,size_t vl)219 vint16m8_t test_vwadd_wx_i16m8(vint16m8_t op1, int8_t op2, size_t vl) {
220 return vwadd_wx(op1, op2, vl);
221 }
222
223 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2(
224 // CHECK-RV64-NEXT: entry:
225 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
226 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
227 //
test_vwadd_vv_i32mf2(vint16mf4_t op1,vint16mf4_t op2,size_t vl)228 vint32mf2_t test_vwadd_vv_i32mf2(vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
229 return vwadd_vv(op1, op2, vl);
230 }
231
232 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2(
233 // CHECK-RV64-NEXT: entry:
234 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
235 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
236 //
test_vwadd_vx_i32mf2(vint16mf4_t op1,int16_t op2,size_t vl)237 vint32mf2_t test_vwadd_vx_i32mf2(vint16mf4_t op1, int16_t op2, size_t vl) {
238 return vwadd_vx(op1, op2, vl);
239 }
240
241 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2(
242 // CHECK-RV64-NEXT: entry:
243 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
244 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
245 //
test_vwadd_wv_i32mf2(vint32mf2_t op1,vint16mf4_t op2,size_t vl)246 vint32mf2_t test_vwadd_wv_i32mf2(vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
247 return vwadd_wv(op1, op2, vl);
248 }
249
250 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2(
251 // CHECK-RV64-NEXT: entry:
252 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
254 //
test_vwadd_wx_i32mf2(vint32mf2_t op1,int16_t op2,size_t vl)255 vint32mf2_t test_vwadd_wx_i32mf2(vint32mf2_t op1, int16_t op2, size_t vl) {
256 return vwadd_wx(op1, op2, vl);
257 }
258
259 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1(
260 // CHECK-RV64-NEXT: entry:
261 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
262 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
263 //
test_vwadd_vv_i32m1(vint16mf2_t op1,vint16mf2_t op2,size_t vl)264 vint32m1_t test_vwadd_vv_i32m1(vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
265 return vwadd_vv(op1, op2, vl);
266 }
267
268 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1(
269 // CHECK-RV64-NEXT: entry:
270 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
271 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
272 //
test_vwadd_vx_i32m1(vint16mf2_t op1,int16_t op2,size_t vl)273 vint32m1_t test_vwadd_vx_i32m1(vint16mf2_t op1, int16_t op2, size_t vl) {
274 return vwadd_vx(op1, op2, vl);
275 }
276
277 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1(
278 // CHECK-RV64-NEXT: entry:
279 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
280 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
281 //
test_vwadd_wv_i32m1(vint32m1_t op1,vint16mf2_t op2,size_t vl)282 vint32m1_t test_vwadd_wv_i32m1(vint32m1_t op1, vint16mf2_t op2, size_t vl) {
283 return vwadd_wv(op1, op2, vl);
284 }
285
286 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1(
287 // CHECK-RV64-NEXT: entry:
288 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
289 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
290 //
test_vwadd_wx_i32m1(vint32m1_t op1,int16_t op2,size_t vl)291 vint32m1_t test_vwadd_wx_i32m1(vint32m1_t op1, int16_t op2, size_t vl) {
292 return vwadd_wx(op1, op2, vl);
293 }
294
295 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2(
296 // CHECK-RV64-NEXT: entry:
297 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
298 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
299 //
test_vwadd_vv_i32m2(vint16m1_t op1,vint16m1_t op2,size_t vl)300 vint32m2_t test_vwadd_vv_i32m2(vint16m1_t op1, vint16m1_t op2, size_t vl) {
301 return vwadd_vv(op1, op2, vl);
302 }
303
304 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2(
305 // CHECK-RV64-NEXT: entry:
306 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
307 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
308 //
test_vwadd_vx_i32m2(vint16m1_t op1,int16_t op2,size_t vl)309 vint32m2_t test_vwadd_vx_i32m2(vint16m1_t op1, int16_t op2, size_t vl) {
310 return vwadd_vx(op1, op2, vl);
311 }
312
313 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2(
314 // CHECK-RV64-NEXT: entry:
315 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
316 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
317 //
test_vwadd_wv_i32m2(vint32m2_t op1,vint16m1_t op2,size_t vl)318 vint32m2_t test_vwadd_wv_i32m2(vint32m2_t op1, vint16m1_t op2, size_t vl) {
319 return vwadd_wv(op1, op2, vl);
320 }
321
322 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2(
323 // CHECK-RV64-NEXT: entry:
324 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
325 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
326 //
test_vwadd_wx_i32m2(vint32m2_t op1,int16_t op2,size_t vl)327 vint32m2_t test_vwadd_wx_i32m2(vint32m2_t op1, int16_t op2, size_t vl) {
328 return vwadd_wx(op1, op2, vl);
329 }
330
331 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4(
332 // CHECK-RV64-NEXT: entry:
333 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
334 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
335 //
test_vwadd_vv_i32m4(vint16m2_t op1,vint16m2_t op2,size_t vl)336 vint32m4_t test_vwadd_vv_i32m4(vint16m2_t op1, vint16m2_t op2, size_t vl) {
337 return vwadd_vv(op1, op2, vl);
338 }
339
340 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4(
341 // CHECK-RV64-NEXT: entry:
342 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
343 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
344 //
test_vwadd_vx_i32m4(vint16m2_t op1,int16_t op2,size_t vl)345 vint32m4_t test_vwadd_vx_i32m4(vint16m2_t op1, int16_t op2, size_t vl) {
346 return vwadd_vx(op1, op2, vl);
347 }
348
349 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4(
350 // CHECK-RV64-NEXT: entry:
351 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
352 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
353 //
test_vwadd_wv_i32m4(vint32m4_t op1,vint16m2_t op2,size_t vl)354 vint32m4_t test_vwadd_wv_i32m4(vint32m4_t op1, vint16m2_t op2, size_t vl) {
355 return vwadd_wv(op1, op2, vl);
356 }
357
358 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4(
359 // CHECK-RV64-NEXT: entry:
360 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
361 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
362 //
test_vwadd_wx_i32m4(vint32m4_t op1,int16_t op2,size_t vl)363 vint32m4_t test_vwadd_wx_i32m4(vint32m4_t op1, int16_t op2, size_t vl) {
364 return vwadd_wx(op1, op2, vl);
365 }
366
367 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8(
368 // CHECK-RV64-NEXT: entry:
369 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
370 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
371 //
test_vwadd_vv_i32m8(vint16m4_t op1,vint16m4_t op2,size_t vl)372 vint32m8_t test_vwadd_vv_i32m8(vint16m4_t op1, vint16m4_t op2, size_t vl) {
373 return vwadd_vv(op1, op2, vl);
374 }
375
376 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8(
377 // CHECK-RV64-NEXT: entry:
378 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
379 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
380 //
test_vwadd_vx_i32m8(vint16m4_t op1,int16_t op2,size_t vl)381 vint32m8_t test_vwadd_vx_i32m8(vint16m4_t op1, int16_t op2, size_t vl) {
382 return vwadd_vx(op1, op2, vl);
383 }
384
385 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8(
386 // CHECK-RV64-NEXT: entry:
387 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
388 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
389 //
test_vwadd_wv_i32m8(vint32m8_t op1,vint16m4_t op2,size_t vl)390 vint32m8_t test_vwadd_wv_i32m8(vint32m8_t op1, vint16m4_t op2, size_t vl) {
391 return vwadd_wv(op1, op2, vl);
392 }
393
394 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8(
395 // CHECK-RV64-NEXT: entry:
396 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
397 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
398 //
test_vwadd_wx_i32m8(vint32m8_t op1,int16_t op2,size_t vl)399 vint32m8_t test_vwadd_wx_i32m8(vint32m8_t op1, int16_t op2, size_t vl) {
400 return vwadd_wx(op1, op2, vl);
401 }
402
403 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1(
404 // CHECK-RV64-NEXT: entry:
405 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
406 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
407 //
test_vwadd_vv_i64m1(vint32mf2_t op1,vint32mf2_t op2,size_t vl)408 vint64m1_t test_vwadd_vv_i64m1(vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
409 return vwadd_vv(op1, op2, vl);
410 }
411
412 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1(
413 // CHECK-RV64-NEXT: entry:
414 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
415 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
416 //
test_vwadd_vx_i64m1(vint32mf2_t op1,int32_t op2,size_t vl)417 vint64m1_t test_vwadd_vx_i64m1(vint32mf2_t op1, int32_t op2, size_t vl) {
418 return vwadd_vx(op1, op2, vl);
419 }
420
421 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1(
422 // CHECK-RV64-NEXT: entry:
423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
424 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
425 //
test_vwadd_wv_i64m1(vint64m1_t op1,vint32mf2_t op2,size_t vl)426 vint64m1_t test_vwadd_wv_i64m1(vint64m1_t op1, vint32mf2_t op2, size_t vl) {
427 return vwadd_wv(op1, op2, vl);
428 }
429
430 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1(
431 // CHECK-RV64-NEXT: entry:
432 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
433 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
434 //
test_vwadd_wx_i64m1(vint64m1_t op1,int32_t op2,size_t vl)435 vint64m1_t test_vwadd_wx_i64m1(vint64m1_t op1, int32_t op2, size_t vl) {
436 return vwadd_wx(op1, op2, vl);
437 }
438
439 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2(
440 // CHECK-RV64-NEXT: entry:
441 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
442 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
443 //
test_vwadd_vv_i64m2(vint32m1_t op1,vint32m1_t op2,size_t vl)444 vint64m2_t test_vwadd_vv_i64m2(vint32m1_t op1, vint32m1_t op2, size_t vl) {
445 return vwadd_vv(op1, op2, vl);
446 }
447
448 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2(
449 // CHECK-RV64-NEXT: entry:
450 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
452 //
test_vwadd_vx_i64m2(vint32m1_t op1,int32_t op2,size_t vl)453 vint64m2_t test_vwadd_vx_i64m2(vint32m1_t op1, int32_t op2, size_t vl) {
454 return vwadd_vx(op1, op2, vl);
455 }
456
457 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2(
458 // CHECK-RV64-NEXT: entry:
459 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
460 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
461 //
test_vwadd_wv_i64m2(vint64m2_t op1,vint32m1_t op2,size_t vl)462 vint64m2_t test_vwadd_wv_i64m2(vint64m2_t op1, vint32m1_t op2, size_t vl) {
463 return vwadd_wv(op1, op2, vl);
464 }
465
466 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2(
467 // CHECK-RV64-NEXT: entry:
468 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
469 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
470 //
test_vwadd_wx_i64m2(vint64m2_t op1,int32_t op2,size_t vl)471 vint64m2_t test_vwadd_wx_i64m2(vint64m2_t op1, int32_t op2, size_t vl) {
472 return vwadd_wx(op1, op2, vl);
473 }
474
475 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4(
476 // CHECK-RV64-NEXT: entry:
477 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
478 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
479 //
test_vwadd_vv_i64m4(vint32m2_t op1,vint32m2_t op2,size_t vl)480 vint64m4_t test_vwadd_vv_i64m4(vint32m2_t op1, vint32m2_t op2, size_t vl) {
481 return vwadd_vv(op1, op2, vl);
482 }
483
484 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4(
485 // CHECK-RV64-NEXT: entry:
486 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
487 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
488 //
test_vwadd_vx_i64m4(vint32m2_t op1,int32_t op2,size_t vl)489 vint64m4_t test_vwadd_vx_i64m4(vint32m2_t op1, int32_t op2, size_t vl) {
490 return vwadd_vx(op1, op2, vl);
491 }
492
493 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4(
494 // CHECK-RV64-NEXT: entry:
495 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
496 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
497 //
test_vwadd_wv_i64m4(vint64m4_t op1,vint32m2_t op2,size_t vl)498 vint64m4_t test_vwadd_wv_i64m4(vint64m4_t op1, vint32m2_t op2, size_t vl) {
499 return vwadd_wv(op1, op2, vl);
500 }
501
502 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4(
503 // CHECK-RV64-NEXT: entry:
504 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
505 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
506 //
test_vwadd_wx_i64m4(vint64m4_t op1,int32_t op2,size_t vl)507 vint64m4_t test_vwadd_wx_i64m4(vint64m4_t op1, int32_t op2, size_t vl) {
508 return vwadd_wx(op1, op2, vl);
509 }
510
511 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8(
512 // CHECK-RV64-NEXT: entry:
513 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
514 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
515 //
test_vwadd_vv_i64m8(vint32m4_t op1,vint32m4_t op2,size_t vl)516 vint64m8_t test_vwadd_vv_i64m8(vint32m4_t op1, vint32m4_t op2, size_t vl) {
517 return vwadd_vv(op1, op2, vl);
518 }
519
520 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8(
521 // CHECK-RV64-NEXT: entry:
522 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
523 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
524 //
test_vwadd_vx_i64m8(vint32m4_t op1,int32_t op2,size_t vl)525 vint64m8_t test_vwadd_vx_i64m8(vint32m4_t op1, int32_t op2, size_t vl) {
526 return vwadd_vx(op1, op2, vl);
527 }
528
529 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8(
530 // CHECK-RV64-NEXT: entry:
531 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
532 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
533 //
test_vwadd_wv_i64m8(vint64m8_t op1,vint32m4_t op2,size_t vl)534 vint64m8_t test_vwadd_wv_i64m8(vint64m8_t op1, vint32m4_t op2, size_t vl) {
535 return vwadd_wv(op1, op2, vl);
536 }
537
538 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8(
539 // CHECK-RV64-NEXT: entry:
540 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
541 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
542 //
test_vwadd_wx_i64m8(vint64m8_t op1,int32_t op2,size_t vl)543 vint64m8_t test_vwadd_wx_i64m8(vint64m8_t op1, int32_t op2, size_t vl) {
544 return vwadd_wx(op1, op2, vl);
545 }
546
// vwaddu (unsigned widening add) for vuint16mf4_t: vv (vector+vector),
// vx (vector+scalar), wv (wide vector+vector), wx (wide vector+scalar).
// CHECK lines are autogenerated by utils/update_cc_test_checks.py; regenerate
// rather than editing them by hand.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4(vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4(vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4(vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.nxv1i16.i8.i64(<vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4(vuint16mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
582
// vwaddu tests for vuint16mf2_t (SEW 8 -> 16 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2(vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2(vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2(vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.nxv2i16.i8.i64(<vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2(vuint16mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
618
// vwaddu tests for vuint16m1_t (SEW 8 -> 16 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1(vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1(vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1(vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.nxv4i16.i8.i64(<vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1(vuint16m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
654
// vwaddu tests for vuint16m2_t (SEW 8 -> 16 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2(vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2(vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2(vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.nxv8i16.i8.i64(<vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2(vuint16m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
690
// vwaddu tests for vuint16m4_t (SEW 8 -> 16 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4(vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4(vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4(vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.nxv16i16.i8.i64(<vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4(vuint16m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
726
// vwaddu tests for vuint16m8_t (SEW 8 -> 16 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8(vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8(vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8(vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.nxv32i16.i8.i64(<vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8(vuint16m8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
762
// vwaddu tests for vuint32mf2_t (SEW 16 -> 32 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2(vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2(vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2(vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.nxv1i32.i16.i64(<vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2(vuint32mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
798
// vwaddu tests for vuint32m1_t (SEW 16 -> 32 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1(vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1(vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1(vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.nxv2i32.i16.i64(<vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1(vuint32m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
834
// vwaddu tests for vuint32m2_t (SEW 16 -> 32 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2(vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2(vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2(vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.nxv4i32.i16.i64(<vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2(vuint32m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
870
// vwaddu tests for vuint32m4_t (SEW 16 -> 32 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4(vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4(vuint16m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4(vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.nxv8i32.i16.i64(<vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4(vuint32m4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
906
// vwaddu tests for vuint32m8_t (SEW 16 -> 32 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8(vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8(vuint16m4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8(vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.nxv16i32.i16.i64(<vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8(vuint32m8_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
942
// vwaddu tests for vuint64m1_t (SEW 32 -> 64 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1(vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1(vuint32mf2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1(vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.nxv1i64.i32.i64(<vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1(vuint64m1_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
978
// vwaddu tests for vuint64m2_t (SEW 32 -> 64 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vv_u64m2(vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2(vuint32m1_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2(vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.nxv2i64.i32.i64(<vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2(vuint64m2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
1014
// vwaddu tests for vuint64m4_t (SEW 32 -> 64 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4(vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4(vuint32m2_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4(vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.nxv4i64.i32.i64(<vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4(vuint64m4_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
1050
// vwaddu tests for vuint64m8_t (SEW 32 -> 64 widening), all four operand forms.

// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8(vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
  return vwaddu_vv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8(vuint32m4_t op1, uint32_t op2, size_t vl) {
  return vwaddu_vx(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8(vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
  return vwaddu_wv(op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.nxv8i64.i32.i64(<vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8(vuint64m8_t op1, uint32_t op2, size_t vl) {
  return vwaddu_wx(op1, op2, vl);
}
1086
// Masked vwadd tests for vint16mf4_t: each call adds a mask and a maskedoff
// (merge) operand ahead of the data operands. The trailing `i64 1` in the
// CHECK lines is an extra policy operand of the masked intrinsic
// (NOTE(review): presumably the tail/mask policy — confirm against the
// intrinsic definition).

// CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_vx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wv_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vint16mf4_t test_vwadd_wx_i16mf4_m(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1122
1123 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_m(
1124 // CHECK-RV64-NEXT: entry:
1125 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1126 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1127 //
// Masked vwadd.vv: vint8mf4_t + vint8mf4_t -> vint16mf2_t (widening add); lowering pinned by CHECK lines above.
vint16mf2_t test_vwadd_vv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1131
1132 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_m(
1133 // CHECK-RV64-NEXT: entry:
1134 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1135 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1136 //
// Masked vwadd.vx: vint8mf4_t + int8_t scalar -> vint16mf2_t (widening add); lowering pinned by CHECK lines above.
vint16mf2_t test_vwadd_vx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1140
1141 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_m(
1142 // CHECK-RV64-NEXT: entry:
1143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1144 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1145 //
// Masked vwadd.wv: wide vint16mf2_t + vint8mf4_t -> vint16mf2_t; lowering pinned by CHECK lines above.
vint16mf2_t test_vwadd_wv_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1149
1150 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_m(
1151 // CHECK-RV64-NEXT: entry:
1152 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1153 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1154 //
// Masked vwadd.wx: wide vint16mf2_t + int8_t scalar -> vint16mf2_t; lowering pinned by CHECK lines above.
vint16mf2_t test_vwadd_wx_i16mf2_m(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1158
1159 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_m(
1160 // CHECK-RV64-NEXT: entry:
1161 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1162 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1163 //
// Masked vwadd.vv: vint8mf2_t + vint8mf2_t -> vint16m1_t (widening add); lowering pinned by CHECK lines above.
vint16m1_t test_vwadd_vv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1167
1168 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_m(
1169 // CHECK-RV64-NEXT: entry:
1170 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1171 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1172 //
// Masked vwadd.vx: vint8mf2_t + int8_t scalar -> vint16m1_t (widening add); lowering pinned by CHECK lines above.
vint16m1_t test_vwadd_vx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1176
1177 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_m(
1178 // CHECK-RV64-NEXT: entry:
1179 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1180 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1181 //
// Masked vwadd.wv: wide vint16m1_t + vint8mf2_t -> vint16m1_t; lowering pinned by CHECK lines above.
vint16m1_t test_vwadd_wv_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1185
1186 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_m(
1187 // CHECK-RV64-NEXT: entry:
1188 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1189 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1190 //
// Masked vwadd.wx: wide vint16m1_t + int8_t scalar -> vint16m1_t; lowering pinned by CHECK lines above.
vint16m1_t test_vwadd_wx_i16m1_m(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1194
1195 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_m(
1196 // CHECK-RV64-NEXT: entry:
1197 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1198 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1199 //
// Masked vwadd.vv: vint8m1_t + vint8m1_t -> vint16m2_t (widening add); lowering pinned by CHECK lines above.
vint16m2_t test_vwadd_vv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1203
1204 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_m(
1205 // CHECK-RV64-NEXT: entry:
1206 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1207 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1208 //
// Masked vwadd.vx: vint8m1_t + int8_t scalar -> vint16m2_t (widening add); lowering pinned by CHECK lines above.
vint16m2_t test_vwadd_vx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1212
1213 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_m(
1214 // CHECK-RV64-NEXT: entry:
1215 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1216 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1217 //
// Masked vwadd.wv: wide vint16m2_t + vint8m1_t -> vint16m2_t; lowering pinned by CHECK lines above.
vint16m2_t test_vwadd_wv_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1221
1222 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_m(
1223 // CHECK-RV64-NEXT: entry:
1224 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1225 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1226 //
// Masked vwadd.wx: wide vint16m2_t + int8_t scalar -> vint16m2_t; lowering pinned by CHECK lines above.
vint16m2_t test_vwadd_wx_i16m2_m(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1230
1231 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_m(
1232 // CHECK-RV64-NEXT: entry:
1233 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1234 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1235 //
// Masked vwadd.vv: vint8m2_t + vint8m2_t -> vint16m4_t (widening add); lowering pinned by CHECK lines above.
vint16m4_t test_vwadd_vv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1239
1240 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_m(
1241 // CHECK-RV64-NEXT: entry:
1242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1243 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1244 //
// Masked vwadd.vx: vint8m2_t + int8_t scalar -> vint16m4_t (widening add); lowering pinned by CHECK lines above.
vint16m4_t test_vwadd_vx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1248
1249 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_m(
1250 // CHECK-RV64-NEXT: entry:
1251 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1252 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1253 //
// Masked vwadd.wv: wide vint16m4_t + vint8m2_t -> vint16m4_t; lowering pinned by CHECK lines above.
vint16m4_t test_vwadd_wv_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1257
1258 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_m(
1259 // CHECK-RV64-NEXT: entry:
1260 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1261 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1262 //
// Masked vwadd.wx: wide vint16m4_t + int8_t scalar -> vint16m4_t; lowering pinned by CHECK lines above.
vint16m4_t test_vwadd_wx_i16m4_m(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1266
1267 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_m(
1268 // CHECK-RV64-NEXT: entry:
1269 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1270 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1271 //
// Masked vwadd.vv: vint8m4_t + vint8m4_t -> vint16m8_t (widening add); lowering pinned by CHECK lines above.
vint16m8_t test_vwadd_vv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1275
1276 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_m(
1277 // CHECK-RV64-NEXT: entry:
1278 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1279 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1280 //
// Masked vwadd.vx: vint8m4_t + int8_t scalar -> vint16m8_t (widening add); lowering pinned by CHECK lines above.
vint16m8_t test_vwadd_vx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1284
1285 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_m(
1286 // CHECK-RV64-NEXT: entry:
1287 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1288 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1289 //
// Masked vwadd.wv: wide vint16m8_t + vint8m4_t -> vint16m8_t; lowering pinned by CHECK lines above.
vint16m8_t test_vwadd_wv_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1293
1294 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_m(
1295 // CHECK-RV64-NEXT: entry:
1296 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1297 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1298 //
// Masked vwadd.wx: wide vint16m8_t + int8_t scalar -> vint16m8_t; lowering pinned by CHECK lines above.
vint16m8_t test_vwadd_wx_i16m8_m(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1302
1303 // CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_m(
1304 // CHECK-RV64-NEXT: entry:
1305 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1306 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1307 //
// Masked vwadd.vv: vint16mf4_t + vint16mf4_t -> vint32mf2_t (widening add); lowering pinned by CHECK lines above.
vint32mf2_t test_vwadd_vv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1311
1312 // CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_m(
1313 // CHECK-RV64-NEXT: entry:
1314 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1315 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1316 //
// Masked vwadd.vx: vint16mf4_t + int16_t scalar -> vint32mf2_t (widening add); lowering pinned by CHECK lines above.
vint32mf2_t test_vwadd_vx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1320
1321 // CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_m(
1322 // CHECK-RV64-NEXT: entry:
1323 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1324 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1325 //
// Masked vwadd.wv: wide vint32mf2_t + vint16mf4_t -> vint32mf2_t; lowering pinned by CHECK lines above.
vint32mf2_t test_vwadd_wv_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1329
1330 // CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_m(
1331 // CHECK-RV64-NEXT: entry:
1332 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1333 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1334 //
// Masked vwadd.wx: wide vint32mf2_t + int16_t scalar -> vint32mf2_t; lowering pinned by CHECK lines above.
vint32mf2_t test_vwadd_wx_i32mf2_m(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1338
1339 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_m(
1340 // CHECK-RV64-NEXT: entry:
1341 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1342 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1343 //
// Masked vwadd.vv: vint16mf2_t + vint16mf2_t -> vint32m1_t (widening add); lowering pinned by CHECK lines above.
vint32m1_t test_vwadd_vv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1347
1348 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_m(
1349 // CHECK-RV64-NEXT: entry:
1350 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1351 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1352 //
// Masked vwadd.vx: vint16mf2_t + int16_t scalar -> vint32m1_t (widening add); lowering pinned by CHECK lines above.
vint32m1_t test_vwadd_vx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1356
1357 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_m(
1358 // CHECK-RV64-NEXT: entry:
1359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1360 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1361 //
// Masked vwadd.wv: wide vint32m1_t + vint16mf2_t -> vint32m1_t; lowering pinned by CHECK lines above.
vint32m1_t test_vwadd_wv_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1365
1366 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_m(
1367 // CHECK-RV64-NEXT: entry:
1368 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1369 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1370 //
// Masked vwadd.wx: wide vint32m1_t + int16_t scalar -> vint32m1_t; lowering pinned by CHECK lines above.
vint32m1_t test_vwadd_wx_i32m1_m(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1374
1375 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_m(
1376 // CHECK-RV64-NEXT: entry:
1377 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1378 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1379 //
// Masked vwadd.vv: vint16m1_t + vint16m1_t -> vint32m2_t (widening add); lowering pinned by CHECK lines above.
vint32m2_t test_vwadd_vv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1383
1384 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_m(
1385 // CHECK-RV64-NEXT: entry:
1386 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1387 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1388 //
// Masked vwadd.vx: vint16m1_t + int16_t scalar -> vint32m2_t (widening add); lowering pinned by CHECK lines above.
vint32m2_t test_vwadd_vx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1392
1393 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_m(
1394 // CHECK-RV64-NEXT: entry:
1395 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1396 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1397 //
// Masked vwadd.wv: wide vint32m2_t + vint16m1_t -> vint32m2_t; lowering pinned by CHECK lines above.
vint32m2_t test_vwadd_wv_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1401
1402 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_m(
1403 // CHECK-RV64-NEXT: entry:
1404 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1405 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1406 //
// Masked vwadd.wx: wide vint32m2_t + int16_t scalar -> vint32m2_t; lowering pinned by CHECK lines above.
vint32m2_t test_vwadd_wx_i32m2_m(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1410
1411 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_m(
1412 // CHECK-RV64-NEXT: entry:
1413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1414 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1415 //
// Masked vwadd.vv: vint16m2_t + vint16m2_t -> vint32m4_t (widening add); lowering pinned by CHECK lines above.
vint32m4_t test_vwadd_vv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1419
1420 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_m(
1421 // CHECK-RV64-NEXT: entry:
1422 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1423 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1424 //
// Masked vwadd.vx: vint16m2_t + int16_t scalar -> vint32m4_t (widening add); lowering pinned by CHECK lines above.
vint32m4_t test_vwadd_vx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1428
1429 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_m(
1430 // CHECK-RV64-NEXT: entry:
1431 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1432 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1433 //
// Masked vwadd.wv: wide vint32m4_t + vint16m2_t -> vint32m4_t; lowering pinned by CHECK lines above.
vint32m4_t test_vwadd_wv_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1437
1438 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_m(
1439 // CHECK-RV64-NEXT: entry:
1440 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1441 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1442 //
// Masked vwadd.wx: wide vint32m4_t + int16_t scalar -> vint32m4_t; lowering pinned by CHECK lines above.
vint32m4_t test_vwadd_wx_i32m4_m(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1446
1447 // CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_m(
1448 // CHECK-RV64-NEXT: entry:
1449 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1450 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1451 //
// Masked vwadd.vv: vint16m4_t + vint16m4_t -> vint32m8_t (widening add); lowering pinned by CHECK lines above.
vint32m8_t test_vwadd_vv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1455
1456 // CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_m(
1457 // CHECK-RV64-NEXT: entry:
1458 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1459 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1460 //
// Masked vwadd.vx: vint16m4_t + int16_t scalar -> vint32m8_t (widening add); lowering pinned by CHECK lines above.
vint32m8_t test_vwadd_vx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1464
1465 // CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_m(
1466 // CHECK-RV64-NEXT: entry:
1467 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1468 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1469 //
// Masked vwadd.wv: wide vint32m8_t + vint16m4_t -> vint32m8_t; lowering pinned by CHECK lines above.
vint32m8_t test_vwadd_wv_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1473
1474 // CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_m(
1475 // CHECK-RV64-NEXT: entry:
1476 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1477 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1478 //
// Masked vwadd.wx: wide vint32m8_t + int16_t scalar -> vint32m8_t; lowering pinned by CHECK lines above.
vint32m8_t test_vwadd_wx_i32m8_m(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1482
1483 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_m(
1484 // CHECK-RV64-NEXT: entry:
1485 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1486 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1487 //
// Masked vwadd.vv: vint32mf2_t + vint32mf2_t -> vint64m1_t (widening add); lowering pinned by CHECK lines above.
vint64m1_t test_vwadd_vv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1491
1492 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_m(
1493 // CHECK-RV64-NEXT: entry:
1494 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1495 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1496 //
// Masked vwadd.vx: vint32mf2_t + int32_t scalar -> vint64m1_t (widening add); lowering pinned by CHECK lines above.
vint64m1_t test_vwadd_vx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1500
1501 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_m(
1502 // CHECK-RV64-NEXT: entry:
1503 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1504 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1505 //
// Masked vwadd.wv: wide vint64m1_t + vint32mf2_t -> vint64m1_t; lowering pinned by CHECK lines above.
vint64m1_t test_vwadd_wv_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1509
1510 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_m(
1511 // CHECK-RV64-NEXT: entry:
1512 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1513 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1514 //
// Masked vwadd.wx: wide vint64m1_t + int32_t scalar -> vint64m1_t; lowering pinned by CHECK lines above.
vint64m1_t test_vwadd_wx_i64m1_m(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1518
1519 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_m(
1520 // CHECK-RV64-NEXT: entry:
1521 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1522 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1523 //
// Masked vwadd.vv: vint32m1_t + vint32m1_t -> vint64m2_t (widening add); lowering pinned by CHECK lines above.
vint64m2_t test_vwadd_vv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1527
1528 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_m(
1529 // CHECK-RV64-NEXT: entry:
1530 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1531 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1532 //
// Masked vwadd.vx: vint32m1_t + int32_t scalar -> vint64m2_t (widening add); lowering pinned by CHECK lines above.
vint64m2_t test_vwadd_vx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1536
1537 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_m(
1538 // CHECK-RV64-NEXT: entry:
1539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1540 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1541 //
// Masked vwadd.wv: wide vint64m2_t + vint32m1_t -> vint64m2_t; lowering pinned by CHECK lines above.
vint64m2_t test_vwadd_wv_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1545
1546 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_m(
1547 // CHECK-RV64-NEXT: entry:
1548 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1549 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1550 //
// Masked overloaded vwadd.wx: adds scalar int32_t op2 to already-wide vint64m2_t op1.
vint64m2_t test_vwadd_wx_i64m2_m(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1554
1555 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_m(
1556 // CHECK-RV64-NEXT: entry:
1557 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1558 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1559 //
// Masked overloaded vwadd.vv: signed widening add, two vint32m2_t -> vint64m4_t.
vint64m4_t test_vwadd_vv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1563
1564 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_m(
1565 // CHECK-RV64-NEXT: entry:
1566 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1567 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1568 //
// Masked overloaded vwadd.vx: signed widening add of vint32m2_t and scalar int32_t -> vint64m4_t.
vint64m4_t test_vwadd_vx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1572
1573 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_m(
1574 // CHECK-RV64-NEXT: entry:
1575 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1576 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1577 //
// Masked overloaded vwadd.wv: adds narrow vint32m2_t op2 to already-wide vint64m4_t op1.
vint64m4_t test_vwadd_wv_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1581
1582 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_m(
1583 // CHECK-RV64-NEXT: entry:
1584 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1585 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1586 //
// Masked overloaded vwadd.wx: adds scalar int32_t op2 to already-wide vint64m4_t op1.
vint64m4_t test_vwadd_wx_i64m4_m(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1590
1591 // CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_m(
1592 // CHECK-RV64-NEXT: entry:
1593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1594 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1595 //
// Masked overloaded vwadd.vv: signed widening add, two vint32m4_t -> vint64m8_t.
vint64m8_t test_vwadd_vv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl);
}
1599
1600 // CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_m(
1601 // CHECK-RV64-NEXT: entry:
1602 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1603 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1604 //
// Masked overloaded vwadd.vx: signed widening add of vint32m4_t and scalar int32_t -> vint64m8_t.
vint64m8_t test_vwadd_vx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl);
}
1608
1609 // CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_m(
1610 // CHECK-RV64-NEXT: entry:
1611 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1612 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1613 //
// Masked overloaded vwadd.wv: adds narrow vint32m4_t op2 to already-wide vint64m8_t op1.
vint64m8_t test_vwadd_wv_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl);
}
1617
1618 // CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_m(
1619 // CHECK-RV64-NEXT: entry:
1620 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1621 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1622 //
// Masked overloaded vwadd.wx: adds scalar int32_t op2 to already-wide vint64m8_t op1.
vint64m8_t test_vwadd_wx_i64m8_m(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl);
}
1626
1627 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_m(
1628 // CHECK-RV64-NEXT: entry:
1629 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1630 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1631 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint8mf8_t -> vuint16mf4_t.
vuint16mf4_t test_vwaddu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1635
1636 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_m(
1637 // CHECK-RV64-NEXT: entry:
1638 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1639 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1640 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint8mf8_t and scalar uint8_t -> vuint16mf4_t.
vuint16mf4_t test_vwaddu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1644
1645 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_m(
1646 // CHECK-RV64-NEXT: entry:
1647 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1648 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1649 //
// Masked overloaded vwaddu.wv: adds narrow vuint8mf8_t op2 to already-wide vuint16mf4_t op1.
vuint16mf4_t test_vwaddu_wv_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1653
1654 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_m(
1655 // CHECK-RV64-NEXT: entry:
1656 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1657 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1658 //
// Masked overloaded vwaddu.wx: adds scalar uint8_t op2 to already-wide vuint16mf4_t op1.
vuint16mf4_t test_vwaddu_wx_u16mf4_m(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1662
1663 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_m(
1664 // CHECK-RV64-NEXT: entry:
1665 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1666 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1667 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint8mf4_t -> vuint16mf2_t.
vuint16mf2_t test_vwaddu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1671
1672 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_m(
1673 // CHECK-RV64-NEXT: entry:
1674 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1675 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1676 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint8mf4_t and scalar uint8_t -> vuint16mf2_t.
vuint16mf2_t test_vwaddu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1680
1681 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_m(
1682 // CHECK-RV64-NEXT: entry:
1683 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1684 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1685 //
// Masked overloaded vwaddu.wv: adds narrow vuint8mf4_t op2 to already-wide vuint16mf2_t op1.
vuint16mf2_t test_vwaddu_wv_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1689
1690 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_m(
1691 // CHECK-RV64-NEXT: entry:
1692 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1693 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1694 //
// Masked overloaded vwaddu.wx: adds scalar uint8_t op2 to already-wide vuint16mf2_t op1.
vuint16mf2_t test_vwaddu_wx_u16mf2_m(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1698
1699 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_m(
1700 // CHECK-RV64-NEXT: entry:
1701 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1702 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1703 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint8mf2_t -> vuint16m1_t.
vuint16m1_t test_vwaddu_vv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1707
1708 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_m(
1709 // CHECK-RV64-NEXT: entry:
1710 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1711 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1712 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint8mf2_t and scalar uint8_t -> vuint16m1_t.
vuint16m1_t test_vwaddu_vx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1716
1717 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_m(
1718 // CHECK-RV64-NEXT: entry:
1719 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1720 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1721 //
// Masked overloaded vwaddu.wv: adds narrow vuint8mf2_t op2 to already-wide vuint16m1_t op1.
vuint16m1_t test_vwaddu_wv_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1725
1726 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_m(
1727 // CHECK-RV64-NEXT: entry:
1728 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1729 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1730 //
// Masked overloaded vwaddu.wx: adds scalar uint8_t op2 to already-wide vuint16m1_t op1.
vuint16m1_t test_vwaddu_wx_u16m1_m(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1734
1735 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_m(
1736 // CHECK-RV64-NEXT: entry:
1737 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1738 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1739 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint8m1_t -> vuint16m2_t.
vuint16m2_t test_vwaddu_vv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1743
1744 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_m(
1745 // CHECK-RV64-NEXT: entry:
1746 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1747 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1748 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint8m1_t and scalar uint8_t -> vuint16m2_t.
vuint16m2_t test_vwaddu_vx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1752
1753 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_m(
1754 // CHECK-RV64-NEXT: entry:
1755 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1756 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1757 //
// Masked overloaded vwaddu.wv: adds narrow vuint8m1_t op2 to already-wide vuint16m2_t op1.
vuint16m2_t test_vwaddu_wv_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1761
1762 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_m(
1763 // CHECK-RV64-NEXT: entry:
1764 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1765 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1766 //
// Masked overloaded vwaddu.wx: adds scalar uint8_t op2 to already-wide vuint16m2_t op1.
vuint16m2_t test_vwaddu_wx_u16m2_m(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1770
1771 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_m(
1772 // CHECK-RV64-NEXT: entry:
1773 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1774 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1775 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint8m2_t -> vuint16m4_t.
vuint16m4_t test_vwaddu_vv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1779
1780 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_m(
1781 // CHECK-RV64-NEXT: entry:
1782 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1783 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1784 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint8m2_t and scalar uint8_t -> vuint16m4_t.
vuint16m4_t test_vwaddu_vx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1788
1789 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_m(
1790 // CHECK-RV64-NEXT: entry:
1791 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1792 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1793 //
// Masked overloaded vwaddu.wv: adds narrow vuint8m2_t op2 to already-wide vuint16m4_t op1.
vuint16m4_t test_vwaddu_wv_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1797
1798 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_m(
1799 // CHECK-RV64-NEXT: entry:
1800 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1801 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1802 //
// Masked overloaded vwaddu.wx: adds scalar uint8_t op2 to already-wide vuint16m4_t op1.
vuint16m4_t test_vwaddu_wx_u16m4_m(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1806
1807 // CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_m(
1808 // CHECK-RV64-NEXT: entry:
1809 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1810 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1811 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint8m4_t -> vuint16m8_t.
vuint16m8_t test_vwaddu_vv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1815
1816 // CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_m(
1817 // CHECK-RV64-NEXT: entry:
1818 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1819 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1820 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint8m4_t and scalar uint8_t -> vuint16m8_t.
vuint16m8_t test_vwaddu_vx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1824
1825 // CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_m(
1826 // CHECK-RV64-NEXT: entry:
1827 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1828 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1829 //
// Masked overloaded vwaddu.wv: adds narrow vuint8m4_t op2 to already-wide vuint16m8_t op1.
vuint16m8_t test_vwaddu_wv_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1833
1834 // CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_m(
1835 // CHECK-RV64-NEXT: entry:
1836 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1837 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1838 //
// Masked overloaded vwaddu.wx: adds scalar uint8_t op2 to already-wide vuint16m8_t op1.
vuint16m8_t test_vwaddu_wx_u16m8_m(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1842
1843 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_m(
1844 // CHECK-RV64-NEXT: entry:
1845 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1846 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1847 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint16mf4_t -> vuint32mf2_t.
vuint32mf2_t test_vwaddu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1851
1852 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_m(
1853 // CHECK-RV64-NEXT: entry:
1854 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1855 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1856 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint16mf4_t and scalar uint16_t -> vuint32mf2_t.
vuint32mf2_t test_vwaddu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1860
1861 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_m(
1862 // CHECK-RV64-NEXT: entry:
1863 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1864 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1865 //
// Masked overloaded vwaddu.wv: adds narrow vuint16mf4_t op2 to already-wide vuint32mf2_t op1.
vuint32mf2_t test_vwaddu_wv_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1869
1870 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_m(
1871 // CHECK-RV64-NEXT: entry:
1872 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1873 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1874 //
// Masked overloaded vwaddu.wx: adds scalar uint16_t op2 to already-wide vuint32mf2_t op1.
vuint32mf2_t test_vwaddu_wx_u32mf2_m(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1878
1879 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_m(
1880 // CHECK-RV64-NEXT: entry:
1881 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1882 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1883 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint16mf2_t -> vuint32m1_t.
vuint32m1_t test_vwaddu_vv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1887
1888 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_m(
1889 // CHECK-RV64-NEXT: entry:
1890 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1891 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1892 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint16mf2_t and scalar uint16_t -> vuint32m1_t.
vuint32m1_t test_vwaddu_vx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1896
1897 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_m(
1898 // CHECK-RV64-NEXT: entry:
1899 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1900 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1901 //
// Masked overloaded vwaddu.wv: adds narrow vuint16mf2_t op2 to already-wide vuint32m1_t op1.
vuint32m1_t test_vwaddu_wv_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1905
1906 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_m(
1907 // CHECK-RV64-NEXT: entry:
1908 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1909 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1910 //
// Masked overloaded vwaddu.wx: adds scalar uint16_t op2 to already-wide vuint32m1_t op1.
vuint32m1_t test_vwaddu_wx_u32m1_m(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1914
1915 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_m(
1916 // CHECK-RV64-NEXT: entry:
1917 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1918 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1919 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint16m1_t -> vuint32m2_t.
vuint32m2_t test_vwaddu_vv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1923
1924 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_m(
1925 // CHECK-RV64-NEXT: entry:
1926 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1927 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1928 //
// Masked overloaded vwaddu.vx: unsigned widening add of vuint16m1_t and scalar uint16_t -> vuint32m2_t.
vuint32m2_t test_vwaddu_vx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl);
}
1932
1933 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_m(
1934 // CHECK-RV64-NEXT: entry:
1935 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1936 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1937 //
// Masked overloaded vwaddu.wv: adds narrow vuint16m1_t op2 to already-wide vuint32m2_t op1.
vuint32m2_t test_vwaddu_wv_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl);
}
1941
1942 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_m(
1943 // CHECK-RV64-NEXT: entry:
1944 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1945 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1946 //
// Masked overloaded vwaddu.wx: adds scalar uint16_t op2 to already-wide vuint32m2_t op1.
vuint32m2_t test_vwaddu_wx_u32m2_m(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl);
}
1950
1951 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_m(
1952 // CHECK-RV64-NEXT: entry:
1953 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1954 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1955 //
// Masked overloaded vwaddu.vv: unsigned widening add, two vuint16m2_t -> vuint32m4_t.
vuint32m4_t test_vwaddu_vv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl);
}
1959
1960 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_m(
1961 // CHECK-RV64-NEXT: entry:
1962 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1963 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1964 //
test_vwaddu_vx_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vuint16m2_t op1,uint16_t op2,size_t vl)1965 vuint32m4_t test_vwaddu_vx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl) {
1966 return vwaddu_vx(mask, maskedoff, op1, op2, vl);
1967 }
1968
1969 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_m(
1970 // CHECK-RV64-NEXT: entry:
1971 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1972 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1973 //
test_vwaddu_wv_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vuint32m4_t op1,vuint16m2_t op2,size_t vl)1974 vuint32m4_t test_vwaddu_wv_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl) {
1975 return vwaddu_wv(mask, maskedoff, op1, op2, vl);
1976 }
1977
1978 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_m(
1979 // CHECK-RV64-NEXT: entry:
1980 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1981 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1982 //
test_vwaddu_wx_u32m4_m(vbool8_t mask,vuint32m4_t maskedoff,vuint32m4_t op1,uint16_t op2,size_t vl)1983 vuint32m4_t test_vwaddu_wx_u32m4_m(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl) {
1984 return vwaddu_wx(mask, maskedoff, op1, op2, vl);
1985 }
1986
1987 // CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_m(
1988 // CHECK-RV64-NEXT: entry:
1989 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1990 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1991 //
test_vwaddu_vv_u32m8_m(vbool4_t mask,vuint32m8_t maskedoff,vuint16m4_t op1,vuint16m4_t op2,size_t vl)1992 vuint32m8_t test_vwaddu_vv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
1993 return vwaddu_vv(mask, maskedoff, op1, op2, vl);
1994 }
1995
1996 // CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_m(
1997 // CHECK-RV64-NEXT: entry:
1998 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
1999 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2000 //
test_vwaddu_vx_u32m8_m(vbool4_t mask,vuint32m8_t maskedoff,vuint16m4_t op1,uint16_t op2,size_t vl)2001 vuint32m8_t test_vwaddu_vx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl) {
2002 return vwaddu_vx(mask, maskedoff, op1, op2, vl);
2003 }
2004
2005 // CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_m(
2006 // CHECK-RV64-NEXT: entry:
2007 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2008 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2009 //
test_vwaddu_wv_u32m8_m(vbool4_t mask,vuint32m8_t maskedoff,vuint32m8_t op1,vuint16m4_t op2,size_t vl)2010 vuint32m8_t test_vwaddu_wv_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl) {
2011 return vwaddu_wv(mask, maskedoff, op1, op2, vl);
2012 }
2013
2014 // CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_m(
2015 // CHECK-RV64-NEXT: entry:
2016 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2017 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2018 //
test_vwaddu_wx_u32m8_m(vbool4_t mask,vuint32m8_t maskedoff,vuint32m8_t op1,uint16_t op2,size_t vl)2019 vuint32m8_t test_vwaddu_wx_u32m8_m(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl) {
2020 return vwaddu_wx(mask, maskedoff, op1, op2, vl);
2021 }
2022
2023 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_m(
2024 // CHECK-RV64-NEXT: entry:
2025 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2026 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2027 //
test_vwaddu_vv_u64m1_m(vbool64_t mask,vuint64m1_t maskedoff,vuint32mf2_t op1,vuint32mf2_t op2,size_t vl)2028 vuint64m1_t test_vwaddu_vv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl) {
2029 return vwaddu_vv(mask, maskedoff, op1, op2, vl);
2030 }
2031
2032 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_m(
2033 // CHECK-RV64-NEXT: entry:
2034 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2035 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2036 //
test_vwaddu_vx_u64m1_m(vbool64_t mask,vuint64m1_t maskedoff,vuint32mf2_t op1,uint32_t op2,size_t vl)2037 vuint64m1_t test_vwaddu_vx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl) {
2038 return vwaddu_vx(mask, maskedoff, op1, op2, vl);
2039 }
2040
2041 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_m(
2042 // CHECK-RV64-NEXT: entry:
2043 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2044 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2045 //
test_vwaddu_wv_u64m1_m(vbool64_t mask,vuint64m1_t maskedoff,vuint64m1_t op1,vuint32mf2_t op2,size_t vl)2046 vuint64m1_t test_vwaddu_wv_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl) {
2047 return vwaddu_wv(mask, maskedoff, op1, op2, vl);
2048 }
2049
2050 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_m(
2051 // CHECK-RV64-NEXT: entry:
2052 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2053 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2054 //
test_vwaddu_wx_u64m1_m(vbool64_t mask,vuint64m1_t maskedoff,vuint64m1_t op1,uint32_t op2,size_t vl)2055 vuint64m1_t test_vwaddu_wx_u64m1_m(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl) {
2056 return vwaddu_wx(mask, maskedoff, op1, op2, vl);
2057 }
2058
2059 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_m(
2060 // CHECK-RV64-NEXT: entry:
2061 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2062 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2063 //
test_vwaddu_vv_u64m2_m(vbool32_t mask,vuint64m2_t maskedoff,vuint32m1_t op1,vuint32m1_t op2,size_t vl)2064 vuint64m2_t test_vwaddu_vv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl) {
2065 return vwaddu_vv(mask, maskedoff, op1, op2, vl);
2066 }
2067
2068 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_m(
2069 // CHECK-RV64-NEXT: entry:
2070 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2071 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2072 //
test_vwaddu_vx_u64m2_m(vbool32_t mask,vuint64m2_t maskedoff,vuint32m1_t op1,uint32_t op2,size_t vl)2073 vuint64m2_t test_vwaddu_vx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl) {
2074 return vwaddu_vx(mask, maskedoff, op1, op2, vl);
2075 }
2076
2077 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_m(
2078 // CHECK-RV64-NEXT: entry:
2079 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2080 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2081 //
test_vwaddu_wv_u64m2_m(vbool32_t mask,vuint64m2_t maskedoff,vuint64m2_t op1,vuint32m1_t op2,size_t vl)2082 vuint64m2_t test_vwaddu_wv_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl) {
2083 return vwaddu_wv(mask, maskedoff, op1, op2, vl);
2084 }
2085
2086 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_m(
2087 // CHECK-RV64-NEXT: entry:
2088 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2089 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2090 //
test_vwaddu_wx_u64m2_m(vbool32_t mask,vuint64m2_t maskedoff,vuint64m2_t op1,uint32_t op2,size_t vl)2091 vuint64m2_t test_vwaddu_wx_u64m2_m(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl) {
2092 return vwaddu_wx(mask, maskedoff, op1, op2, vl);
2093 }
2094
2095 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_m(
2096 // CHECK-RV64-NEXT: entry:
2097 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2098 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2099 //
test_vwaddu_vv_u64m4_m(vbool16_t mask,vuint64m4_t maskedoff,vuint32m2_t op1,vuint32m2_t op2,size_t vl)2100 vuint64m4_t test_vwaddu_vv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl) {
2101 return vwaddu_vv(mask, maskedoff, op1, op2, vl);
2102 }
2103
2104 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_m(
2105 // CHECK-RV64-NEXT: entry:
2106 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2107 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2108 //
test_vwaddu_vx_u64m4_m(vbool16_t mask,vuint64m4_t maskedoff,vuint32m2_t op1,uint32_t op2,size_t vl)2109 vuint64m4_t test_vwaddu_vx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl) {
2110 return vwaddu_vx(mask, maskedoff, op1, op2, vl);
2111 }
2112
2113 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_m(
2114 // CHECK-RV64-NEXT: entry:
2115 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2116 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2117 //
test_vwaddu_wv_u64m4_m(vbool16_t mask,vuint64m4_t maskedoff,vuint64m4_t op1,vuint32m2_t op2,size_t vl)2118 vuint64m4_t test_vwaddu_wv_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl) {
2119 return vwaddu_wv(mask, maskedoff, op1, op2, vl);
2120 }
2121
2122 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_m(
2123 // CHECK-RV64-NEXT: entry:
2124 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2125 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2126 //
test_vwaddu_wx_u64m4_m(vbool16_t mask,vuint64m4_t maskedoff,vuint64m4_t op1,uint32_t op2,size_t vl)2127 vuint64m4_t test_vwaddu_wx_u64m4_m(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl) {
2128 return vwaddu_wx(mask, maskedoff, op1, op2, vl);
2129 }
2130
2131 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_m(
2132 // CHECK-RV64-NEXT: entry:
2133 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2134 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2135 //
test_vwaddu_vv_u64m8_m(vbool8_t mask,vuint64m8_t maskedoff,vuint32m4_t op1,vuint32m4_t op2,size_t vl)2136 vuint64m8_t test_vwaddu_vv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl) {
2137 return vwaddu_vv(mask, maskedoff, op1, op2, vl);
2138 }
2139
2140 // CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_m(
2141 // CHECK-RV64-NEXT: entry:
2142 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2143 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2144 //
test_vwaddu_vx_u64m8_m(vbool8_t mask,vuint64m8_t maskedoff,vuint32m4_t op1,uint32_t op2,size_t vl)2145 vuint64m8_t test_vwaddu_vx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl) {
2146 return vwaddu_vx(mask, maskedoff, op1, op2, vl);
2147 }
2148
2149 // CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_m(
2150 // CHECK-RV64-NEXT: entry:
2151 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2152 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2153 //
test_vwaddu_wv_u64m8_m(vbool8_t mask,vuint64m8_t maskedoff,vuint64m8_t op1,vuint32m4_t op2,size_t vl)2154 vuint64m8_t test_vwaddu_wv_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl) {
2155 return vwaddu_wv(mask, maskedoff, op1, op2, vl);
2156 }
2157
2158 // CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_m(
2159 // CHECK-RV64-NEXT: entry:
2160 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2161 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2162 //
test_vwaddu_wx_u64m8_m(vbool8_t mask,vuint64m8_t maskedoff,vuint64m8_t op1,uint32_t op2,size_t vl)2163 vuint64m8_t test_vwaddu_wx_u64m8_m(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl) {
2164 return vwaddu_wx(mask, maskedoff, op1, op2, vl);
2165 }
2166
2167 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf4_mt(
2168 // CHECK-RV64-NEXT: entry:
2169 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2170 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2171 //
test_vwadd_vv_i16mf4_mt(vbool64_t mask,vint16mf4_t maskedoff,vint8mf8_t op1,vint8mf8_t op2,size_t vl,size_t ta)2172 vint16mf4_t test_vwadd_vv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, vint8mf8_t op2, size_t vl, size_t ta) {
2173 return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2174 }
2175
2176 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf4_mt(
2177 // CHECK-RV64-NEXT: entry:
2178 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2179 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2180 //
test_vwadd_vx_i16mf4_mt(vbool64_t mask,vint16mf4_t maskedoff,vint8mf8_t op1,int8_t op2,size_t vl,size_t ta)2181 vint16mf4_t test_vwadd_vx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint8mf8_t op1, int8_t op2, size_t vl, size_t ta) {
2182 return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2183 }
2184
2185 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf4_mt(
2186 // CHECK-RV64-NEXT: entry:
2187 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2188 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2189 //
test_vwadd_wv_i16mf4_mt(vbool64_t mask,vint16mf4_t maskedoff,vint16mf4_t op1,vint8mf8_t op2,size_t vl,size_t ta)2190 vint16mf4_t test_vwadd_wv_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, vint8mf8_t op2, size_t vl, size_t ta) {
2191 return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2192 }
2193
2194 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf4_mt(
2195 // CHECK-RV64-NEXT: entry:
2196 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2197 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2198 //
test_vwadd_wx_i16mf4_mt(vbool64_t mask,vint16mf4_t maskedoff,vint16mf4_t op1,int8_t op2,size_t vl,size_t ta)2199 vint16mf4_t test_vwadd_wx_i16mf4_mt(vbool64_t mask, vint16mf4_t maskedoff, vint16mf4_t op1, int8_t op2, size_t vl, size_t ta) {
2200 return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2201 }
2202
2203 // CHECK-RV64-LABEL: @test_vwadd_vv_i16mf2_mt(
2204 // CHECK-RV64-NEXT: entry:
2205 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2206 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2207 //
test_vwadd_vv_i16mf2_mt(vbool32_t mask,vint16mf2_t maskedoff,vint8mf4_t op1,vint8mf4_t op2,size_t vl,size_t ta)2208 vint16mf2_t test_vwadd_vv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, vint8mf4_t op2, size_t vl, size_t ta) {
2209 return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2210 }
2211
2212 // CHECK-RV64-LABEL: @test_vwadd_vx_i16mf2_mt(
2213 // CHECK-RV64-NEXT: entry:
2214 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2215 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2216 //
test_vwadd_vx_i16mf2_mt(vbool32_t mask,vint16mf2_t maskedoff,vint8mf4_t op1,int8_t op2,size_t vl,size_t ta)2217 vint16mf2_t test_vwadd_vx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint8mf4_t op1, int8_t op2, size_t vl, size_t ta) {
2218 return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2219 }
2220
2221 // CHECK-RV64-LABEL: @test_vwadd_wv_i16mf2_mt(
2222 // CHECK-RV64-NEXT: entry:
2223 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2224 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2225 //
test_vwadd_wv_i16mf2_mt(vbool32_t mask,vint16mf2_t maskedoff,vint16mf2_t op1,vint8mf4_t op2,size_t vl,size_t ta)2226 vint16mf2_t test_vwadd_wv_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, vint8mf4_t op2, size_t vl, size_t ta) {
2227 return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2228 }
2229
2230 // CHECK-RV64-LABEL: @test_vwadd_wx_i16mf2_mt(
2231 // CHECK-RV64-NEXT: entry:
2232 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2233 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2234 //
test_vwadd_wx_i16mf2_mt(vbool32_t mask,vint16mf2_t maskedoff,vint16mf2_t op1,int8_t op2,size_t vl,size_t ta)2235 vint16mf2_t test_vwadd_wx_i16mf2_mt(vbool32_t mask, vint16mf2_t maskedoff, vint16mf2_t op1, int8_t op2, size_t vl, size_t ta) {
2236 return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2237 }
2238
2239 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m1_mt(
2240 // CHECK-RV64-NEXT: entry:
2241 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2242 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2243 //
test_vwadd_vv_i16m1_mt(vbool16_t mask,vint16m1_t maskedoff,vint8mf2_t op1,vint8mf2_t op2,size_t vl,size_t ta)2244 vint16m1_t test_vwadd_vv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, vint8mf2_t op2, size_t vl, size_t ta) {
2245 return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2246 }
2247
2248 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m1_mt(
2249 // CHECK-RV64-NEXT: entry:
2250 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2251 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2252 //
test_vwadd_vx_i16m1_mt(vbool16_t mask,vint16m1_t maskedoff,vint8mf2_t op1,int8_t op2,size_t vl,size_t ta)2253 vint16m1_t test_vwadd_vx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint8mf2_t op1, int8_t op2, size_t vl, size_t ta) {
2254 return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2255 }
2256
2257 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m1_mt(
2258 // CHECK-RV64-NEXT: entry:
2259 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2260 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2261 //
test_vwadd_wv_i16m1_mt(vbool16_t mask,vint16m1_t maskedoff,vint16m1_t op1,vint8mf2_t op2,size_t vl,size_t ta)2262 vint16m1_t test_vwadd_wv_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, vint8mf2_t op2, size_t vl, size_t ta) {
2263 return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2264 }
2265
2266 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m1_mt(
2267 // CHECK-RV64-NEXT: entry:
2268 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2269 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2270 //
test_vwadd_wx_i16m1_mt(vbool16_t mask,vint16m1_t maskedoff,vint16m1_t op1,int8_t op2,size_t vl,size_t ta)2271 vint16m1_t test_vwadd_wx_i16m1_mt(vbool16_t mask, vint16m1_t maskedoff, vint16m1_t op1, int8_t op2, size_t vl, size_t ta) {
2272 return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2273 }
2274
2275 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m2_mt(
2276 // CHECK-RV64-NEXT: entry:
2277 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2278 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2279 //
test_vwadd_vv_i16m2_mt(vbool8_t mask,vint16m2_t maskedoff,vint8m1_t op1,vint8m1_t op2,size_t vl,size_t ta)2280 vint16m2_t test_vwadd_vv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, vint8m1_t op2, size_t vl, size_t ta) {
2281 return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2282 }
2283
2284 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m2_mt(
2285 // CHECK-RV64-NEXT: entry:
2286 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2287 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2288 //
test_vwadd_vx_i16m2_mt(vbool8_t mask,vint16m2_t maskedoff,vint8m1_t op1,int8_t op2,size_t vl,size_t ta)2289 vint16m2_t test_vwadd_vx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint8m1_t op1, int8_t op2, size_t vl, size_t ta) {
2290 return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2291 }
2292
2293 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m2_mt(
2294 // CHECK-RV64-NEXT: entry:
2295 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2296 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2297 //
test_vwadd_wv_i16m2_mt(vbool8_t mask,vint16m2_t maskedoff,vint16m2_t op1,vint8m1_t op2,size_t vl,size_t ta)2298 vint16m2_t test_vwadd_wv_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, vint8m1_t op2, size_t vl, size_t ta) {
2299 return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2300 }
2301
2302 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m2_mt(
2303 // CHECK-RV64-NEXT: entry:
2304 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2305 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2306 //
test_vwadd_wx_i16m2_mt(vbool8_t mask,vint16m2_t maskedoff,vint16m2_t op1,int8_t op2,size_t vl,size_t ta)2307 vint16m2_t test_vwadd_wx_i16m2_mt(vbool8_t mask, vint16m2_t maskedoff, vint16m2_t op1, int8_t op2, size_t vl, size_t ta) {
2308 return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2309 }
2310
2311 // CHECK-RV64-LABEL: @test_vwadd_vv_i16m4_mt(
2312 // CHECK-RV64-NEXT: entry:
2313 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2314 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2315 //
test_vwadd_vv_i16m4_mt(vbool4_t mask,vint16m4_t maskedoff,vint8m2_t op1,vint8m2_t op2,size_t vl,size_t ta)2316 vint16m4_t test_vwadd_vv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, vint8m2_t op2, size_t vl, size_t ta) {
2317 return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2318 }
2319
2320 // CHECK-RV64-LABEL: @test_vwadd_vx_i16m4_mt(
2321 // CHECK-RV64-NEXT: entry:
2322 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2323 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2324 //
test_vwadd_vx_i16m4_mt(vbool4_t mask,vint16m4_t maskedoff,vint8m2_t op1,int8_t op2,size_t vl,size_t ta)2325 vint16m4_t test_vwadd_vx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint8m2_t op1, int8_t op2, size_t vl, size_t ta) {
2326 return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2327 }
2328
2329 // CHECK-RV64-LABEL: @test_vwadd_wv_i16m4_mt(
2330 // CHECK-RV64-NEXT: entry:
2331 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2332 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2333 //
test_vwadd_wv_i16m4_mt(vbool4_t mask,vint16m4_t maskedoff,vint16m4_t op1,vint8m2_t op2,size_t vl,size_t ta)2334 vint16m4_t test_vwadd_wv_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, vint8m2_t op2, size_t vl, size_t ta) {
2335 return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2336 }
2337
2338 // CHECK-RV64-LABEL: @test_vwadd_wx_i16m4_mt(
2339 // CHECK-RV64-NEXT: entry:
2340 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
2341 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2342 //
test_vwadd_wx_i16m4_mt(vbool4_t mask,vint16m4_t maskedoff,vint16m4_t op1,int8_t op2,size_t vl,size_t ta)2343 vint16m4_t test_vwadd_wx_i16m4_mt(vbool4_t mask, vint16m4_t maskedoff, vint16m4_t op1, int8_t op2, size_t vl, size_t ta) {
2344 return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
2345 }
2346
// Masked tail-agnostic (_mt) overloaded vwadd tests, i16m8 result (widening
// i8m4 -> i16m8). Each test verifies the overload resolves to the
// @llvm.riscv.vwadd[.w].mask.* intrinsic with tail policy 'i64 1'
// (VE_TAIL_AGNOSTIC); the 'ta' parameter is intentionally unused.
// CHECK lines are autogenerated by utils/update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vwadd_vv_i16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, vint8m4_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_vx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint8m4_t op1, int8_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wv_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, vint8m4_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vint16m8_t test_vwadd_wx_i16m8_mt(vbool2_t mask, vint16m8_t maskedoff, vint16m8_t op1, int8_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2382
// Masked tail-agnostic (_mt) overloaded vwadd tests for all i32 result LMULs
// (mf2, m1, m2, m4, m8), widening from i16. Four forms per LMUL:
//   vv (vector+vector), vx (vector+scalar), wv (wide+vector), wx (wide+scalar).
// The tail policy is the constant VE_TAIL_AGNOSTIC ('i64 1' in the IR); the
// 'ta' parameter is intentionally unused. CHECK lines are autogenerated by
// utils/update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vwadd_vv_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, vint16mf4_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_vx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint16mf4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wv_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, vint16mf4_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwadd_wx_i32mf2_mt(vbool64_t mask, vint32mf2_t maskedoff, vint32mf2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, vint16mf2_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_vx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint16mf2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wv_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, vint16mf2_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwadd_wx_i32m1_mt(vbool32_t mask, vint32m1_t maskedoff, vint32m1_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, vint16m1_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_vx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint16m1_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wv_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, vint16m1_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwadd_wx_i32m2_mt(vbool16_t mask, vint32m2_t maskedoff, vint32m2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, vint16m2_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_vx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint16m2_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wv_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, vint16m2_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwadd_wx_i32m4_mt(vbool8_t mask, vint32m4_t maskedoff, vint32m4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, vint16m4_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_vx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint16m4_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wv_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, vint16m4_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwadd_wx_i32m8_mt(vbool4_t mask, vint32m8_t maskedoff, vint32m8_t op1, int16_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2562
// Masked tail-agnostic (_mt) overloaded vwadd tests for all i64 result LMULs
// (m1, m2, m4, m8), widening from i32. Same four forms (vv/vx/wv/wx) and the
// same VE_TAIL_AGNOSTIC policy ('i64 1') as the i16/i32 groups; 'ta' is
// intentionally unused. CHECK lines are autogenerated by
// utils/update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vwadd_vv_i64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, vint32mf2_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_vx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint32mf2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wv_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, vint32mf2_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwadd_wx_i64m1_mt(vbool64_t mask, vint64m1_t maskedoff, vint64m1_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, vint32m1_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_vx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint32m1_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wv_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, vint32m1_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwadd_wx_i64m2_mt(vbool32_t mask, vint64m2_t maskedoff, vint64m2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, vint32m2_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_vx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint32m2_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wv_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, vint32m2_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwadd_wx_i64m4_mt(vbool16_t mask, vint64m4_t maskedoff, vint64m4_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vv_i64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, vint32m4_t op2, size_t vl, size_t ta) {
  return vwadd_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_vx_i64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_vx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint32m4_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wv_i64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wv_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, vint32m4_t op2, size_t vl, size_t ta) {
  return vwadd_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwadd_wx_i64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwadd_wx_i64m8_mt(vbool8_t mask, vint64m8_t maskedoff, vint64m8_t op1, int32_t op2, size_t vl, size_t ta) {
  return vwadd_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2706
// Masked tail-agnostic (_mt) overloaded vwaddu (unsigned widening add) tests,
// u16mf4 and u16mf2 results widening from u8. Same vv/vx/wv/wx forms and
// VE_TAIL_AGNOSTIC policy ('i64 1') as the signed vwadd tests above; 'ta' is
// intentionally unused. CHECK lines are autogenerated by
// utils/update_cc_test_checks.py.
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.mask.nxv1i16.nxv1i8.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_vx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint8mf8_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.nxv1i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wv_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, vuint8mf8_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwaddu.w.mask.nxv1i16.i8.i64(<vscale x 1 x i16> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwaddu_wx_u16mf4_mt(vbool64_t mask, vuint16mf4_t maskedoff, vuint16mf4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vv_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.mask.nxv2i16.nxv2i8.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_vx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint8mf4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.nxv2i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wv_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, vuint8mf4_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwaddu.w.mask.nxv2i16.i8.i64(<vscale x 2 x i16> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwaddu_wx_u16mf2_mt(vbool32_t mask, vuint16mf2_t maskedoff, vuint16mf2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2778
// Masked, tail-policy (_mt) vwaddu tests, u16m1 result: vv/vx widen u8mf2
// sources; wv/wx add a narrow u8 operand to an already-wide u16m1.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.mask.nxv4i16.nxv4i8.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_vx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint8mf2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.nxv4i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wv_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, vuint8mf2_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwaddu.w.mask.nxv4i16.i8.i64(<vscale x 4 x i16> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwaddu_wx_u16m1_mt(vbool16_t mask, vuint16m1_t maskedoff, vuint16m1_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2814
// Masked, tail-policy (_mt) vwaddu tests, u16m2 result: vv/vx widen u8m1
// sources; wv/wx add a narrow u8 operand to an already-wide u16m2.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, vuint8m1_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.mask.nxv8i16.nxv8i8.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_vx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint8m1_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.nxv8i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wv_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, vuint8m1_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwaddu.w.mask.nxv8i16.i8.i64(<vscale x 8 x i16> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwaddu_wx_u16m2_mt(vbool8_t mask, vuint16m2_t maskedoff, vuint16m2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2850
// Masked, tail-policy (_mt) vwaddu tests, u16m4 result: vv/vx widen u8m2
// sources; wv/wx add a narrow u8 operand to an already-wide u16m4.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, vuint8m2_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.mask.nxv16i16.nxv16i8.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_vx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint8m2_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.nxv16i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wv_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, vuint8m2_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwaddu.w.mask.nxv16i16.i8.i64(<vscale x 16 x i16> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwaddu_wx_u16m4_mt(vbool4_t mask, vuint16m4_t maskedoff, vuint16m4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2886
// Masked, tail-policy (_mt) vwaddu tests, u16m8 result: vv/vx widen u8m4
// sources; wv/wx add a narrow u8 operand to an already-wide u16m8.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, vuint8m4_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.mask.nxv32i16.nxv32i8.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i8> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_vx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint8m4_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.nxv32i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wv_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, vuint8m4_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u16m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwaddu.w.mask.nxv32i16.i8.i64(<vscale x 32 x i16> [[MASKEDOFF:%.*]], <vscale x 32 x i16> [[OP1:%.*]], i8 [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwaddu_wx_u16m8_mt(vbool2_t mask, vuint16m8_t maskedoff, vuint16m8_t op1, uint8_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2922
// Masked, tail-policy (_mt) vwaddu tests, u32mf2 result: vv/vx widen u16mf4
// sources; wv/wx add a narrow u16 operand to an already-wide u32mf2.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.mask.nxv1i32.nxv1i16.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_vx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint16mf4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.nxv1i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wv_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, vuint16mf4_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32mf2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwaddu.w.mask.nxv1i32.i16.i64(<vscale x 1 x i32> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vuint32mf2_t test_vwaddu_wx_u32mf2_mt(vbool64_t mask, vuint32mf2_t maskedoff, vuint32mf2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2958
// Masked, tail-policy (_mt) vwaddu tests, u32m1 result: vv/vx widen u16mf2
// sources; wv/wx add a narrow u16 operand to an already-wide u32m1.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.mask.nxv2i32.nxv2i16.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_vx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint16mf2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.nxv2i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wv_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, vuint16mf2_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwaddu.w.mask.nxv2i32.i16.i64(<vscale x 2 x i32> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vuint32m1_t test_vwaddu_wx_u32m1_mt(vbool32_t mask, vuint32m1_t maskedoff, vuint32m1_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
2994
// Masked, tail-policy (_mt) vwaddu tests, u32m2 result: vv/vx widen u16m1
// sources; wv/wx add a narrow u16 operand to an already-wide u32m2.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, vuint16m1_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.mask.nxv4i32.nxv4i16.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_vx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint16m1_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.nxv4i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wv_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, vuint16m1_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwaddu.w.mask.nxv4i32.i16.i64(<vscale x 4 x i32> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vuint32m2_t test_vwaddu_wx_u32m2_mt(vbool16_t mask, vuint32m2_t maskedoff, vuint32m2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3030
// Masked, tail-policy (_mt) vwaddu tests, u32m4 result: vv/vx widen u16m2
// sources; wv/wx add a narrow u16 operand to an already-wide u32m4.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, vuint16m2_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.mask.nxv8i32.nxv8i16.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_vx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint16m2_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.nxv8i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wv_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, vuint16m2_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwaddu.w.mask.nxv8i32.i16.i64(<vscale x 8 x i32> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vuint32m4_t test_vwaddu_wx_u32m4_mt(vbool8_t mask, vuint32m4_t maskedoff, vuint32m4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3066
// Masked, tail-policy (_mt) vwaddu tests, u32m8 result: vv/vx widen u16m4
// sources; wv/wx add a narrow u16 operand to an already-wide u32m8.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, vuint16m4_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.mask.nxv16i32.nxv16i16.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i16> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_vx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint16m4_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.nxv16i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wv_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, vuint16m4_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u32m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwaddu.w.mask.nxv16i32.i16.i64(<vscale x 16 x i32> [[MASKEDOFF:%.*]], <vscale x 16 x i32> [[OP1:%.*]], i16 [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vuint32m8_t test_vwaddu_wx_u32m8_mt(vbool4_t mask, vuint32m8_t maskedoff, vuint32m8_t op1, uint16_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3102
// Masked, tail-policy (_mt) vwaddu tests, u64m1 result: vv/vx widen u32mf2
// sources; wv/wx add a narrow u32 operand to an already-wide u64m1.
// `ta` is unused; VE_TAIL_AGNOSTIC is passed (trailing `i64 1` operand).
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, vuint32mf2_t op2, size_t vl, size_t ta) {
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.mask.nxv1i64.nxv1i32.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_vx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint32mf2_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.nxv1i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wv_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, vuint32mf2_t op2, size_t vl, size_t ta) {
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}

// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m1_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwaddu.w.mask.nxv1i64.i32.i64(<vscale x 1 x i64> [[MASKEDOFF:%.*]], <vscale x 1 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vuint64m1_t test_vwaddu_wx_u64m1_mt(vbool64_t mask, vuint64m1_t maskedoff, vuint64m1_t op1, uint32_t op2, size_t vl, size_t ta) {
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3138
3139 // CHECK-RV64-LABEL: @test_vwaddu_vv_u64m2_mt(
3140 // CHECK-RV64-NEXT: entry:
3141 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
3142 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
3143 //
test_vwaddu_vv_u64m2_mt(vbool32_t mask,vuint64m2_t maskedoff,vuint32m1_t op1,vuint32m1_t op2,size_t vl,size_t ta)3144 vuint64m2_t test_vwaddu_vv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
3145 return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
3146 }
3147
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.mask.nxv2i64.nxv2i32.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_vx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint32m1_t op1, uint32_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.vx (u32m1 vector + u32 scalar widened into u64m2);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3156
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.nxv2i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wv_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, vuint32m1_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.wv (wide u64m2 + narrow u32m1 vector);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3165
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m2_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwaddu.w.mask.nxv2i64.i32.i64(<vscale x 2 x i64> [[MASKEDOFF:%.*]], <vscale x 2 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vuint64m2_t test_vwaddu_wx_u64m2_mt(vbool32_t mask, vuint64m2_t maskedoff, vuint64m2_t op1, uint32_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.wx (wide u64m2 vector + u32 scalar);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3174
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.vv (two u32m2 vectors widened into u64m4);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3183
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.mask.nxv4i64.nxv4i32.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_vx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint32m2_t op1, uint32_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.vx (u32m2 vector + u32 scalar widened into u64m4);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3192
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.nxv4i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wv_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, vuint32m2_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.wv (wide u64m4 + narrow u32m2 vector);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3201
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m4_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwaddu.w.mask.nxv4i64.i32.i64(<vscale x 4 x i64> [[MASKEDOFF:%.*]], <vscale x 4 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vuint64m4_t test_vwaddu_wx_u64m4_mt(vbool16_t mask, vuint64m4_t maskedoff, vuint64m4_t op1, uint32_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.wx (wide u64m4 vector + u32 scalar);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3210
// CHECK-RV64-LABEL: @test_vwaddu_vv_u64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.vv (two u32m4 vectors widened into u64m8);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_vv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3219
// CHECK-RV64-LABEL: @test_vwaddu_vx_u64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.mask.nxv8i64.nxv8i32.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i32> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_vx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint32m4_t op1, uint32_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.vx (u32m4 vector + u32 scalar widened into u64m8);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_vx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3228
// CHECK-RV64-LABEL: @test_vwaddu_wv_u64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.nxv8i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wv_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, vuint32m4_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.wv (wide u64m8 + narrow u32m4 vector);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_wv(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3237
// CHECK-RV64-LABEL: @test_vwaddu_wx_u64m8_mt(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwaddu.w.mask.nxv8i64.i32.i64(<vscale x 8 x i64> [[MASKEDOFF:%.*]], <vscale x 8 x i64> [[OP1:%.*]], i32 [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]], i64 1)
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vuint64m8_t test_vwaddu_wx_u64m8_mt(vbool8_t mask, vuint64m8_t maskedoff, vuint64m8_t op1, uint32_t op2, size_t vl, size_t ta) {
  // Masked _mt overload of vwaddu.wx (wide u64m8 vector + u32 scalar);
  // VE_TAIL_AGNOSTIC lowers to the trailing policy operand (i64 1) in the CHECK line.
  return vwaddu_wx(mask, maskedoff, op1, op2, vl, VE_TAIL_AGNOSTIC);
}
3246
3247