1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
4
5 #include <riscv_vector.h>
6
7 //
8 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
9 // CHECK-RV64-NEXT: entry:
10 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
11 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
12 //
vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
                                  vint8mf8_t op2, size_t vl) {
  // vv form: i8 vector operands widened into the i16 accumulator; the
  // autogenerated CHECK lines above pin the single @llvm.riscv.vwmacc call.
  return vwmacc_vv_i16mf4(acc, op1, op2, vl);
}
17
18 //
19 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
20 // CHECK-RV64-NEXT: entry:
21 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
22 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
23 //
vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
                                  size_t vl) {
  // vx form: scalar i8 op1 with i8 vector op2, widened into the i16 accumulator.
  return vwmacc_vx_i16mf4(acc, op1, op2, vl);
}
28
29 //
30 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
31 // CHECK-RV64-NEXT: entry:
32 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
33 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
34 //
vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
                                  vint8mf4_t op2, size_t vl) {
  // vv form, mf2: IR checked above is a single @llvm.riscv.vwmacc.nxv2i16 call.
  return vwmacc_vv_i16mf2(acc, op1, op2, vl);
}
39
40 //
41 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
42 // CHECK-RV64-NEXT: entry:
43 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
44 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
45 //
vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
                                  size_t vl) {
  // vx form, mf2: scalar i8 op1 against vector op2 (see CHECKs above).
  return vwmacc_vx_i16mf2(acc, op1, op2, vl);
}
50
51 //
52 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
53 // CHECK-RV64-NEXT: entry:
54 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
55 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
56 //
vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
                                size_t vl) {
  // vv form, m1: i8 operands, i16 accumulator (CHECKs above pin the IR).
  return vwmacc_vv_i16m1(acc, op1, op2, vl);
}
61
62 //
63 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
64 // CHECK-RV64-NEXT: entry:
65 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
66 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
67 //
vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
                                size_t vl) {
  // vx form, m1: scalar i8 op1, vector op2 (see autogenerated CHECKs above).
  return vwmacc_vx_i16m1(acc, op1, op2, vl);
}
72
73 //
74 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
75 // CHECK-RV64-NEXT: entry:
76 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
77 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
78 //
vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
                                size_t vl) {
  // vv form, m2: checked IR is @llvm.riscv.vwmacc.nxv8i16 (see above).
  return vwmacc_vv_i16m2(acc, op1, op2, vl);
}
83
84 //
85 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
86 // CHECK-RV64-NEXT: entry:
87 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
88 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
89 //
vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
                                size_t vl) {
  // vx form, m2: scalar i8 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i16m2(acc, op1, op2, vl);
}
94
95 //
96 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
97 // CHECK-RV64-NEXT: entry:
98 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
99 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
100 //
vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
                                size_t vl) {
  // vv form, m4: checked IR is @llvm.riscv.vwmacc.nxv16i16 (see above).
  return vwmacc_vv_i16m4(acc, op1, op2, vl);
}
105
106 //
107 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
108 // CHECK-RV64-NEXT: entry:
109 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
110 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
111 //
vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
                                size_t vl) {
  // vx form, m4: scalar i8 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i16m4(acc, op1, op2, vl);
}
116
117 //
118 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
119 // CHECK-RV64-NEXT: entry:
120 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
121 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
122 //
vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
                                size_t vl) {
  // vv form, m8: checked IR is @llvm.riscv.vwmacc.nxv32i16 (see above).
  return vwmacc_vv_i16m8(acc, op1, op2, vl);
}
127
128 //
129 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
130 // CHECK-RV64-NEXT: entry:
131 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
132 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
133 //
vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
                                size_t vl) {
  // vx form, m8: scalar i8 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i16m8(acc, op1, op2, vl);
}
138
139 //
140 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
141 // CHECK-RV64-NEXT: entry:
142 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
143 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
144 //
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
                                  vint16mf4_t op2, size_t vl) {
  // vv form: i16 vector operands widened into the i32 accumulator (see CHECKs).
  return vwmacc_vv_i32mf2(acc, op1, op2, vl);
}
149
150 //
151 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
152 // CHECK-RV64-NEXT: entry:
153 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
154 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
155 //
vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
                                  size_t vl) {
  // vx form: scalar i16 op1 with i16 vector op2, i32 accumulator (see CHECKs).
  return vwmacc_vx_i32mf2(acc, op1, op2, vl);
}
160
161 //
162 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
163 // CHECK-RV64-NEXT: entry:
164 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
165 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
166 //
vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
                                vint16mf2_t op2, size_t vl) {
  // vv form, m1: checked IR is @llvm.riscv.vwmacc.nxv2i32 (see above).
  return vwmacc_vv_i32m1(acc, op1, op2, vl);
}
171
172 //
173 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
174 // CHECK-RV64-NEXT: entry:
175 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
176 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
177 //
vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
                                size_t vl) {
  // vx form, m1: scalar i16 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i32m1(acc, op1, op2, vl);
}
182
183 //
184 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
185 // CHECK-RV64-NEXT: entry:
186 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
187 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
188 //
vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
                                size_t vl) {
  // vv form, m2: checked IR is @llvm.riscv.vwmacc.nxv4i32 (see above).
  return vwmacc_vv_i32m2(acc, op1, op2, vl);
}
193
194 //
195 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
196 // CHECK-RV64-NEXT: entry:
197 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
198 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
199 //
vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
                                size_t vl) {
  // vx form, m2: scalar i16 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i32m2(acc, op1, op2, vl);
}
204
205 //
206 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
207 // CHECK-RV64-NEXT: entry:
208 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
209 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
210 //
vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
                                size_t vl) {
  // vv form, m4: checked IR is @llvm.riscv.vwmacc.nxv8i32 (see above).
  return vwmacc_vv_i32m4(acc, op1, op2, vl);
}
215
216 //
217 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
218 // CHECK-RV64-NEXT: entry:
219 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
220 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
221 //
vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
                                size_t vl) {
  // vx form, m4: scalar i16 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i32m4(acc, op1, op2, vl);
}
226
227 //
228 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
229 // CHECK-RV64-NEXT: entry:
230 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
231 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
232 //
vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
                                size_t vl) {
  // vv form, m8: checked IR is @llvm.riscv.vwmacc.nxv16i32 (see above).
  return vwmacc_vv_i32m8(acc, op1, op2, vl);
}
237
238 //
239 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
240 // CHECK-RV64-NEXT: entry:
241 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
242 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
243 //
vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
                                size_t vl) {
  // vx form, m8: scalar i16 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i32m8(acc, op1, op2, vl);
}
248
249 //
250 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
251 // CHECK-RV64-NEXT: entry:
252 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
253 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
254 //
vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
                                vint32mf2_t op2, size_t vl) {
  // vv form: i32 vector operands widened into the i64 accumulator (see CHECKs).
  return vwmacc_vv_i64m1(acc, op1, op2, vl);
}
259
260 //
261 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
262 // CHECK-RV64-NEXT: entry:
263 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
264 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
265 //
vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
                                size_t vl) {
  // vx form: scalar i32 op1 with i32 vector op2, i64 accumulator (see CHECKs).
  return vwmacc_vx_i64m1(acc, op1, op2, vl);
}
270
271 //
272 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
273 // CHECK-RV64-NEXT: entry:
274 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
275 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
276 //
vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
                                size_t vl) {
  // vv form, m2: checked IR is @llvm.riscv.vwmacc.nxv2i64 (see above).
  return vwmacc_vv_i64m2(acc, op1, op2, vl);
}
281
282 //
283 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
284 // CHECK-RV64-NEXT: entry:
285 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
286 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
287 //
vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
                                size_t vl) {
  // vx form, m2: scalar i32 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i64m2(acc, op1, op2, vl);
}
292
293 //
294 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
295 // CHECK-RV64-NEXT: entry:
296 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
297 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
298 //
vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
                                size_t vl) {
  // vv form, m4: checked IR is @llvm.riscv.vwmacc.nxv4i64 (see above).
  return vwmacc_vv_i64m4(acc, op1, op2, vl);
}
303
304 //
305 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
306 // CHECK-RV64-NEXT: entry:
307 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
308 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
309 //
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
                                size_t vl) {
  // vx form, m4: scalar i32 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i64m4(acc, op1, op2, vl);
}
314
315 //
316 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
317 // CHECK-RV64-NEXT: entry:
318 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
319 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
320 //
vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
                                size_t vl) {
  // vv form, m8: checked IR is @llvm.riscv.vwmacc.nxv8i64 (see above).
  return vwmacc_vv_i64m8(acc, op1, op2, vl);
}
325
326 //
327 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
328 // CHECK-RV64-NEXT: entry:
329 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
330 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
331 //
vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
                                size_t vl) {
  // vx form, m8: scalar i32 op1 (see autogenerated CHECKs above).
  return vwmacc_vx_i64m8(acc, op1, op2, vl);
}
336
337 //
338 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
339 // CHECK-RV64-NEXT: entry:
340 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
341 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
342 //
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  // Unsigned (vwmaccu) vv form: u8 operands into the u16 accumulator; the
  // autogenerated CHECKs above pin the single @llvm.riscv.vwmaccu call.
  return vwmaccu_vv_u16mf4(acc, op1, op2, vl);
}
347
348 //
349 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
350 // CHECK-RV64-NEXT: entry:
351 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
352 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
353 //
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  // Unsigned vx form: scalar u8 op1 with u8 vector op2 (see CHECKs above).
  return vwmaccu_vx_u16mf4(acc, op1, op2, vl);
}
358
359 //
360 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
361 // CHECK-RV64-NEXT: entry:
362 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
363 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
364 //
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  // Unsigned vv form, mf2: checked IR is @llvm.riscv.vwmaccu.nxv2i16.
  return vwmaccu_vv_u16mf2(acc, op1, op2, vl);
}
369
370 //
371 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
372 // CHECK-RV64-NEXT: entry:
373 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
374 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
375 //
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  // Unsigned vx form, mf2: scalar u8 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u16mf2(acc, op1, op2, vl);
}
380
381 //
382 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
383 // CHECK-RV64-NEXT: entry:
384 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
385 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
386 //
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
                                  vuint8mf2_t op2, size_t vl) {
  // Unsigned vv form, m1: checked IR is @llvm.riscv.vwmaccu.nxv4i16.
  return vwmaccu_vv_u16m1(acc, op1, op2, vl);
}
391
392 //
393 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
394 // CHECK-RV64-NEXT: entry:
395 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
396 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
397 //
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
                                  size_t vl) {
  // Unsigned vx form, m1: scalar u8 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u16m1(acc, op1, op2, vl);
}
402
403 //
404 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
405 // CHECK-RV64-NEXT: entry:
406 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
407 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
408 //
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
                                  vuint8m1_t op2, size_t vl) {
  // Unsigned vv form, m2: checked IR is @llvm.riscv.vwmaccu.nxv8i16.
  return vwmaccu_vv_u16m2(acc, op1, op2, vl);
}
413
414 //
415 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
416 // CHECK-RV64-NEXT: entry:
417 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
418 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
419 //
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
                                  size_t vl) {
  // Unsigned vx form, m2: scalar u8 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u16m2(acc, op1, op2, vl);
}
424
425 //
426 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
427 // CHECK-RV64-NEXT: entry:
428 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
429 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
430 //
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
                                  vuint8m2_t op2, size_t vl) {
  // Unsigned vv form, m4: checked IR is @llvm.riscv.vwmaccu.nxv16i16.
  return vwmaccu_vv_u16m4(acc, op1, op2, vl);
}
435
436 //
437 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
438 // CHECK-RV64-NEXT: entry:
439 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
440 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
441 //
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
                                  size_t vl) {
  // Unsigned vx form, m4: scalar u8 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u16m4(acc, op1, op2, vl);
}
446
447 //
448 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
449 // CHECK-RV64-NEXT: entry:
450 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
451 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
452 //
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
                                  vuint8m4_t op2, size_t vl) {
  // Unsigned vv form, m8: checked IR is @llvm.riscv.vwmaccu.nxv32i16.
  return vwmaccu_vv_u16m8(acc, op1, op2, vl);
}
457
458 //
459 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
460 // CHECK-RV64-NEXT: entry:
461 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
462 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
463 //
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
                                  size_t vl) {
  // Unsigned vx form, m8: scalar u8 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u16m8(acc, op1, op2, vl);
}
468
469 //
470 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
471 // CHECK-RV64-NEXT: entry:
472 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
473 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
474 //
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  // Unsigned vv form: u16 operands into the u32 accumulator (see CHECKs above).
  return vwmaccu_vv_u32mf2(acc, op1, op2, vl);
}
479
480 //
481 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
482 // CHECK-RV64-NEXT: entry:
483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
484 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
485 //
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  // Unsigned vx form: scalar u16 op1, u32 accumulator (see CHECKs above).
  return vwmaccu_vx_u32mf2(acc, op1, op2, vl);
}
490
491 //
492 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
493 // CHECK-RV64-NEXT: entry:
494 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
495 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
496 //
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
                                  vuint16mf2_t op2, size_t vl) {
  // Unsigned vv form, m1: checked IR is @llvm.riscv.vwmaccu.nxv2i32.
  return vwmaccu_vv_u32m1(acc, op1, op2, vl);
}
501
502 //
503 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
504 // CHECK-RV64-NEXT: entry:
505 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
506 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
507 //
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
                                  vuint16mf2_t op2, size_t vl) {
  // Unsigned vx form, m1: scalar u16 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u32m1(acc, op1, op2, vl);
}
512
513 //
514 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
515 // CHECK-RV64-NEXT: entry:
516 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
517 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
518 //
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
                                  vuint16m1_t op2, size_t vl) {
  // Unsigned vv form, m2: checked IR is @llvm.riscv.vwmaccu.nxv4i32.
  return vwmaccu_vv_u32m2(acc, op1, op2, vl);
}
523
524 //
525 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
526 // CHECK-RV64-NEXT: entry:
527 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
528 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
529 //
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
                                  vuint16m1_t op2, size_t vl) {
  // Unsigned vx form, m2: scalar u16 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u32m2(acc, op1, op2, vl);
}
534
535 //
536 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
537 // CHECK-RV64-NEXT: entry:
538 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
539 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
540 //
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
                                  vuint16m2_t op2, size_t vl) {
  // Unsigned vv form, m4: checked IR is @llvm.riscv.vwmaccu.nxv8i32.
  return vwmaccu_vv_u32m4(acc, op1, op2, vl);
}
545
546 //
547 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
548 // CHECK-RV64-NEXT: entry:
549 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
550 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
551 //
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
                                  vuint16m2_t op2, size_t vl) {
  // Unsigned vx form, m4: scalar u16 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u32m4(acc, op1, op2, vl);
}
556
557 //
558 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
559 // CHECK-RV64-NEXT: entry:
560 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
561 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
562 //
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
                                  vuint16m4_t op2, size_t vl) {
  // Unsigned vv form, m8: checked IR is @llvm.riscv.vwmaccu.nxv16i32.
  return vwmaccu_vv_u32m8(acc, op1, op2, vl);
}
567
568 //
569 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
570 // CHECK-RV64-NEXT: entry:
571 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
572 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
573 //
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
                                  vuint16m4_t op2, size_t vl) {
  // Unsigned vx form, m8: scalar u16 op1 (see autogenerated CHECKs above).
  return vwmaccu_vx_u32m8(acc, op1, op2, vl);
}
578
579 //
580 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
581 // CHECK-RV64-NEXT: entry:
582 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
583 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
584 //
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
                                  vuint32mf2_t op2, size_t vl) {
  // Unsigned vv form: u32 operands into the u64 accumulator (see CHECKs above).
  return vwmaccu_vv_u64m1(acc, op1, op2, vl);
}
589
590 //
591 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
592 // CHECK-RV64-NEXT: entry:
593 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
594 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
595 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
                                  vuint32mf2_t op2, size_t vl) {
  return vwmaccu_vx_u64m1(acc, op1, op2, vl);
}
600
601 //
602 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
603 // CHECK-RV64-NEXT: entry:
604 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
605 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
606 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vwmaccu_vv_u64m2(acc, op1, op2, vl);
}
611
612 //
613 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
614 // CHECK-RV64-NEXT: entry:
615 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
616 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
617 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vwmaccu_vx_u64m2(acc, op1, op2, vl);
}
622
623 //
624 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
625 // CHECK-RV64-NEXT: entry:
626 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
627 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
628 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vwmaccu_vv_u64m4(acc, op1, op2, vl);
}
633
634 //
635 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
636 // CHECK-RV64-NEXT: entry:
637 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
638 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
639 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vwmaccu_vx_u64m4(acc, op1, op2, vl);
}
644
645 //
646 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
647 // CHECK-RV64-NEXT: entry:
648 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
649 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
650 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vwmaccu_vv_u64m8(acc, op1, op2, vl);
}
655
656 //
657 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
658 // CHECK-RV64-NEXT: entry:
659 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
660 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
661 //
// Must lower to @llvm.riscv.vwmaccu.* exactly as the autogenerated CHECK lines above specify.
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vwmaccu_vx_u64m8(acc, op1, op2, vl);
}
666
667 //
668 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
669 // CHECK-RV64-NEXT: entry:
670 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
671 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
672 //
// vwmaccsu: signed op1 x unsigned op2; must lower to @llvm.riscv.vwmaccsu.* per the CHECK lines above.
vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  return vwmaccsu_vv_i16mf4(acc, op1, op2, vl);
}
677
678 //
679 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
680 // CHECK-RV64-NEXT: entry:
681 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
682 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
683 //
// vwmaccsu: signed scalar op1 x unsigned vector op2; lowers to @llvm.riscv.vwmaccsu.* per the CHECK lines above.
vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  return vwmaccsu_vx_i16mf4(acc, op1, op2, vl);
}
688
689 //
690 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
691 // CHECK-RV64-NEXT: entry:
692 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
693 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
694 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  return vwmaccsu_vv_i16mf2(acc, op1, op2, vl);
}
699
700 //
701 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
702 // CHECK-RV64-NEXT: entry:
703 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
704 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
705 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  return vwmaccsu_vx_i16mf2(acc, op1, op2, vl);
}
710
711 //
712 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
713 // CHECK-RV64-NEXT: entry:
714 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
715 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
716 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
                                  vuint8mf2_t op2, size_t vl) {
  return vwmaccsu_vv_i16m1(acc, op1, op2, vl);
}
721
722 //
723 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
724 // CHECK-RV64-NEXT: entry:
725 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
726 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
727 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i16m1(acc, op1, op2, vl);
}
732
733 //
734 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
735 // CHECK-RV64-NEXT: entry:
736 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
737 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
738 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
                                  size_t vl) {
  return vwmaccsu_vv_i16m2(acc, op1, op2, vl);
}
743
744 //
745 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
746 // CHECK-RV64-NEXT: entry:
747 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
748 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
749 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i16m2(acc, op1, op2, vl);
}
754
755 //
756 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
757 // CHECK-RV64-NEXT: entry:
758 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
759 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
760 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
                                  size_t vl) {
  return vwmaccsu_vv_i16m4(acc, op1, op2, vl);
}
765
766 //
767 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
768 // CHECK-RV64-NEXT: entry:
769 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
770 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
771 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i16m4(acc, op1, op2, vl);
}
776
777 //
778 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
779 // CHECK-RV64-NEXT: entry:
780 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
781 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
782 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
                                  size_t vl) {
  return vwmaccsu_vv_i16m8(acc, op1, op2, vl);
}
787
788 //
789 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
790 // CHECK-RV64-NEXT: entry:
791 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
792 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
793 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i16m8(acc, op1, op2, vl);
}
798
799 //
800 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
801 // CHECK-RV64-NEXT: entry:
802 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
803 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
804 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vwmaccsu_vv_i32mf2(acc, op1, op2, vl);
}
809
810 //
811 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
812 // CHECK-RV64-NEXT: entry:
813 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
814 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
815 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vwmaccsu_vx_i32mf2(acc, op1, op2, vl);
}
820
821 //
822 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
823 // CHECK-RV64-NEXT: entry:
824 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
825 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
826 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
                                  vuint16mf2_t op2, size_t vl) {
  return vwmaccsu_vv_i32m1(acc, op1, op2, vl);
}
831
832 //
833 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
834 // CHECK-RV64-NEXT: entry:
835 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
836 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
837 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i32m1(acc, op1, op2, vl);
}
842
843 //
844 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
845 // CHECK-RV64-NEXT: entry:
846 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
847 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
848 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
                                  vuint16m1_t op2, size_t vl) {
  return vwmaccsu_vv_i32m2(acc, op1, op2, vl);
}
853
854 //
855 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
856 // CHECK-RV64-NEXT: entry:
857 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
858 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
859 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i32m2(acc, op1, op2, vl);
}
864
865 //
866 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
867 // CHECK-RV64-NEXT: entry:
868 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
869 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
870 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
                                  vuint16m2_t op2, size_t vl) {
  return vwmaccsu_vv_i32m4(acc, op1, op2, vl);
}
875
876 //
877 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
878 // CHECK-RV64-NEXT: entry:
879 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
880 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
881 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i32m4(acc, op1, op2, vl);
}
886
887 //
888 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
889 // CHECK-RV64-NEXT: entry:
890 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
891 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
892 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
                                  vuint16m4_t op2, size_t vl) {
  return vwmaccsu_vv_i32m8(acc, op1, op2, vl);
}
897
898 //
899 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
900 // CHECK-RV64-NEXT: entry:
901 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
902 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
903 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i32m8(acc, op1, op2, vl);
}
908
909 //
910 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
911 // CHECK-RV64-NEXT: entry:
912 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
913 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
914 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
                                  vuint32mf2_t op2, size_t vl) {
  return vwmaccsu_vv_i64m1(acc, op1, op2, vl);
}
919
920 //
921 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
922 // CHECK-RV64-NEXT: entry:
923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
924 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
925 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i64m1(acc, op1, op2, vl);
}
930
931 //
932 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
933 // CHECK-RV64-NEXT: entry:
934 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
935 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
936 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vwmaccsu_vv_i64m2(acc, op1, op2, vl);
}
941
942 //
943 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
944 // CHECK-RV64-NEXT: entry:
945 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
946 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
947 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i64m2(acc, op1, op2, vl);
}
952
953 //
954 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
955 // CHECK-RV64-NEXT: entry:
956 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
957 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
958 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vwmaccsu_vv_i64m4(acc, op1, op2, vl);
}
963
964 //
965 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
966 // CHECK-RV64-NEXT: entry:
967 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
968 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
969 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i64m4(acc, op1, op2, vl);
}
974
975 //
976 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
977 // CHECK-RV64-NEXT: entry:
978 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
979 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
980 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vwmaccsu_vv_i64m8(acc, op1, op2, vl);
}
985
986 //
987 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
988 // CHECK-RV64-NEXT: entry:
989 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
990 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
991 //
// Must lower to @llvm.riscv.vwmaccsu.* exactly as the autogenerated CHECK lines above specify.
vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
                                  size_t vl) {
  return vwmaccsu_vx_i64m8(acc, op1, op2, vl);
}
996
997 //
998 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
999 // CHECK-RV64-NEXT: entry:
1000 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1001 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1002 //
// vwmaccus: unsigned scalar op1 x signed vector op2; lowers to @llvm.riscv.vwmaccus.* per the CHECK lines above.
vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
                                    vint8mf8_t op2, size_t vl) {
  return vwmaccus_vx_i16mf4(acc, op1, op2, vl);
}
1007
1008 //
1009 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
1010 // CHECK-RV64-NEXT: entry:
1011 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1012 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1013 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
                                    vint8mf4_t op2, size_t vl) {
  return vwmaccus_vx_i16mf2(acc, op1, op2, vl);
}
1018
1019 //
1020 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
1021 // CHECK-RV64-NEXT: entry:
1022 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1023 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1024 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i16m1(acc, op1, op2, vl);
}
1029
1030 //
1031 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
1032 // CHECK-RV64-NEXT: entry:
1033 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1034 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1035 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i16m2(acc, op1, op2, vl);
}
1040
1041 //
1042 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
1043 // CHECK-RV64-NEXT: entry:
1044 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1045 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1046 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i16m4(acc, op1, op2, vl);
}
1051
1052 //
1053 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
1054 // CHECK-RV64-NEXT: entry:
1055 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
1056 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1057 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i16m8(acc, op1, op2, vl);
}
1062
1063 //
1064 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
1065 // CHECK-RV64-NEXT: entry:
1066 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1067 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1068 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
                                    vint16mf4_t op2, size_t vl) {
  return vwmaccus_vx_i32mf2(acc, op1, op2, vl);
}
1073
1074 //
1075 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
1076 // CHECK-RV64-NEXT: entry:
1077 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1078 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1079 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i32m1(acc, op1, op2, vl);
}
1084
1085 //
1086 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
1087 // CHECK-RV64-NEXT: entry:
1088 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1089 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1090 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i32m2(acc, op1, op2, vl);
}
1095
1096 //
1097 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
1098 // CHECK-RV64-NEXT: entry:
1099 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1100 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1101 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i32m4(acc, op1, op2, vl);
}
1106
1107 //
1108 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
1109 // CHECK-RV64-NEXT: entry:
1110 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1111 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1112 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i32m8(acc, op1, op2, vl);
}
1117
1118 //
1119 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
1120 // CHECK-RV64-NEXT: entry:
1121 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1122 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1123 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i64m1(acc, op1, op2, vl);
}
1128
1129 //
1130 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
1131 // CHECK-RV64-NEXT: entry:
1132 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1133 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1134 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i64m2(acc, op1, op2, vl);
}
1139
1140 //
1141 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
1142 // CHECK-RV64-NEXT: entry:
1143 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1144 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1145 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i64m4(acc, op1, op2, vl);
}
1150
1151 //
1152 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
1153 // CHECK-RV64-NEXT: entry:
1154 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1155 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1156 //
// Must lower to @llvm.riscv.vwmaccus.* exactly as the autogenerated CHECK lines above specify.
vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
                                  size_t vl) {
  return vwmaccus_vx_i64m8(acc, op1, op2, vl);
}
1161
1162 //
1163 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
1164 // CHECK-RV64-NEXT: entry:
1165 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1166 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1167 //
// Masked variant: must lower to @llvm.riscv.vwmacc.mask.* with the mask operand, per the CHECK lines above.
vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                    vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vwmacc_vv_i16mf4_m(mask, acc, op1, op2, vl);
}
1172
1173 //
1174 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
1175 // CHECK-RV64-NEXT: entry:
1176 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1177 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1178 //
test_vwmacc_vx_i16mf4_m(vbool64_t mask,vint16mf4_t acc,int8_t op1,vint8mf8_t op2,size_t vl)1179 vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
1180 vint8mf8_t op2, size_t vl) {
1181 return vwmacc_vx_i16mf4_m(mask, acc, op1, op2, vl);
1182 }
1183
1184 //
1185 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
1186 // CHECK-RV64-NEXT: entry:
1187 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1188 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1189 //
test_vwmacc_vv_i16mf2_m(vbool32_t mask,vint16mf2_t acc,vint8mf4_t op1,vint8mf4_t op2,size_t vl)1190 vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
1191 vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
1192 return vwmacc_vv_i16mf2_m(mask, acc, op1, op2, vl);
1193 }
1194
1195 //
1196 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
1197 // CHECK-RV64-NEXT: entry:
1198 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1199 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1200 //
test_vwmacc_vx_i16mf2_m(vbool32_t mask,vint16mf2_t acc,int8_t op1,vint8mf4_t op2,size_t vl)1201 vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
1202 vint8mf4_t op2, size_t vl) {
1203 return vwmacc_vx_i16mf2_m(mask, acc, op1, op2, vl);
1204 }
1205
1206 //
1207 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
1208 // CHECK-RV64-NEXT: entry:
1209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1210 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1211 //
test_vwmacc_vv_i16m1_m(vbool16_t mask,vint16m1_t acc,vint8mf2_t op1,vint8mf2_t op2,size_t vl)1212 vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
1213 vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
1214 return vwmacc_vv_i16m1_m(mask, acc, op1, op2, vl);
1215 }
1216
1217 //
1218 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
1219 // CHECK-RV64-NEXT: entry:
1220 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1221 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1222 //
test_vwmacc_vx_i16m1_m(vbool16_t mask,vint16m1_t acc,int8_t op1,vint8mf2_t op2,size_t vl)1223 vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
1224 vint8mf2_t op2, size_t vl) {
1225 return vwmacc_vx_i16m1_m(mask, acc, op1, op2, vl);
1226 }
1227
1228 //
1229 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
1230 // CHECK-RV64-NEXT: entry:
1231 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1232 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1233 //
test_vwmacc_vv_i16m2_m(vbool8_t mask,vint16m2_t acc,vint8m1_t op1,vint8m1_t op2,size_t vl)1234 vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1,
1235 vint8m1_t op2, size_t vl) {
1236 return vwmacc_vv_i16m2_m(mask, acc, op1, op2, vl);
1237 }
1238
1239 //
1240 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
1241 // CHECK-RV64-NEXT: entry:
1242 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1243 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1244 //
test_vwmacc_vx_i16m2_m(vbool8_t mask,vint16m2_t acc,int8_t op1,vint8m1_t op2,size_t vl)1245 vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
1246 vint8m1_t op2, size_t vl) {
1247 return vwmacc_vx_i16m2_m(mask, acc, op1, op2, vl);
1248 }
1249
1250 //
1251 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
1252 // CHECK-RV64-NEXT: entry:
1253 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1254 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1255 //
test_vwmacc_vv_i16m4_m(vbool4_t mask,vint16m4_t acc,vint8m2_t op1,vint8m2_t op2,size_t vl)1256 vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1,
1257 vint8m2_t op2, size_t vl) {
1258 return vwmacc_vv_i16m4_m(mask, acc, op1, op2, vl);
1259 }
1260
1261 //
1262 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
1263 // CHECK-RV64-NEXT: entry:
1264 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1265 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1266 //
test_vwmacc_vx_i16m4_m(vbool4_t mask,vint16m4_t acc,int8_t op1,vint8m2_t op2,size_t vl)1267 vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
1268 vint8m2_t op2, size_t vl) {
1269 return vwmacc_vx_i16m4_m(mask, acc, op1, op2, vl);
1270 }
1271
1272 //
1273 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
1274 // CHECK-RV64-NEXT: entry:
1275 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1276 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1277 //
test_vwmacc_vv_i16m8_m(vbool2_t mask,vint16m8_t acc,vint8m4_t op1,vint8m4_t op2,size_t vl)1278 vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1,
1279 vint8m4_t op2, size_t vl) {
1280 return vwmacc_vv_i16m8_m(mask, acc, op1, op2, vl);
1281 }
1282
1283 //
1284 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
1285 // CHECK-RV64-NEXT: entry:
1286 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1287 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1288 //
test_vwmacc_vx_i16m8_m(vbool2_t mask,vint16m8_t acc,int8_t op1,vint8m4_t op2,size_t vl)1289 vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
1290 vint8m4_t op2, size_t vl) {
1291 return vwmacc_vx_i16m8_m(mask, acc, op1, op2, vl);
1292 }
1293
// Masked (_m) vwmacc tests, SEW 16 -> 32, LMUL mf2..m8, vv and vx forms.
// Same pattern as the i16 block above: each wrapper must lower to the
// matching @llvm.riscv.vwmacc.mask.* call.
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                    vint16mf4_t op1, vint16mf4_t op2,
                                    size_t vl) {
  return vwmacc_vv_i32mf2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                    int16_t op1, vint16mf4_t op2, size_t vl) {
  return vwmacc_vx_i32mf2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
                                  vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  return vwmacc_vv_i32m1_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
                                  vint16mf2_t op2, size_t vl) {
  return vwmacc_vx_i32m1_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
                                  vint16m1_t op1, vint16m1_t op2, size_t vl) {
  return vwmacc_vv_i32m2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
                                  vint16m1_t op2, size_t vl) {
  return vwmacc_vx_i32m2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1,
                                  vint16m2_t op2, size_t vl) {
  return vwmacc_vv_i32m4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
                                  vint16m2_t op2, size_t vl) {
  return vwmacc_vx_i32m4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1,
                                  vint16m4_t op2, size_t vl) {
  return vwmacc_vv_i32m8_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
                                  vint16m4_t op2, size_t vl) {
  return vwmacc_vx_i32m8_m(mask, acc, op1, op2, vl);
}
1404
// Masked (_m) vwmacc tests, SEW 32 -> 64, LMUL m1..m8, vv and vx forms.
// Same pattern as the blocks above; the widest signed widening FMA cases.
//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
                                  vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  return vwmacc_vv_i64m1_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
                                  vint32mf2_t op2, size_t vl) {
  return vwmacc_vx_i64m1_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
                                  vint32m1_t op1, vint32m1_t op2, size_t vl) {
  return vwmacc_vv_i64m2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
                                  vint32m1_t op2, size_t vl) {
  return vwmacc_vx_i64m2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
                                  vint32m2_t op1, vint32m2_t op2, size_t vl) {
  return vwmacc_vv_i64m4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
                                  vint32m2_t op2, size_t vl) {
  return vwmacc_vx_i64m4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1,
                                  vint32m4_t op2, size_t vl) {
  return vwmacc_vv_i64m8_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
                                  vint32m4_t op2, size_t vl) {
  return vwmacc_vx_i64m8_m(mask, acc, op1, op2, vl);
}
1492
// Masked (_m) variants of the UNSIGNED widening multiply-add vwmaccu,
// SEW 8 -> 16, LMUL mf4..m8, vv and vx forms. Lowers to
// @llvm.riscv.vwmaccu.mask.*; note the IR still uses plain iN types —
// signedness lives in the intrinsic name, not the LLVM types.
//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
                                      vuint8mf8_t op1, vuint8mf8_t op2,
                                      size_t vl) {
  return vwmaccu_vv_u16mf4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
//
vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc,
                                      uint8_t op1, vuint8mf8_t op2, size_t vl) {
  return vwmaccu_vx_u16mf4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
                                      vuint8mf4_t op1, vuint8mf4_t op2,
                                      size_t vl) {
  return vwmaccu_vv_u16mf2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
//
vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc,
                                      uint8_t op1, vuint8mf4_t op2, size_t vl) {
  return vwmaccu_vx_u16mf2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc,
                                    vuint8mf2_t op1, vuint8mf2_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u16m1_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
//
vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc,
                                    uint8_t op1, vuint8mf2_t op2, size_t vl) {
  return vwmaccu_vx_u16m1_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc,
                                    vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwmaccu_vv_u16m2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1,
                                    vuint8m1_t op2, size_t vl) {
  return vwmaccu_vx_u16m2_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc,
                                    vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwmaccu_vv_u16m4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1,
                                    vuint8m2_t op2, size_t vl) {
  return vwmaccu_vx_u16m4_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc,
                                    vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwmaccu_vv_u16m8_m(mask, acc, op1, op2, vl);
}

//
// CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1,
                                    vuint8m4_t op2, size_t vl) {
  return vwmaccu_vx_u16m8_m(mask, acc, op1, op2, vl);
}
1627
1628 //
1629 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
1630 // CHECK-RV64-NEXT: entry:
1631 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1632 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1633 //
test_vwmaccu_vv_u32mf2_m(vbool64_t mask,vuint32mf2_t acc,vuint16mf4_t op1,vuint16mf4_t op2,size_t vl)1634 vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
1635 vuint16mf4_t op1, vuint16mf4_t op2,
1636 size_t vl) {
1637 return vwmaccu_vv_u32mf2_m(mask, acc, op1, op2, vl);
1638 }
1639
1640 //
1641 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
1642 // CHECK-RV64-NEXT: entry:
1643 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1644 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1645 //
test_vwmaccu_vx_u32mf2_m(vbool64_t mask,vuint32mf2_t acc,uint16_t op1,vuint16mf4_t op2,size_t vl)1646 vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc,
1647 uint16_t op1, vuint16mf4_t op2,
1648 size_t vl) {
1649 return vwmaccu_vx_u32mf2_m(mask, acc, op1, op2, vl);
1650 }
1651
1652 //
1653 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
1654 // CHECK-RV64-NEXT: entry:
1655 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1656 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1657 //
test_vwmaccu_vv_u32m1_m(vbool32_t mask,vuint32m1_t acc,vuint16mf2_t op1,vuint16mf2_t op2,size_t vl)1658 vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc,
1659 vuint16mf2_t op1, vuint16mf2_t op2,
1660 size_t vl) {
1661 return vwmaccu_vv_u32m1_m(mask, acc, op1, op2, vl);
1662 }
1663
1664 //
1665 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
1666 // CHECK-RV64-NEXT: entry:
1667 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1668 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1669 //
test_vwmaccu_vx_u32m1_m(vbool32_t mask,vuint32m1_t acc,uint16_t op1,vuint16mf2_t op2,size_t vl)1670 vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc,
1671 uint16_t op1, vuint16mf2_t op2, size_t vl) {
1672 return vwmaccu_vx_u32m1_m(mask, acc, op1, op2, vl);
1673 }
1674
1675 //
1676 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
1677 // CHECK-RV64-NEXT: entry:
1678 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1679 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1680 //
test_vwmaccu_vv_u32m2_m(vbool16_t mask,vuint32m2_t acc,vuint16m1_t op1,vuint16m1_t op2,size_t vl)1681 vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc,
1682 vuint16m1_t op1, vuint16m1_t op2,
1683 size_t vl) {
1684 return vwmaccu_vv_u32m2_m(mask, acc, op1, op2, vl);
1685 }
1686
1687 //
1688 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
1689 // CHECK-RV64-NEXT: entry:
1690 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1691 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1692 //
// Masked vwmaccu.vx: scalar uint16_t operand, widening into a u32m2 accumulator.
vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc,
                                    uint16_t op1, vuint16m1_t op2, size_t vl) {
  return vwmaccu_vx_u32m2_m(mask, acc, op1, op2, vl);
}
1697
1698 //
1699 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
1700 // CHECK-RV64-NEXT: entry:
1701 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1702 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1703 //
// Masked vwmaccu.vv: u16m2 * u16m2 widened into a u32m4 accumulator.
vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc,
                                    vuint16m2_t op1, vuint16m2_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u32m4_m(mask, acc, op1, op2, vl);
}
1709
1710 //
1711 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
1712 // CHECK-RV64-NEXT: entry:
1713 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1714 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1715 //
// Masked vwmaccu.vx: scalar uint16_t operand, widening into a u32m4 accumulator.
vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc,
                                    uint16_t op1, vuint16m2_t op2, size_t vl) {
  return vwmaccu_vx_u32m4_m(mask, acc, op1, op2, vl);
}
1720
1721 //
1722 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
1723 // CHECK-RV64-NEXT: entry:
1724 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1725 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1726 //
// Masked vwmaccu.vv: u16m4 * u16m4 widened into a u32m8 accumulator.
vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc,
                                    vuint16m4_t op1, vuint16m4_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u32m8_m(mask, acc, op1, op2, vl);
}
1732
1733 //
1734 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
1735 // CHECK-RV64-NEXT: entry:
1736 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1737 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1738 //
// Masked vwmaccu.vx: scalar uint16_t operand, widening into a u32m8 accumulator.
vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
                                    uint16_t op1, vuint16m4_t op2, size_t vl) {
  return vwmaccu_vx_u32m8_m(mask, acc, op1, op2, vl);
}
1743
1744 //
1745 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
1746 // CHECK-RV64-NEXT: entry:
1747 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1748 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1749 //
// Masked vwmaccu.vv: u32mf2 * u32mf2 widened into a u64m1 accumulator.
vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
                                    vuint32mf2_t op1, vuint32mf2_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u64m1_m(mask, acc, op1, op2, vl);
}
1755
1756 //
1757 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
1758 // CHECK-RV64-NEXT: entry:
1759 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1760 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1761 //
// Masked vwmaccu.vx: scalar uint32_t operand, widening into a u64m1 accumulator.
vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
                                    uint32_t op1, vuint32mf2_t op2, size_t vl) {
  return vwmaccu_vx_u64m1_m(mask, acc, op1, op2, vl);
}
1766
1767 //
1768 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
1769 // CHECK-RV64-NEXT: entry:
1770 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1771 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1772 //
// Masked vwmaccu.vv: u32m1 * u32m1 widened into a u64m2 accumulator.
vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
                                    vuint32m1_t op1, vuint32m1_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u64m2_m(mask, acc, op1, op2, vl);
}
1778
1779 //
1780 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
1781 // CHECK-RV64-NEXT: entry:
1782 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1783 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1784 //
// Masked vwmaccu.vx: scalar uint32_t operand, widening into a u64m2 accumulator.
vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
                                    uint32_t op1, vuint32m1_t op2, size_t vl) {
  return vwmaccu_vx_u64m2_m(mask, acc, op1, op2, vl);
}
1789
1790 //
1791 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
1792 // CHECK-RV64-NEXT: entry:
1793 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1794 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1795 //
// Masked vwmaccu.vv: u32m2 * u32m2 widened into a u64m4 accumulator.
vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
                                    vuint32m2_t op1, vuint32m2_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u64m4_m(mask, acc, op1, op2, vl);
}
1801
1802 //
1803 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
1804 // CHECK-RV64-NEXT: entry:
1805 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1806 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1807 //
// Masked vwmaccu.vx: scalar uint32_t operand, widening into a u64m4 accumulator.
vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
                                    uint32_t op1, vuint32m2_t op2, size_t vl) {
  return vwmaccu_vx_u64m4_m(mask, acc, op1, op2, vl);
}
1812
1813 //
1814 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
1815 // CHECK-RV64-NEXT: entry:
1816 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1817 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1818 //
// Masked vwmaccu.vv: u32m4 * u32m4 widened into a u64m8 accumulator.
vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
                                    vuint32m4_t op1, vuint32m4_t op2,
                                    size_t vl) {
  return vwmaccu_vv_u64m8_m(mask, acc, op1, op2, vl);
}
1824
1825 //
1826 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
1827 // CHECK-RV64-NEXT: entry:
1828 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1829 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1830 //
// Masked vwmaccu.vx: scalar uint32_t operand, widening into a u64m8 accumulator.
vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
                                    uint32_t op1, vuint32m4_t op2, size_t vl) {
  return vwmaccu_vx_u64m8_m(mask, acc, op1, op2, vl);
}
1835
1836 //
1837 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
1838 // CHECK-RV64-NEXT: entry:
1839 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1840 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1841 //
// Masked vwmaccsu.vv: signed op1 (i8mf8) times unsigned op2 (u8mf8), widened
// into an i16mf4 accumulator; expected IR is pinned by the CHECK lines above.
vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                      vint8mf8_t op1, vuint8mf8_t op2,
                                      size_t vl) {
  return vwmaccsu_vv_i16mf4_m(mask, acc, op1, op2, vl);
}
1847
1848 //
1849 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
1850 // CHECK-RV64-NEXT: entry:
1851 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1852 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1853 //
// Masked vwmaccsu.vx: signed scalar int8_t op1, unsigned vector op2 (u8mf8).
vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                      int8_t op1, vuint8mf8_t op2, size_t vl) {
  return vwmaccsu_vx_i16mf4_m(mask, acc, op1, op2, vl);
}
1858
1859 //
1860 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
1861 // CHECK-RV64-NEXT: entry:
1862 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1863 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1864 //
// Masked vwmaccsu.vv: signed i8mf4 * unsigned u8mf4 into an i16mf2 accumulator.
vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                      vint8mf4_t op1, vuint8mf4_t op2,
                                      size_t vl) {
  return vwmaccsu_vv_i16mf2_m(mask, acc, op1, op2, vl);
}
1870
1871 //
1872 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
1873 // CHECK-RV64-NEXT: entry:
1874 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1875 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1876 //
// Masked vwmaccsu.vx: signed scalar int8_t op1, unsigned vector op2 (u8mf4).
vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                      int8_t op1, vuint8mf4_t op2, size_t vl) {
  return vwmaccsu_vx_i16mf2_m(mask, acc, op1, op2, vl);
}
1881
1882 //
1883 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
1884 // CHECK-RV64-NEXT: entry:
1885 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1886 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1887 //
// Masked vwmaccsu.vv: signed i8mf2 * unsigned u8mf2 into an i16m1 accumulator.
vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
                                    vint8mf2_t op1, vuint8mf2_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i16m1_m(mask, acc, op1, op2, vl);
}
1893
1894 //
1895 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
1896 // CHECK-RV64-NEXT: entry:
1897 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1898 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1899 //
// Masked vwmaccsu.vx: signed scalar int8_t op1, unsigned vector op2 (u8mf2).
vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
                                    vuint8mf2_t op2, size_t vl) {
  return vwmaccsu_vx_i16m1_m(mask, acc, op1, op2, vl);
}
1904
1905 //
1906 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
1907 // CHECK-RV64-NEXT: entry:
1908 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1909 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1910 //
// Masked vwmaccsu.vv: signed i8m1 * unsigned u8m1 into an i16m2 accumulator.
vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
                                    vint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwmaccsu_vv_i16m2_m(mask, acc, op1, op2, vl);
}
1915
1916 //
1917 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
1918 // CHECK-RV64-NEXT: entry:
1919 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1920 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1921 //
// Masked vwmaccsu.vx: signed scalar int8_t op1, unsigned vector op2 (u8m1).
vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
                                    vuint8m1_t op2, size_t vl) {
  return vwmaccsu_vx_i16m2_m(mask, acc, op1, op2, vl);
}
1926
1927 //
1928 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
1929 // CHECK-RV64-NEXT: entry:
1930 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1931 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1932 //
// Masked vwmaccsu.vv: signed i8m2 * unsigned u8m2 into an i16m4 accumulator.
vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
                                    vint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwmaccsu_vv_i16m4_m(mask, acc, op1, op2, vl);
}
1937
1938 //
1939 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
1940 // CHECK-RV64-NEXT: entry:
1941 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1942 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1943 //
// Masked vwmaccsu.vx: signed scalar int8_t op1, unsigned vector op2 (u8m2).
vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
                                    vuint8m2_t op2, size_t vl) {
  return vwmaccsu_vx_i16m4_m(mask, acc, op1, op2, vl);
}
1948
1949 //
1950 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
1951 // CHECK-RV64-NEXT: entry:
1952 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1953 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1954 //
// Masked vwmaccsu.vv: signed i8m4 * unsigned u8m4 into an i16m8 accumulator.
vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
                                    vint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwmaccsu_vv_i16m8_m(mask, acc, op1, op2, vl);
}
1959
1960 //
1961 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
1962 // CHECK-RV64-NEXT: entry:
1963 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1964 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1965 //
// Masked vwmaccsu.vx: signed scalar int8_t op1, unsigned vector op2 (u8m4).
vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
                                    vuint8m4_t op2, size_t vl) {
  return vwmaccsu_vx_i16m8_m(mask, acc, op1, op2, vl);
}
1970
1971 //
1972 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
1973 // CHECK-RV64-NEXT: entry:
1974 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1975 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1976 //
// Masked vwmaccsu.vv: signed i16mf4 * unsigned u16mf4 into an i32mf2 accumulator.
vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                      vint16mf4_t op1, vuint16mf4_t op2,
                                      size_t vl) {
  return vwmaccsu_vv_i32mf2_m(mask, acc, op1, op2, vl);
}
1982
1983 //
1984 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
1985 // CHECK-RV64-NEXT: entry:
1986 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1987 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1988 //
// Masked vwmaccsu.vx: signed scalar int16_t op1, unsigned vector op2 (u16mf4).
vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                      int16_t op1, vuint16mf4_t op2,
                                      size_t vl) {
  return vwmaccsu_vx_i32mf2_m(mask, acc, op1, op2, vl);
}
1994
1995 //
1996 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
1997 // CHECK-RV64-NEXT: entry:
1998 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1999 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2000 //
// Masked vwmaccsu.vv: signed i16mf2 * unsigned u16mf2 into an i32m1 accumulator.
vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
                                    vint16mf2_t op1, vuint16mf2_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i32m1_m(mask, acc, op1, op2, vl);
}
2006
2007 //
2008 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
2009 // CHECK-RV64-NEXT: entry:
2010 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2011 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2012 //
// Masked vwmaccsu.vx: signed scalar int16_t op1, unsigned vector op2 (u16mf2).
vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
                                    vuint16mf2_t op2, size_t vl) {
  return vwmaccsu_vx_i32m1_m(mask, acc, op1, op2, vl);
}
2017
2018 //
2019 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
2020 // CHECK-RV64-NEXT: entry:
2021 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2022 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2023 //
// Masked vwmaccsu.vv: signed i16m1 * unsigned u16m1 into an i32m2 accumulator.
vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
                                    vint16m1_t op1, vuint16m1_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i32m2_m(mask, acc, op1, op2, vl);
}
2029
2030 //
2031 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
2032 // CHECK-RV64-NEXT: entry:
2033 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2034 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2035 //
// Masked vwmaccsu.vx: signed scalar int16_t op1, unsigned vector op2 (u16m1).
vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
                                    vuint16m1_t op2, size_t vl) {
  return vwmaccsu_vx_i32m2_m(mask, acc, op1, op2, vl);
}
2040
2041 //
2042 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
2043 // CHECK-RV64-NEXT: entry:
2044 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2045 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2046 //
// Masked vwmaccsu.vv: signed i16m2 * unsigned u16m2 into an i32m4 accumulator.
vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
                                    vint16m2_t op1, vuint16m2_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i32m4_m(mask, acc, op1, op2, vl);
}
2052
2053 //
2054 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
2055 // CHECK-RV64-NEXT: entry:
2056 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2057 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
2058 //
// Masked vwmaccsu.vx: signed scalar int16_t op1, unsigned vector op2 (u16m2).
vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
                                    vuint16m2_t op2, size_t vl) {
  return vwmaccsu_vx_i32m4_m(mask, acc, op1, op2, vl);
}
2063
2064 //
2065 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
2066 // CHECK-RV64-NEXT: entry:
2067 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2068 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2069 //
// Masked vwmaccsu.vv: signed i16m4 * unsigned u16m4 into an i32m8 accumulator.
vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
                                    vint16m4_t op1, vuint16m4_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i32m8_m(mask, acc, op1, op2, vl);
}
2075
2076 //
2077 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
2078 // CHECK-RV64-NEXT: entry:
2079 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2080 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
2081 //
// Masked vwmaccsu.vx: signed scalar int16_t op1, unsigned vector op2 (u16m4).
vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
                                    vuint16m4_t op2, size_t vl) {
  return vwmaccsu_vx_i32m8_m(mask, acc, op1, op2, vl);
}
2086
2087 //
2088 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
2089 // CHECK-RV64-NEXT: entry:
2090 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2091 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2092 //
// Masked vwmaccsu.vv: signed i32mf2 * unsigned u32mf2 into an i64m1 accumulator.
vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
                                    vint32mf2_t op1, vuint32mf2_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i64m1_m(mask, acc, op1, op2, vl);
}
2098
2099 //
2100 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
2101 // CHECK-RV64-NEXT: entry:
2102 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2103 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
2104 //
// Masked vwmaccsu.vx: signed scalar int32_t op1, unsigned vector op2 (u32mf2).
vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
                                    vuint32mf2_t op2, size_t vl) {
  return vwmaccsu_vx_i64m1_m(mask, acc, op1, op2, vl);
}
2109
2110 //
2111 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
2112 // CHECK-RV64-NEXT: entry:
2113 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2114 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2115 //
// Masked vwmaccsu.vv: signed i32m1 * unsigned u32m1 into an i64m2 accumulator.
vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
                                    vint32m1_t op1, vuint32m1_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i64m2_m(mask, acc, op1, op2, vl);
}
2121
2122 //
2123 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
2124 // CHECK-RV64-NEXT: entry:
2125 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2126 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
2127 //
// Masked vwmaccsu.vx: signed scalar int32_t op1, unsigned vector op2 (u32m1).
vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
                                    vuint32m1_t op2, size_t vl) {
  return vwmaccsu_vx_i64m2_m(mask, acc, op1, op2, vl);
}
2132
2133 //
2134 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
2135 // CHECK-RV64-NEXT: entry:
2136 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2137 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2138 //
// Masked vwmaccsu.vv: signed i32m2 * unsigned u32m2 into an i64m4 accumulator.
vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
                                    vint32m2_t op1, vuint32m2_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i64m4_m(mask, acc, op1, op2, vl);
}
2144
2145 //
2146 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
2147 // CHECK-RV64-NEXT: entry:
2148 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2149 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
2150 //
// Masked vwmaccsu.vx: signed scalar int32_t op1, unsigned vector op2 (u32m2).
vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
                                    vuint32m2_t op2, size_t vl) {
  return vwmaccsu_vx_i64m4_m(mask, acc, op1, op2, vl);
}
2155
2156 //
2157 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
2158 // CHECK-RV64-NEXT: entry:
2159 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2160 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2161 //
// Masked vwmaccsu.vv: signed i32m4 * unsigned u32m4 into an i64m8 accumulator.
vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
                                    vint32m4_t op1, vuint32m4_t op2,
                                    size_t vl) {
  return vwmaccsu_vv_i64m8_m(mask, acc, op1, op2, vl);
}
2167
2168 //
2169 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
2170 // CHECK-RV64-NEXT: entry:
2171 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2172 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
2173 //
// Masked vwmaccsu.vx: signed scalar int32_t op1, unsigned vector op2 (u32m4).
vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
                                    vuint32m4_t op2, size_t vl) {
  return vwmaccsu_vx_i64m8_m(mask, acc, op1, op2, vl);
}
2178
2179 //
2180 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
2181 // CHECK-RV64-NEXT: entry:
2182 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2183 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
2184 //
// Masked vwmaccus.vx: unsigned scalar uint8_t op1, signed vector op2 (i8mf8),
// widened into an i16mf4 accumulator. (vwmaccus has no .vv form.)
vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                      uint8_t op1, vint8mf8_t op2, size_t vl) {
  return vwmaccus_vx_i16mf4_m(mask, acc, op1, op2, vl);
}
2189
2190 //
2191 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
2192 // CHECK-RV64-NEXT: entry:
2193 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2194 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
2195 //
// Masked vwmaccus.vx: unsigned scalar uint8_t op1, signed vector op2 (i8mf4).
vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                      uint8_t op1, vint8mf4_t op2, size_t vl) {
  return vwmaccus_vx_i16mf2_m(mask, acc, op1, op2, vl);
}
2200
2201 //
2202 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
2203 // CHECK-RV64-NEXT: entry:
2204 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2205 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2206 //
// Masked vwmaccus.vx: unsigned scalar uint8_t op1, signed vector op2 (i8mf2).
vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
                                    vint8mf2_t op2, size_t vl) {
  return vwmaccus_vx_i16m1_m(mask, acc, op1, op2, vl);
}
2211
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
//
// Masked (_m) vx form for i16m2: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
                                    vint8m1_t op2, size_t vl) {
  return vwmaccus_vx_i16m2_m(mask, acc, op1, op2, vl);
}
2222
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
//
// Masked (_m) vx form for i16m4: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
                                    vint8m2_t op2, size_t vl) {
  return vwmaccus_vx_i16m4_m(mask, acc, op1, op2, vl);
}
2233
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
//
// Masked (_m) vx form for i16m8: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
                                    vint8m4_t op2, size_t vl) {
  return vwmaccus_vx_i16m8_m(mask, acc, op1, op2, vl);
}
2244
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
//
// Masked (_m) vx form for i32mf2: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                      uint16_t op1, vint16mf4_t op2,
                                      size_t vl) {
  return vwmaccus_vx_i32mf2_m(mask, acc, op1, op2, vl);
}
2256
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
//
// Masked (_m) vx form for i32m1: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
                                    uint16_t op1, vint16mf2_t op2, size_t vl) {
  return vwmaccus_vx_i32m1_m(mask, acc, op1, op2, vl);
}
2267
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
//
// Masked (_m) vx form for i32m2: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
                                    uint16_t op1, vint16m1_t op2, size_t vl) {
  return vwmaccus_vx_i32m2_m(mask, acc, op1, op2, vl);
}
2278
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
// Masked (_m) vx form for i32m4: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
                                    vint16m2_t op2, size_t vl) {
  return vwmaccus_vx_i32m4_m(mask, acc, op1, op2, vl);
}
2289
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
// Masked (_m) vx form for i32m8: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
                                    vint16m4_t op2, size_t vl) {
  return vwmaccus_vx_i32m8_m(mask, acc, op1, op2, vl);
}
2300
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
// Masked (_m) vx form for i64m1: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
                                    uint32_t op1, vint32mf2_t op2, size_t vl) {
  return vwmaccus_vx_i64m1_m(mask, acc, op1, op2, vl);
}
2311
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
// Masked (_m) vx form for i64m2: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
                                    uint32_t op1, vint32m1_t op2, size_t vl) {
  return vwmaccus_vx_i64m2_m(mask, acc, op1, op2, vl);
}
2322
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
// Masked (_m) vx form for i64m4: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
                                    uint32_t op1, vint32m2_t op2, size_t vl) {
  return vwmaccus_vx_i64m4_m(mask, acc, op1, op2, vl);
}
2333
//
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
// Masked (_m) vx form for i64m8: unsigned scalar op1, signed vector op2;
// must lower to the masked vwmaccus intrinsic pinned by the CHECK lines above.
vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,
                                    vint32m4_t op2, size_t vl) {
  return vwmaccus_vx_i64m8_m(mask, acc, op1, op2, vl);
}
2344