1 // NOTE: Assertions have been autogenerated by utils/update_cc_test_checks.py
2 // REQUIRES: riscv-registered-target
3 // RUN: %clang_cc1 -triple riscv64 -target-feature +experimental-v -disable-O0-optnone -emit-llvm %s -o - | opt -S -mem2reg | FileCheck --check-prefix=CHECK-RV64 %s
4
5 #include <riscv_vector.h>
6
7 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4(
8 // CHECK-RV64-NEXT: entry:
9 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
10 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
11 //
vint16mf4_t test_vwmacc_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
                                  vint8mf8_t op2, size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
16
17 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4(
18 // CHECK-RV64-NEXT: entry:
19 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
20 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
21 //
vint16mf4_t test_vwmacc_vx_i16mf4(vint16mf4_t acc, int8_t op1, vint8mf8_t op2,
                                  size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
26
27 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2(
28 // CHECK-RV64-NEXT: entry:
29 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
30 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
31 //
vint16mf2_t test_vwmacc_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
                                  vint8mf4_t op2, size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
36
37 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2(
38 // CHECK-RV64-NEXT: entry:
39 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
40 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
41 //
vint16mf2_t test_vwmacc_vx_i16mf2(vint16mf2_t acc, int8_t op1, vint8mf4_t op2,
                                  size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
46
47 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1(
48 // CHECK-RV64-NEXT: entry:
49 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
50 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
51 //
vint16m1_t test_vwmacc_vv_i16m1(vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
56
57 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1(
58 // CHECK-RV64-NEXT: entry:
59 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
60 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
61 //
vint16m1_t test_vwmacc_vx_i16m1(vint16m1_t acc, int8_t op1, vint8mf2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
66
67 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2(
68 // CHECK-RV64-NEXT: entry:
69 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
70 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
71 //
vint16m2_t test_vwmacc_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vint8m1_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
76
77 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2(
78 // CHECK-RV64-NEXT: entry:
79 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
80 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
81 //
vint16m2_t test_vwmacc_vx_i16m2(vint16m2_t acc, int8_t op1, vint8m1_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
86
87 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4(
88 // CHECK-RV64-NEXT: entry:
89 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
90 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
91 //
vint16m4_t test_vwmacc_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vint8m2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
96
97 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4(
98 // CHECK-RV64-NEXT: entry:
99 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
100 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
101 //
vint16m4_t test_vwmacc_vx_i16m4(vint16m4_t acc, int8_t op1, vint8m2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
106
107 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8(
108 // CHECK-RV64-NEXT: entry:
109 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
110 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
111 //
vint16m8_t test_vwmacc_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vint8m4_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
116
117 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8(
118 // CHECK-RV64-NEXT: entry:
119 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
120 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
121 //
vint16m8_t test_vwmacc_vx_i16m8(vint16m8_t acc, int8_t op1, vint8m4_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
126
127 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2(
128 // CHECK-RV64-NEXT: entry:
129 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
130 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
131 //
vint32mf2_t test_vwmacc_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
                                  vint16mf4_t op2, size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
136
137 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2(
138 // CHECK-RV64-NEXT: entry:
139 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
140 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
141 //
vint32mf2_t test_vwmacc_vx_i32mf2(vint32mf2_t acc, int16_t op1, vint16mf4_t op2,
                                  size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
146
147 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1(
148 // CHECK-RV64-NEXT: entry:
149 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
150 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
151 //
vint32m1_t test_vwmacc_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
                                vint16mf2_t op2, size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
156
157 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1(
158 // CHECK-RV64-NEXT: entry:
159 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
160 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
161 //
vint32m1_t test_vwmacc_vx_i32m1(vint32m1_t acc, int16_t op1, vint16mf2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
166
167 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2(
168 // CHECK-RV64-NEXT: entry:
169 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
170 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
171 //
vint32m2_t test_vwmacc_vv_i32m2(vint32m2_t acc, vint16m1_t op1, vint16m1_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
176
177 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2(
178 // CHECK-RV64-NEXT: entry:
179 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
180 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
181 //
vint32m2_t test_vwmacc_vx_i32m2(vint32m2_t acc, int16_t op1, vint16m1_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
186
187 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4(
188 // CHECK-RV64-NEXT: entry:
189 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
190 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
191 //
vint32m4_t test_vwmacc_vv_i32m4(vint32m4_t acc, vint16m2_t op1, vint16m2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
196
197 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4(
198 // CHECK-RV64-NEXT: entry:
199 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
200 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
201 //
vint32m4_t test_vwmacc_vx_i32m4(vint32m4_t acc, int16_t op1, vint16m2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
206
207 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8(
208 // CHECK-RV64-NEXT: entry:
209 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
210 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
211 //
vint32m8_t test_vwmacc_vv_i32m8(vint32m8_t acc, vint16m4_t op1, vint16m4_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
216
217 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8(
218 // CHECK-RV64-NEXT: entry:
219 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
220 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
221 //
vint32m8_t test_vwmacc_vx_i32m8(vint32m8_t acc, int16_t op1, vint16m4_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
226
227 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1(
228 // CHECK-RV64-NEXT: entry:
229 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
230 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
231 //
vint64m1_t test_vwmacc_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
                                vint32mf2_t op2, size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
236
237 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1(
238 // CHECK-RV64-NEXT: entry:
239 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
240 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
241 //
vint64m1_t test_vwmacc_vx_i64m1(vint64m1_t acc, int32_t op1, vint32mf2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
246
247 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2(
248 // CHECK-RV64-NEXT: entry:
249 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
250 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
251 //
vint64m2_t test_vwmacc_vv_i64m2(vint64m2_t acc, vint32m1_t op1, vint32m1_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
256
257 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2(
258 // CHECK-RV64-NEXT: entry:
259 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
260 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
261 //
vint64m2_t test_vwmacc_vx_i64m2(vint64m2_t acc, int32_t op1, vint32m1_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
266
267 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4(
268 // CHECK-RV64-NEXT: entry:
269 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
270 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
271 //
vint64m4_t test_vwmacc_vv_i64m4(vint64m4_t acc, vint32m2_t op1, vint32m2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
276
277 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4(
278 // CHECK-RV64-NEXT: entry:
279 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
280 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
281 //
vint64m4_t test_vwmacc_vx_i64m4(vint64m4_t acc, int32_t op1, vint32m2_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
286
287 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8(
288 // CHECK-RV64-NEXT: entry:
289 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
290 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
291 //
vint64m8_t test_vwmacc_vv_i64m8(vint64m8_t acc, vint32m4_t op1, vint32m4_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
296
297 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8(
298 // CHECK-RV64-NEXT: entry:
299 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
300 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
301 //
vint64m8_t test_vwmacc_vx_i64m8(vint64m8_t acc, int32_t op1, vint32m4_t op2,
                                size_t vl) {
  return vwmacc(acc, op1, op2, vl);
}
306
307 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4(
308 // CHECK-RV64-NEXT: entry:
309 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
310 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
311 //
vuint16mf4_t test_vwmaccu_vv_u16mf4(vuint16mf4_t acc, vuint8mf8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
316
317 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4(
318 // CHECK-RV64-NEXT: entry:
319 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
320 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
321 //
vuint16mf4_t test_vwmaccu_vx_u16mf4(vuint16mf4_t acc, uint8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
326
327 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2(
328 // CHECK-RV64-NEXT: entry:
329 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
330 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
331 //
vuint16mf2_t test_vwmaccu_vv_u16mf2(vuint16mf2_t acc, vuint8mf4_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
336
337 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2(
338 // CHECK-RV64-NEXT: entry:
339 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
340 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
341 //
vuint16mf2_t test_vwmaccu_vx_u16mf2(vuint16mf2_t acc, uint8_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
346
347 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1(
348 // CHECK-RV64-NEXT: entry:
349 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
350 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
351 //
vuint16m1_t test_vwmaccu_vv_u16m1(vuint16m1_t acc, vuint8mf2_t op1,
                                  vuint8mf2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
356
357 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1(
358 // CHECK-RV64-NEXT: entry:
359 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
360 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
361 //
vuint16m1_t test_vwmaccu_vx_u16m1(vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2,
                                  size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
366
367 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2(
368 // CHECK-RV64-NEXT: entry:
369 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
370 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
371 //
vuint16m2_t test_vwmaccu_vv_u16m2(vuint16m2_t acc, vuint8m1_t op1,
                                  vuint8m1_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
376
377 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2(
378 // CHECK-RV64-NEXT: entry:
379 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
380 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
381 //
vuint16m2_t test_vwmaccu_vx_u16m2(vuint16m2_t acc, uint8_t op1, vuint8m1_t op2,
                                  size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
386
387 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4(
388 // CHECK-RV64-NEXT: entry:
389 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
390 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
391 //
vuint16m4_t test_vwmaccu_vv_u16m4(vuint16m4_t acc, vuint8m2_t op1,
                                  vuint8m2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
396
397 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4(
398 // CHECK-RV64-NEXT: entry:
399 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
400 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
401 //
vuint16m4_t test_vwmaccu_vx_u16m4(vuint16m4_t acc, uint8_t op1, vuint8m2_t op2,
                                  size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
406
407 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8(
408 // CHECK-RV64-NEXT: entry:
409 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
410 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
411 //
vuint16m8_t test_vwmaccu_vv_u16m8(vuint16m8_t acc, vuint8m4_t op1,
                                  vuint8m4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
416
417 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8(
418 // CHECK-RV64-NEXT: entry:
419 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
420 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
421 //
vuint16m8_t test_vwmaccu_vx_u16m8(vuint16m8_t acc, uint8_t op1, vuint8m4_t op2,
                                  size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
426
427 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2(
428 // CHECK-RV64-NEXT: entry:
429 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
430 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
431 //
vuint32mf2_t test_vwmaccu_vv_u32mf2(vuint32mf2_t acc, vuint16mf4_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
436
437 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2(
438 // CHECK-RV64-NEXT: entry:
439 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
440 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
441 //
vuint32mf2_t test_vwmaccu_vx_u32mf2(vuint32mf2_t acc, uint16_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
446
447 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1(
448 // CHECK-RV64-NEXT: entry:
449 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
450 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
451 //
vuint32m1_t test_vwmaccu_vv_u32m1(vuint32m1_t acc, vuint16mf2_t op1,
                                  vuint16mf2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
456
457 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1(
458 // CHECK-RV64-NEXT: entry:
459 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
460 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
461 //
vuint32m1_t test_vwmaccu_vx_u32m1(vuint32m1_t acc, uint16_t op1,
                                  vuint16mf2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
466
467 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2(
468 // CHECK-RV64-NEXT: entry:
469 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
470 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
471 //
vuint32m2_t test_vwmaccu_vv_u32m2(vuint32m2_t acc, vuint16m1_t op1,
                                  vuint16m1_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
476
477 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2(
478 // CHECK-RV64-NEXT: entry:
479 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
480 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
481 //
vuint32m2_t test_vwmaccu_vx_u32m2(vuint32m2_t acc, uint16_t op1,
                                  vuint16m1_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
486
487 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4(
488 // CHECK-RV64-NEXT: entry:
489 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
490 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
491 //
vuint32m4_t test_vwmaccu_vv_u32m4(vuint32m4_t acc, vuint16m2_t op1,
                                  vuint16m2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
496
497 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4(
498 // CHECK-RV64-NEXT: entry:
499 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
500 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
501 //
vuint32m4_t test_vwmaccu_vx_u32m4(vuint32m4_t acc, uint16_t op1,
                                  vuint16m2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
506
507 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8(
508 // CHECK-RV64-NEXT: entry:
509 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
510 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
511 //
vuint32m8_t test_vwmaccu_vv_u32m8(vuint32m8_t acc, vuint16m4_t op1,
                                  vuint16m4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
516
517 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8(
518 // CHECK-RV64-NEXT: entry:
519 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
520 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
521 //
vuint32m8_t test_vwmaccu_vx_u32m8(vuint32m8_t acc, uint16_t op1,
                                  vuint16m4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
526
527 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1(
528 // CHECK-RV64-NEXT: entry:
529 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
530 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
531 //
vuint64m1_t test_vwmaccu_vv_u64m1(vuint64m1_t acc, vuint32mf2_t op1,
                                  vuint32mf2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
536
537 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1(
538 // CHECK-RV64-NEXT: entry:
539 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
540 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
541 //
vuint64m1_t test_vwmaccu_vx_u64m1(vuint64m1_t acc, uint32_t op1,
                                  vuint32mf2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
546
547 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2(
548 // CHECK-RV64-NEXT: entry:
549 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
550 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
551 //
// vwmaccu.vv overload: acc[i] += zext(op1[i]) * zext(op2[i]) — unsigned widening MAC, u32 -> u64.
vuint64m2_t test_vwmaccu_vv_u64m2(vuint64m2_t acc, vuint32m1_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
556
557 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2(
558 // CHECK-RV64-NEXT: entry:
559 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
560 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
561 //
// vwmaccu.vx overload: acc[i] += zext(op1) * zext(op2[i]) — unsigned widening MAC, u32 -> u64.
vuint64m2_t test_vwmaccu_vx_u64m2(vuint64m2_t acc, uint32_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
566
567 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4(
568 // CHECK-RV64-NEXT: entry:
569 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
570 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
571 //
// vwmaccu.vv overload: acc[i] += zext(op1[i]) * zext(op2[i]) — unsigned widening MAC, u32 -> u64.
vuint64m4_t test_vwmaccu_vv_u64m4(vuint64m4_t acc, vuint32m2_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
576
577 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4(
578 // CHECK-RV64-NEXT: entry:
579 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
580 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
581 //
// vwmaccu.vx overload: acc[i] += zext(op1) * zext(op2[i]) — unsigned widening MAC, u32 -> u64.
vuint64m4_t test_vwmaccu_vx_u64m4(vuint64m4_t acc, uint32_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
586
587 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8(
588 // CHECK-RV64-NEXT: entry:
589 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
590 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
591 //
// vwmaccu.vv overload: acc[i] += zext(op1[i]) * zext(op2[i]) — unsigned widening MAC, u32 -> u64.
vuint64m8_t test_vwmaccu_vv_u64m8(vuint64m8_t acc, vuint32m4_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
596
597 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8(
598 // CHECK-RV64-NEXT: entry:
599 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
600 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
601 //
// vwmaccu.vx overload: acc[i] += zext(op1) * zext(op2[i]) — unsigned widening MAC, u32 -> u64.
vuint64m8_t test_vwmaccu_vx_u64m8(vuint64m8_t acc, uint32_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vwmaccu(acc, op1, op2, vl);
}
606
607 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4(
608 // CHECK-RV64-NEXT: entry:
609 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
610 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
611 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 8 -> 16.
vint16mf4_t test_vwmaccsu_vv_i16mf4(vint16mf4_t acc, vint8mf8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
616
617 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4(
618 // CHECK-RV64-NEXT: entry:
619 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
620 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
621 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 8 -> 16.
vint16mf4_t test_vwmaccsu_vx_i16mf4(vint16mf4_t acc, int8_t op1,
                                    vuint8mf8_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
626
627 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2(
628 // CHECK-RV64-NEXT: entry:
629 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
630 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
631 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 8 -> 16.
vint16mf2_t test_vwmaccsu_vv_i16mf2(vint16mf2_t acc, vint8mf4_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
636
637 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2(
638 // CHECK-RV64-NEXT: entry:
639 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
640 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
641 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 8 -> 16.
vint16mf2_t test_vwmaccsu_vx_i16mf2(vint16mf2_t acc, int8_t op1,
                                    vuint8mf4_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
646
647 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1(
648 // CHECK-RV64-NEXT: entry:
649 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
650 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
651 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 8 -> 16.
vint16m1_t test_vwmaccsu_vv_i16m1(vint16m1_t acc, vint8mf2_t op1,
                                  vuint8mf2_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
656
657 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1(
658 // CHECK-RV64-NEXT: entry:
659 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
660 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
661 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 8 -> 16.
vint16m1_t test_vwmaccsu_vx_i16m1(vint16m1_t acc, int8_t op1, vuint8mf2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
666
667 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2(
668 // CHECK-RV64-NEXT: entry:
669 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
670 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
671 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 8 -> 16.
vint16m2_t test_vwmaccsu_vv_i16m2(vint16m2_t acc, vint8m1_t op1, vuint8m1_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
676
677 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2(
678 // CHECK-RV64-NEXT: entry:
679 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
680 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
681 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 8 -> 16.
vint16m2_t test_vwmaccsu_vx_i16m2(vint16m2_t acc, int8_t op1, vuint8m1_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
686
687 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4(
688 // CHECK-RV64-NEXT: entry:
689 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
690 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
691 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 8 -> 16.
vint16m4_t test_vwmaccsu_vv_i16m4(vint16m4_t acc, vint8m2_t op1, vuint8m2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
696
697 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4(
698 // CHECK-RV64-NEXT: entry:
699 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
700 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
701 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 8 -> 16.
vint16m4_t test_vwmaccsu_vx_i16m4(vint16m4_t acc, int8_t op1, vuint8m2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
706
707 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8(
708 // CHECK-RV64-NEXT: entry:
709 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
710 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
711 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 8 -> 16.
vint16m8_t test_vwmaccsu_vv_i16m8(vint16m8_t acc, vint8m4_t op1, vuint8m4_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
716
717 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8(
718 // CHECK-RV64-NEXT: entry:
719 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
720 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
721 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 8 -> 16.
vint16m8_t test_vwmaccsu_vx_i16m8(vint16m8_t acc, int8_t op1, vuint8m4_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
726
727 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2(
728 // CHECK-RV64-NEXT: entry:
729 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
730 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
731 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 16 -> 32.
vint32mf2_t test_vwmaccsu_vv_i32mf2(vint32mf2_t acc, vint16mf4_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
736
737 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2(
738 // CHECK-RV64-NEXT: entry:
739 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
740 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
741 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 16 -> 32.
vint32mf2_t test_vwmaccsu_vx_i32mf2(vint32mf2_t acc, int16_t op1,
                                    vuint16mf4_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
746
747 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1(
748 // CHECK-RV64-NEXT: entry:
749 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
750 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
751 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 16 -> 32.
vint32m1_t test_vwmaccsu_vv_i32m1(vint32m1_t acc, vint16mf2_t op1,
                                  vuint16mf2_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
756
757 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1(
758 // CHECK-RV64-NEXT: entry:
759 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
760 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
761 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 16 -> 32.
vint32m1_t test_vwmaccsu_vx_i32m1(vint32m1_t acc, int16_t op1, vuint16mf2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
766
767 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2(
768 // CHECK-RV64-NEXT: entry:
769 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
770 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
771 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 16 -> 32.
vint32m2_t test_vwmaccsu_vv_i32m2(vint32m2_t acc, vint16m1_t op1,
                                  vuint16m1_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
776
777 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2(
778 // CHECK-RV64-NEXT: entry:
779 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
780 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
781 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 16 -> 32.
vint32m2_t test_vwmaccsu_vx_i32m2(vint32m2_t acc, int16_t op1, vuint16m1_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
786
787 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4(
788 // CHECK-RV64-NEXT: entry:
789 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
790 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
791 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 16 -> 32.
vint32m4_t test_vwmaccsu_vv_i32m4(vint32m4_t acc, vint16m2_t op1,
                                  vuint16m2_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
796
797 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4(
798 // CHECK-RV64-NEXT: entry:
799 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
800 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
801 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 16 -> 32.
vint32m4_t test_vwmaccsu_vx_i32m4(vint32m4_t acc, int16_t op1, vuint16m2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
806
807 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8(
808 // CHECK-RV64-NEXT: entry:
809 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
810 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
811 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 16 -> 32.
vint32m8_t test_vwmaccsu_vv_i32m8(vint32m8_t acc, vint16m4_t op1,
                                  vuint16m4_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
816
817 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8(
818 // CHECK-RV64-NEXT: entry:
819 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
820 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
821 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 16 -> 32.
vint32m8_t test_vwmaccsu_vx_i32m8(vint32m8_t acc, int16_t op1, vuint16m4_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
826
827 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1(
828 // CHECK-RV64-NEXT: entry:
829 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
830 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
831 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 32 -> 64.
vint64m1_t test_vwmaccsu_vv_i64m1(vint64m1_t acc, vint32mf2_t op1,
                                  vuint32mf2_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
836
837 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1(
838 // CHECK-RV64-NEXT: entry:
839 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
840 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
841 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 32 -> 64.
vint64m1_t test_vwmaccsu_vx_i64m1(vint64m1_t acc, int32_t op1, vuint32mf2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
846
847 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2(
848 // CHECK-RV64-NEXT: entry:
849 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
850 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
851 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 32 -> 64.
vint64m2_t test_vwmaccsu_vv_i64m2(vint64m2_t acc, vint32m1_t op1,
                                  vuint32m1_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
856
857 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2(
858 // CHECK-RV64-NEXT: entry:
859 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
860 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
861 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 32 -> 64.
vint64m2_t test_vwmaccsu_vx_i64m2(vint64m2_t acc, int32_t op1, vuint32m1_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
866
867 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4(
868 // CHECK-RV64-NEXT: entry:
869 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
870 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
871 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 32 -> 64.
vint64m4_t test_vwmaccsu_vv_i64m4(vint64m4_t acc, vint32m2_t op1,
                                  vuint32m2_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
876
877 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4(
878 // CHECK-RV64-NEXT: entry:
879 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
880 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
881 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 32 -> 64.
vint64m4_t test_vwmaccsu_vx_i64m4(vint64m4_t acc, int32_t op1, vuint32m2_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
886
887 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8(
888 // CHECK-RV64-NEXT: entry:
889 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
890 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
891 //
// vwmaccsu.vv overload: acc[i] += sext(op1[i]) * zext(op2[i]) — signed x unsigned widening MAC, 32 -> 64.
vint64m8_t test_vwmaccsu_vv_i64m8(vint64m8_t acc, vint32m4_t op1,
                                  vuint32m4_t op2, size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
896
897 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8(
898 // CHECK-RV64-NEXT: entry:
899 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
900 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
901 //
// vwmaccsu.vx overload: acc[i] += sext(op1) * zext(op2[i]) — signed scalar x unsigned vector, 32 -> 64.
vint64m8_t test_vwmaccsu_vx_i64m8(vint64m8_t acc, int32_t op1, vuint32m4_t op2,
                                  size_t vl) {
  return vwmaccsu(acc, op1, op2, vl);
}
906
907 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4(
908 // CHECK-RV64-NEXT: entry:
909 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
910 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
911 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 8 -> 16.
vint16mf4_t test_vwmaccus_vx_i16mf4(vint16mf4_t acc, uint8_t op1,
                                    vint8mf8_t op2, size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
916
917 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2(
918 // CHECK-RV64-NEXT: entry:
919 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
920 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
921 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 8 -> 16.
vint16mf2_t test_vwmaccus_vx_i16mf2(vint16mf2_t acc, uint8_t op1,
                                    vint8mf4_t op2, size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
926
927 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1(
928 // CHECK-RV64-NEXT: entry:
929 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
930 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
931 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 8 -> 16.
vint16m1_t test_vwmaccus_vx_i16m1(vint16m1_t acc, uint8_t op1, vint8mf2_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
936
937 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2(
938 // CHECK-RV64-NEXT: entry:
939 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
940 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
941 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 8 -> 16.
vint16m2_t test_vwmaccus_vx_i16m2(vint16m2_t acc, uint8_t op1, vint8m1_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
946
947 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4(
948 // CHECK-RV64-NEXT: entry:
949 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
950 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
951 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 8 -> 16.
vint16m4_t test_vwmaccus_vx_i16m4(vint16m4_t acc, uint8_t op1, vint8m2_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
956
957 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8(
958 // CHECK-RV64-NEXT: entry:
959 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], i64 [[VL:%.*]])
960 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
961 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 8 -> 16.
vint16m8_t test_vwmaccus_vx_i16m8(vint16m8_t acc, uint8_t op1, vint8m4_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
966
967 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2(
968 // CHECK-RV64-NEXT: entry:
969 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
970 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
971 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 16 -> 32.
vint32mf2_t test_vwmaccus_vx_i32mf2(vint32mf2_t acc, uint16_t op1,
                                    vint16mf4_t op2, size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
976
977 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1(
978 // CHECK-RV64-NEXT: entry:
979 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
980 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
981 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 16 -> 32.
vint32m1_t test_vwmaccus_vx_i32m1(vint32m1_t acc, uint16_t op1, vint16mf2_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
986
987 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2(
988 // CHECK-RV64-NEXT: entry:
989 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
990 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
991 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 16 -> 32.
vint32m2_t test_vwmaccus_vx_i32m2(vint32m2_t acc, uint16_t op1, vint16m1_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
996
997 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4(
998 // CHECK-RV64-NEXT: entry:
999 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1000 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1001 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 16 -> 32.
vint32m4_t test_vwmaccus_vx_i32m4(vint32m4_t acc, uint16_t op1, vint16m2_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
1006
1007 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8(
1008 // CHECK-RV64-NEXT: entry:
1009 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], i64 [[VL:%.*]])
1010 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1011 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 16 -> 32.
vint32m8_t test_vwmaccus_vx_i32m8(vint32m8_t acc, uint16_t op1, vint16m4_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
1016
1017 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1(
1018 // CHECK-RV64-NEXT: entry:
1019 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1020 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1021 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 32 -> 64.
vint64m1_t test_vwmaccus_vx_i64m1(vint64m1_t acc, uint32_t op1, vint32mf2_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
1026
1027 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2(
1028 // CHECK-RV64-NEXT: entry:
1029 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1030 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1031 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 32 -> 64.
vint64m2_t test_vwmaccus_vx_i64m2(vint64m2_t acc, uint32_t op1, vint32m1_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
1036
1037 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4(
1038 // CHECK-RV64-NEXT: entry:
1039 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1040 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1041 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 32 -> 64.
vint64m4_t test_vwmaccus_vx_i64m4(vint64m4_t acc, uint32_t op1, vint32m2_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
1046
1047 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8(
1048 // CHECK-RV64-NEXT: entry:
1049 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], i64 [[VL:%.*]])
1050 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1051 //
// vwmaccus.vx overload: acc[i] += zext(op1) * sext(op2[i]) — unsigned scalar x signed vector, 32 -> 64.
vint64m8_t test_vwmaccus_vx_i64m8(vint64m8_t acc, uint32_t op1, vint32m4_t op2,
                                  size_t vl) {
  return vwmaccus(acc, op1, op2, vl);
}
1056
1057 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf4_m(
1058 // CHECK-RV64-NEXT: entry:
1059 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1060 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1061 //
// Masked vwmacc.vv overload: acc[i] += sext(op1[i]) * sext(op2[i]) for active (mask-set) elements, 8 -> 16.
vint16mf4_t test_vwmacc_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                    vint8mf8_t op1, vint8mf8_t op2, size_t vl) {
  return vwmacc(mask, acc, op1, op2, vl);
}
1066
1067 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf4_m(
1068 // CHECK-RV64-NEXT: entry:
1069 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmacc.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1070 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1071 //
// Masked vwmacc.vx overload: acc[i] += sext(op1) * sext(op2[i]) for active (mask-set) elements, 8 -> 16.
vint16mf4_t test_vwmacc_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc, int8_t op1,
                                    vint8mf8_t op2, size_t vl) {
  return vwmacc(mask, acc, op1, op2, vl);
}
1076
1077 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16mf2_m(
1078 // CHECK-RV64-NEXT: entry:
1079 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1080 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1081 //
// Masked vwmacc.vv overload: acc[i] += sext(op1[i]) * sext(op2[i]) for active (mask-set) elements, 8 -> 16.
vint16mf2_t test_vwmacc_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                    vint8mf4_t op1, vint8mf4_t op2, size_t vl) {
  return vwmacc(mask, acc, op1, op2, vl);
}
1086
1087 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16mf2_m(
1088 // CHECK-RV64-NEXT: entry:
1089 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmacc.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1090 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1091 //
// Masked vwmacc.vx overload: acc[i] += sext(op1) * sext(op2[i]) for active (mask-set) elements, 8 -> 16.
vint16mf2_t test_vwmacc_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc, int8_t op1,
                                    vint8mf4_t op2, size_t vl) {
  return vwmacc(mask, acc, op1, op2, vl);
}
1096
1097 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m1_m(
1098 // CHECK-RV64-NEXT: entry:
1099 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1100 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1101 //
vint16m1_t test_vwmacc_vv_i16m1_m(vbool16_t mask, vint16m1_t acc, vint8mf2_t op1, vint8mf2_t op2, size_t vl) {
  // Masked .vv form, i8 -> i16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1106
1107 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m1_m(
1108 // CHECK-RV64-NEXT: entry:
1109 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmacc.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1110 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1111 //
vint16m1_t test_vwmacc_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1, vint8mf2_t op2, size_t vl) {
  // Masked .vx form, scalar int8_t op1, i8 -> i16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1116
1117 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m2_m(
1118 // CHECK-RV64-NEXT: entry:
1119 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1120 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1121 //
vint16m2_t test_vwmacc_vv_i16m2_m(vbool8_t mask, vint16m2_t acc, vint8m1_t op1, vint8m1_t op2, size_t vl) {
  // Masked .vv form, i8 -> i16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1126
1127 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m2_m(
1128 // CHECK-RV64-NEXT: entry:
1129 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmacc.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1130 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1131 //
vint16m2_t test_vwmacc_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1, vint8m1_t op2, size_t vl) {
  // Masked .vx form, scalar int8_t op1, i8 -> i16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1136
1137 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m4_m(
1138 // CHECK-RV64-NEXT: entry:
1139 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1140 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1141 //
vint16m4_t test_vwmacc_vv_i16m4_m(vbool4_t mask, vint16m4_t acc, vint8m2_t op1, vint8m2_t op2, size_t vl) {
  // Masked .vv form, i8 -> i16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1146
1147 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m4_m(
1148 // CHECK-RV64-NEXT: entry:
1149 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmacc.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1150 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1151 //
vint16m4_t test_vwmacc_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1, vint8m2_t op2, size_t vl) {
  // Masked .vx form, scalar int8_t op1, i8 -> i16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1156
1157 // CHECK-RV64-LABEL: @test_vwmacc_vv_i16m8_m(
1158 // CHECK-RV64-NEXT: entry:
1159 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1160 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1161 //
vint16m8_t test_vwmacc_vv_i16m8_m(vbool2_t mask, vint16m8_t acc, vint8m4_t op1, vint8m4_t op2, size_t vl) {
  // Masked .vv form, i8 -> i16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1166
1167 // CHECK-RV64-LABEL: @test_vwmacc_vx_i16m8_m(
1168 // CHECK-RV64-NEXT: entry:
1169 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmacc.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1170 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1171 //
vint16m8_t test_vwmacc_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1, vint8m4_t op2, size_t vl) {
  // Masked .vx form, scalar int8_t op1, i8 -> i16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1176
1177 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32mf2_m(
1178 // CHECK-RV64-NEXT: entry:
1179 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1180 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1181 //
vint32mf2_t test_vwmacc_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc, vint16mf4_t op1, vint16mf4_t op2, size_t vl) {
  // Masked .vv form, i16 -> i32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1187
1188 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32mf2_m(
1189 // CHECK-RV64-NEXT: entry:
1190 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmacc.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1191 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1192 //
vint32mf2_t test_vwmacc_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc, int16_t op1, vint16mf4_t op2, size_t vl) {
  // Masked .vx form, scalar int16_t op1, i16 -> i32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1197
1198 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m1_m(
1199 // CHECK-RV64-NEXT: entry:
1200 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1201 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1202 //
vint32m1_t test_vwmacc_vv_i32m1_m(vbool32_t mask, vint32m1_t acc, vint16mf2_t op1, vint16mf2_t op2, size_t vl) {
  // Masked .vv form, i16 -> i32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1207
1208 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m1_m(
1209 // CHECK-RV64-NEXT: entry:
1210 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmacc.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1211 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1212 //
vint32m1_t test_vwmacc_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1, vint16mf2_t op2, size_t vl) {
  // Masked .vx form, scalar int16_t op1, i16 -> i32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1217
1218 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m2_m(
1219 // CHECK-RV64-NEXT: entry:
1220 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1221 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1222 //
vint32m2_t test_vwmacc_vv_i32m2_m(vbool16_t mask, vint32m2_t acc, vint16m1_t op1, vint16m1_t op2, size_t vl) {
  // Masked .vv form, i16 -> i32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1227
1228 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m2_m(
1229 // CHECK-RV64-NEXT: entry:
1230 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmacc.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1231 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1232 //
vint32m2_t test_vwmacc_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1, vint16m1_t op2, size_t vl) {
  // Masked .vx form, scalar int16_t op1, i16 -> i32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1237
1238 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m4_m(
1239 // CHECK-RV64-NEXT: entry:
1240 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1241 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1242 //
vint32m4_t test_vwmacc_vv_i32m4_m(vbool8_t mask, vint32m4_t acc, vint16m2_t op1, vint16m2_t op2, size_t vl) {
  // Masked .vv form, i16 -> i32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1247
1248 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m4_m(
1249 // CHECK-RV64-NEXT: entry:
1250 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmacc.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1251 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1252 //
vint32m4_t test_vwmacc_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1, vint16m2_t op2, size_t vl) {
  // Masked .vx form, scalar int16_t op1, i16 -> i32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1257
1258 // CHECK-RV64-LABEL: @test_vwmacc_vv_i32m8_m(
1259 // CHECK-RV64-NEXT: entry:
1260 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1261 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1262 //
vint32m8_t test_vwmacc_vv_i32m8_m(vbool4_t mask, vint32m8_t acc, vint16m4_t op1, vint16m4_t op2, size_t vl) {
  // Masked .vv form, i16 -> i32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1267
1268 // CHECK-RV64-LABEL: @test_vwmacc_vx_i32m8_m(
1269 // CHECK-RV64-NEXT: entry:
1270 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmacc.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1271 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1272 //
vint32m8_t test_vwmacc_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1, vint16m4_t op2, size_t vl) {
  // Masked .vx form, scalar int16_t op1, i16 -> i32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1277
1278 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m1_m(
1279 // CHECK-RV64-NEXT: entry:
1280 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1281 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1282 //
vint64m1_t test_vwmacc_vv_i64m1_m(vbool64_t mask, vint64m1_t acc, vint32mf2_t op1, vint32mf2_t op2, size_t vl) {
  // Masked .vv form, i32 -> i64 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1287
1288 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m1_m(
1289 // CHECK-RV64-NEXT: entry:
1290 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmacc.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1291 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1292 //
vint64m1_t test_vwmacc_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1, vint32mf2_t op2, size_t vl) {
  // Masked .vx form, scalar int32_t op1, i32 -> i64 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1297
1298 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m2_m(
1299 // CHECK-RV64-NEXT: entry:
1300 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1301 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1302 //
vint64m2_t test_vwmacc_vv_i64m2_m(vbool32_t mask, vint64m2_t acc, vint32m1_t op1, vint32m1_t op2, size_t vl) {
  // Masked .vv form, i32 -> i64 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1307
1308 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m2_m(
1309 // CHECK-RV64-NEXT: entry:
1310 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmacc.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1311 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1312 //
vint64m2_t test_vwmacc_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1, vint32m1_t op2, size_t vl) {
  // Masked .vx form, scalar int32_t op1, i32 -> i64 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1317
1318 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m4_m(
1319 // CHECK-RV64-NEXT: entry:
1320 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1321 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1322 //
vint64m4_t test_vwmacc_vv_i64m4_m(vbool16_t mask, vint64m4_t acc, vint32m2_t op1, vint32m2_t op2, size_t vl) {
  // Masked .vv form, i32 -> i64 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1327
1328 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m4_m(
1329 // CHECK-RV64-NEXT: entry:
1330 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmacc.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1331 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1332 //
vint64m4_t test_vwmacc_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1, vint32m2_t op2, size_t vl) {
  // Masked .vx form, scalar int32_t op1, i32 -> i64 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1337
1338 // CHECK-RV64-LABEL: @test_vwmacc_vv_i64m8_m(
1339 // CHECK-RV64-NEXT: entry:
1340 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1341 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1342 //
vint64m8_t test_vwmacc_vv_i64m8_m(vbool8_t mask, vint64m8_t acc, vint32m4_t op1, vint32m4_t op2, size_t vl) {
  // Masked .vv form, i32 -> i64 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1347
1348 // CHECK-RV64-LABEL: @test_vwmacc_vx_i64m8_m(
1349 // CHECK-RV64-NEXT: entry:
1350 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmacc.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1351 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1352 //
vint64m8_t test_vwmacc_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1, vint32m4_t op2, size_t vl) {
  // Masked .vx form, scalar int32_t op1, i32 -> i64 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmacc.mask lowering.
  return vwmacc(mask, acc, op1, op2, vl);
}
1357
1358 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf4_m(
1359 // CHECK-RV64-NEXT: entry:
1360 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1361 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1362 //
vuint16mf4_t test_vwmaccu_vv_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, vuint8mf8_t op1, vuint8mf8_t op2, size_t vl) {
  // Masked .vv form, unsigned u8 -> u16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1368
1369 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf4_m(
1370 // CHECK-RV64-NEXT: entry:
1371 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1372 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1373 //
vuint16mf4_t test_vwmaccu_vx_u16mf4_m(vbool64_t mask, vuint16mf4_t acc, uint8_t op1, vuint8mf8_t op2, size_t vl) {
  // Masked .vx form, scalar uint8_t op1, u8 -> u16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1378
1379 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16mf2_m(
1380 // CHECK-RV64-NEXT: entry:
1381 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1382 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1383 //
vuint16mf2_t test_vwmaccu_vv_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, vuint8mf4_t op1, vuint8mf4_t op2, size_t vl) {
  // Masked .vv form, unsigned u8 -> u16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1389
1390 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16mf2_m(
1391 // CHECK-RV64-NEXT: entry:
1392 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1393 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1394 //
vuint16mf2_t test_vwmaccu_vx_u16mf2_m(vbool32_t mask, vuint16mf2_t acc, uint8_t op1, vuint8mf4_t op2, size_t vl) {
  // Masked .vx form, scalar uint8_t op1, u8 -> u16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1399
1400 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m1_m(
1401 // CHECK-RV64-NEXT: entry:
1402 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1403 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1404 //
vuint16m1_t test_vwmaccu_vv_u16m1_m(vbool16_t mask, vuint16m1_t acc, vuint8mf2_t op1, vuint8mf2_t op2, size_t vl) {
  // Masked .vv form, unsigned u8 -> u16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1410
1411 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m1_m(
1412 // CHECK-RV64-NEXT: entry:
1413 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1414 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1415 //
vuint16m1_t test_vwmaccu_vx_u16m1_m(vbool16_t mask, vuint16m1_t acc, uint8_t op1, vuint8mf2_t op2, size_t vl) {
  // Masked .vx form, scalar uint8_t op1, u8 -> u16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1420
1421 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m2_m(
1422 // CHECK-RV64-NEXT: entry:
1423 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1424 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1425 //
vuint16m2_t test_vwmaccu_vv_u16m2_m(vbool8_t mask, vuint16m2_t acc, vuint8m1_t op1, vuint8m1_t op2, size_t vl) {
  // Masked .vv form, unsigned u8 -> u16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1430
1431 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m2_m(
1432 // CHECK-RV64-NEXT: entry:
1433 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1434 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1435 //
vuint16m2_t test_vwmaccu_vx_u16m2_m(vbool8_t mask, vuint16m2_t acc, uint8_t op1, vuint8m1_t op2, size_t vl) {
  // Masked .vx form, scalar uint8_t op1, u8 -> u16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1440
1441 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m4_m(
1442 // CHECK-RV64-NEXT: entry:
1443 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1444 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1445 //
vuint16m4_t test_vwmaccu_vv_u16m4_m(vbool4_t mask, vuint16m4_t acc, vuint8m2_t op1, vuint8m2_t op2, size_t vl) {
  // Masked .vv form, unsigned u8 -> u16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1450
1451 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m4_m(
1452 // CHECK-RV64-NEXT: entry:
1453 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1454 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1455 //
vuint16m4_t test_vwmaccu_vx_u16m4_m(vbool4_t mask, vuint16m4_t acc, uint8_t op1, vuint8m2_t op2, size_t vl) {
  // Masked .vx form, scalar uint8_t op1, u8 -> u16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1460
1461 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u16m8_m(
1462 // CHECK-RV64-NEXT: entry:
1463 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1464 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1465 //
vuint16m8_t test_vwmaccu_vv_u16m8_m(vbool2_t mask, vuint16m8_t acc, vuint8m4_t op1, vuint8m4_t op2, size_t vl) {
  // Masked .vv form, unsigned u8 -> u16 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1470
1471 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u16m8_m(
1472 // CHECK-RV64-NEXT: entry:
1473 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1474 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1475 //
vuint16m8_t test_vwmaccu_vx_u16m8_m(vbool2_t mask, vuint16m8_t acc, uint8_t op1, vuint8m4_t op2, size_t vl) {
  // Masked .vx form, scalar uint8_t op1, u8 -> u16 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1480
1481 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32mf2_m(
1482 // CHECK-RV64-NEXT: entry:
1483 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1484 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1485 //
vuint32mf2_t test_vwmaccu_vv_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, vuint16mf4_t op1, vuint16mf4_t op2, size_t vl) {
  // Masked .vv form, unsigned u16 -> u32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1491
1492 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32mf2_m(
1493 // CHECK-RV64-NEXT: entry:
1494 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1495 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1496 //
vuint32mf2_t test_vwmaccu_vx_u32mf2_m(vbool64_t mask, vuint32mf2_t acc, uint16_t op1, vuint16mf4_t op2, size_t vl) {
  // Masked .vx form, scalar uint16_t op1, u16 -> u32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1502
1503 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m1_m(
1504 // CHECK-RV64-NEXT: entry:
1505 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1506 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1507 //
vuint32m1_t test_vwmaccu_vv_u32m1_m(vbool32_t mask, vuint32m1_t acc, vuint16mf2_t op1, vuint16mf2_t op2, size_t vl) {
  // Masked .vv form, unsigned u16 -> u32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1513
1514 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m1_m(
1515 // CHECK-RV64-NEXT: entry:
1516 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1517 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1518 //
vuint32m1_t test_vwmaccu_vx_u32m1_m(vbool32_t mask, vuint32m1_t acc, uint16_t op1, vuint16mf2_t op2, size_t vl) {
  // Masked .vx form, scalar uint16_t op1, u16 -> u32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1523
1524 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m2_m(
1525 // CHECK-RV64-NEXT: entry:
1526 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1527 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1528 //
vuint32m2_t test_vwmaccu_vv_u32m2_m(vbool16_t mask, vuint32m2_t acc, vuint16m1_t op1, vuint16m1_t op2, size_t vl) {
  // Masked .vv form, unsigned u16 -> u32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1534
1535 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m2_m(
1536 // CHECK-RV64-NEXT: entry:
1537 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1538 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1539 //
vuint32m2_t test_vwmaccu_vx_u32m2_m(vbool16_t mask, vuint32m2_t acc, uint16_t op1, vuint16m1_t op2, size_t vl) {
  // Masked .vx form, scalar uint16_t op1, u16 -> u32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1544
1545 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m4_m(
1546 // CHECK-RV64-NEXT: entry:
1547 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1548 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1549 //
vuint32m4_t test_vwmaccu_vv_u32m4_m(vbool8_t mask, vuint32m4_t acc, vuint16m2_t op1, vuint16m2_t op2, size_t vl) {
  // Masked .vv form, unsigned u16 -> u32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1555
1556 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m4_m(
1557 // CHECK-RV64-NEXT: entry:
1558 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1559 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1560 //
vuint32m4_t test_vwmaccu_vx_u32m4_m(vbool8_t mask, vuint32m4_t acc, uint16_t op1, vuint16m2_t op2, size_t vl) {
  // Masked .vx form, scalar uint16_t op1, u16 -> u32 widening MAC; CHECK lines
  // above pin the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1565
1566 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u32m8_m(
1567 // CHECK-RV64-NEXT: entry:
1568 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1569 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1570 //
vuint32m8_t test_vwmaccu_vv_u32m8_m(vbool4_t mask, vuint32m8_t acc, vuint16m4_t op1, vuint16m4_t op2, size_t vl) {
  // Masked .vv form, unsigned u16 -> u32 widening MAC; CHECK lines above pin
  // the @llvm.riscv.vwmaccu.mask lowering.
  return vwmaccu(mask, acc, op1, op2, vl);
}
1576
1577 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u32m8_m(
1578 // CHECK-RV64-NEXT: entry:
1579 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1580 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1581 //
// Masked vector-scalar vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint32m8_t test_vwmaccu_vx_u32m8_m(vbool4_t mask, vuint32m8_t acc,
                                    uint16_t op1, vuint16m4_t op2, size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1586
1587 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m1_m(
1588 // CHECK-RV64-NEXT: entry:
1589 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1590 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1591 //
// Masked vector-vector vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m1_t test_vwmaccu_vv_u64m1_m(vbool64_t mask, vuint64m1_t acc,
                                    vuint32mf2_t op1, vuint32mf2_t op2,
                                    size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1597
1598 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m1_m(
1599 // CHECK-RV64-NEXT: entry:
1600 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1601 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1602 //
// Masked vector-scalar vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m1_t test_vwmaccu_vx_u64m1_m(vbool64_t mask, vuint64m1_t acc,
                                    uint32_t op1, vuint32mf2_t op2, size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1607
1608 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m2_m(
1609 // CHECK-RV64-NEXT: entry:
1610 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1611 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1612 //
// Masked vector-vector vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m2_t test_vwmaccu_vv_u64m2_m(vbool32_t mask, vuint64m2_t acc,
                                    vuint32m1_t op1, vuint32m1_t op2,
                                    size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1618
1619 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m2_m(
1620 // CHECK-RV64-NEXT: entry:
1621 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1622 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1623 //
// Masked vector-scalar vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m2_t test_vwmaccu_vx_u64m2_m(vbool32_t mask, vuint64m2_t acc,
                                    uint32_t op1, vuint32m1_t op2, size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1628
1629 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m4_m(
1630 // CHECK-RV64-NEXT: entry:
1631 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1632 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1633 //
// Masked vector-vector vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m4_t test_vwmaccu_vv_u64m4_m(vbool16_t mask, vuint64m4_t acc,
                                    vuint32m2_t op1, vuint32m2_t op2,
                                    size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1639
1640 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m4_m(
1641 // CHECK-RV64-NEXT: entry:
1642 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1643 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1644 //
// Masked vector-scalar vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m4_t test_vwmaccu_vx_u64m4_m(vbool16_t mask, vuint64m4_t acc,
                                    uint32_t op1, vuint32m2_t op2, size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1649
1650 // CHECK-RV64-LABEL: @test_vwmaccu_vv_u64m8_m(
1651 // CHECK-RV64-NEXT: entry:
1652 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1653 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1654 //
// Masked vector-vector vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m8_t test_vwmaccu_vv_u64m8_m(vbool8_t mask, vuint64m8_t acc,
                                    vuint32m4_t op1, vuint32m4_t op2,
                                    size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1660
1661 // CHECK-RV64-LABEL: @test_vwmaccu_vx_u64m8_m(
1662 // CHECK-RV64-NEXT: entry:
1663 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1664 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1665 //
// Masked vector-scalar vwmaccu (overloaded): unsigned widening multiply-accumulate; lowering pinned by the CHECK lines above.
vuint64m8_t test_vwmaccu_vx_u64m8_m(vbool8_t mask, vuint64m8_t acc,
                                    uint32_t op1, vuint32m4_t op2, size_t vl) {
  return vwmaccu(mask, acc, op1, op2, vl);
}
1670
1671 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf4_m(
1672 // CHECK-RV64-NEXT: entry:
1673 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.nxv1i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], <vscale x 1 x i8> [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1674 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1675 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16mf4_t test_vwmaccsu_vv_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                      vint8mf8_t op1, vuint8mf8_t op2,
                                      size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1681
1682 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf4_m(
1683 // CHECK-RV64-NEXT: entry:
1684 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccsu.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1685 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1686 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16mf4_t test_vwmaccsu_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                      int8_t op1, vuint8mf8_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1691
1692 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16mf2_m(
1693 // CHECK-RV64-NEXT: entry:
1694 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.nxv2i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], <vscale x 2 x i8> [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1695 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1696 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16mf2_t test_vwmaccsu_vv_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                      vint8mf4_t op1, vuint8mf4_t op2,
                                      size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1702
1703 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16mf2_m(
1704 // CHECK-RV64-NEXT: entry:
1705 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccsu.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1706 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1707 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16mf2_t test_vwmaccsu_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                      int8_t op1, vuint8mf4_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1712
1713 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m1_m(
1714 // CHECK-RV64-NEXT: entry:
1715 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.nxv4i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], <vscale x 4 x i8> [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1716 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1717 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m1_t test_vwmaccsu_vv_i16m1_m(vbool16_t mask, vint16m1_t acc,
                                    vint8mf2_t op1, vuint8mf2_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1723
1724 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m1_m(
1725 // CHECK-RV64-NEXT: entry:
1726 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccsu.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1727 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
1728 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m1_t test_vwmaccsu_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, int8_t op1,
                                    vuint8mf2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1733
1734 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m2_m(
1735 // CHECK-RV64-NEXT: entry:
1736 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.nxv8i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], <vscale x 8 x i8> [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1737 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1738 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m2_t test_vwmaccsu_vv_i16m2_m(vbool8_t mask, vint16m2_t acc,
                                    vint8m1_t op1, vuint8m1_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1743
1744 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m2_m(
1745 // CHECK-RV64-NEXT: entry:
1746 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccsu.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1747 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
1748 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m2_t test_vwmaccsu_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, int8_t op1,
                                    vuint8m1_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1753
1754 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m4_m(
1755 // CHECK-RV64-NEXT: entry:
1756 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.nxv16i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], <vscale x 16 x i8> [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1757 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1758 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m4_t test_vwmaccsu_vv_i16m4_m(vbool4_t mask, vint16m4_t acc,
                                    vint8m2_t op1, vuint8m2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1763
1764 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m4_m(
1765 // CHECK-RV64-NEXT: entry:
1766 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccsu.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1767 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
1768 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m4_t test_vwmaccsu_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, int8_t op1,
                                    vuint8m2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1773
1774 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i16m8_m(
1775 // CHECK-RV64-NEXT: entry:
1776 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.nxv32i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], <vscale x 32 x i8> [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1777 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1778 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m8_t test_vwmaccsu_vv_i16m8_m(vbool2_t mask, vint16m8_t acc,
                                    vint8m4_t op1, vuint8m4_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1783
1784 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i16m8_m(
1785 // CHECK-RV64-NEXT: entry:
1786 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccsu.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1787 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
1788 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m8_t test_vwmaccsu_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, int8_t op1,
                                    vuint8m4_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1793
1794 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32mf2_m(
1795 // CHECK-RV64-NEXT: entry:
1796 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.nxv1i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], <vscale x 1 x i16> [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1797 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1798 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32mf2_t test_vwmaccsu_vv_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                      vint16mf4_t op1, vuint16mf4_t op2,
                                      size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1804
1805 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32mf2_m(
1806 // CHECK-RV64-NEXT: entry:
1807 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccsu.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1808 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
1809 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32mf2_t test_vwmaccsu_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                      int16_t op1, vuint16mf4_t op2,
                                      size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1815
1816 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m1_m(
1817 // CHECK-RV64-NEXT: entry:
1818 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.nxv2i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], <vscale x 2 x i16> [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1819 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1820 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m1_t test_vwmaccsu_vv_i32m1_m(vbool32_t mask, vint32m1_t acc,
                                    vint16mf2_t op1, vuint16mf2_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1826
1827 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m1_m(
1828 // CHECK-RV64-NEXT: entry:
1829 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccsu.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1830 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
1831 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m1_t test_vwmaccsu_vx_i32m1_m(vbool32_t mask, vint32m1_t acc, int16_t op1,
                                    vuint16mf2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1836
1837 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m2_m(
1838 // CHECK-RV64-NEXT: entry:
1839 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.nxv4i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], <vscale x 4 x i16> [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1840 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1841 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m2_t test_vwmaccsu_vv_i32m2_m(vbool16_t mask, vint32m2_t acc,
                                    vint16m1_t op1, vuint16m1_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1847
1848 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m2_m(
1849 // CHECK-RV64-NEXT: entry:
1850 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccsu.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1851 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
1852 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m2_t test_vwmaccsu_vx_i32m2_m(vbool16_t mask, vint32m2_t acc, int16_t op1,
                                    vuint16m1_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1857
1858 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m4_m(
1859 // CHECK-RV64-NEXT: entry:
1860 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.nxv8i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], <vscale x 8 x i16> [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1861 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1862 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m4_t test_vwmaccsu_vv_i32m4_m(vbool8_t mask, vint32m4_t acc,
                                    vint16m2_t op1, vuint16m2_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1868
1869 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m4_m(
1870 // CHECK-RV64-NEXT: entry:
1871 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccsu.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1872 // CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
1873 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m4_t test_vwmaccsu_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, int16_t op1,
                                    vuint16m2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1878
1879 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i32m8_m(
1880 // CHECK-RV64-NEXT: entry:
1881 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.nxv16i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], <vscale x 16 x i16> [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1882 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1883 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m8_t test_vwmaccsu_vv_i32m8_m(vbool4_t mask, vint32m8_t acc,
                                    vint16m4_t op1, vuint16m4_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1889
1890 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i32m8_m(
1891 // CHECK-RV64-NEXT: entry:
1892 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccsu.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1893 // CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
1894 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m8_t test_vwmaccsu_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, int16_t op1,
                                    vuint16m4_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1899
1900 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m1_m(
1901 // CHECK-RV64-NEXT: entry:
1902 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.nxv1i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], <vscale x 1 x i32> [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1903 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1904 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m1_t test_vwmaccsu_vv_i64m1_m(vbool64_t mask, vint64m1_t acc,
                                    vint32mf2_t op1, vuint32mf2_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1910
1911 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m1_m(
1912 // CHECK-RV64-NEXT: entry:
1913 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccsu.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1914 // CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
1915 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m1_t test_vwmaccsu_vx_i64m1_m(vbool64_t mask, vint64m1_t acc, int32_t op1,
                                    vuint32mf2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1920
1921 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m2_m(
1922 // CHECK-RV64-NEXT: entry:
1923 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.nxv2i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], <vscale x 2 x i32> [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1924 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1925 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m2_t test_vwmaccsu_vv_i64m2_m(vbool32_t mask, vint64m2_t acc,
                                    vint32m1_t op1, vuint32m1_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1931
1932 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m2_m(
1933 // CHECK-RV64-NEXT: entry:
1934 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccsu.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1935 // CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
1936 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m2_t test_vwmaccsu_vx_i64m2_m(vbool32_t mask, vint64m2_t acc, int32_t op1,
                                    vuint32m1_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1941
1942 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m4_m(
1943 // CHECK-RV64-NEXT: entry:
1944 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.nxv4i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], <vscale x 4 x i32> [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1945 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1946 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m4_t test_vwmaccsu_vv_i64m4_m(vbool16_t mask, vint64m4_t acc,
                                    vint32m2_t op1, vuint32m2_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1952
1953 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m4_m(
1954 // CHECK-RV64-NEXT: entry:
1955 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccsu.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1956 // CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
1957 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m4_t test_vwmaccsu_vx_i64m4_m(vbool16_t mask, vint64m4_t acc, int32_t op1,
                                    vuint32m2_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1962
1963 // CHECK-RV64-LABEL: @test_vwmaccsu_vv_i64m8_m(
1964 // CHECK-RV64-NEXT: entry:
1965 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.nxv8i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], <vscale x 8 x i32> [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1966 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1967 //
// Masked vector-vector vwmaccsu (overloaded): signed op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m8_t test_vwmaccsu_vv_i64m8_m(vbool8_t mask, vint64m8_t acc,
                                    vint32m4_t op1, vuint32m4_t op2,
                                    size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1973
1974 // CHECK-RV64-LABEL: @test_vwmaccsu_vx_i64m8_m(
1975 // CHECK-RV64-NEXT: entry:
1976 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccsu.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1977 // CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
1978 //
// Masked vector-scalar vwmaccsu (overloaded): signed scalar op1 * unsigned op2, widening accumulate; lowering pinned by the CHECK lines above.
vint64m8_t test_vwmaccsu_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, int32_t op1,
                                    vuint32m4_t op2, size_t vl) {
  return vwmaccsu(mask, acc, op1, op2, vl);
}
1983
1984 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf4_m(
1985 // CHECK-RV64-NEXT: entry:
1986 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i16> @llvm.riscv.vwmaccus.mask.nxv1i16.i8.nxv1i8.i64(<vscale x 1 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 1 x i8> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1987 // CHECK-RV64-NEXT: ret <vscale x 1 x i16> [[TMP0]]
1988 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16mf4_t test_vwmaccus_vx_i16mf4_m(vbool64_t mask, vint16mf4_t acc,
                                      uint8_t op1, vint8mf8_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
1993
1994 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16mf2_m(
1995 // CHECK-RV64-NEXT: entry:
1996 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i16> @llvm.riscv.vwmaccus.mask.nxv2i16.i8.nxv2i8.i64(<vscale x 2 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 2 x i8> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
1997 // CHECK-RV64-NEXT: ret <vscale x 2 x i16> [[TMP0]]
1998 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16mf2_t test_vwmaccus_vx_i16mf2_m(vbool32_t mask, vint16mf2_t acc,
                                      uint8_t op1, vint8mf4_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2003
2004 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m1_m(
2005 // CHECK-RV64-NEXT: entry:
2006 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i16> @llvm.riscv.vwmaccus.mask.nxv4i16.i8.nxv4i8.i64(<vscale x 4 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 4 x i8> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2007 // CHECK-RV64-NEXT: ret <vscale x 4 x i16> [[TMP0]]
2008 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m1_t test_vwmaccus_vx_i16m1_m(vbool16_t mask, vint16m1_t acc, uint8_t op1,
                                    vint8mf2_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2013
2014 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m2_m(
2015 // CHECK-RV64-NEXT: entry:
2016 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i16> @llvm.riscv.vwmaccus.mask.nxv8i16.i8.nxv8i8.i64(<vscale x 8 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 8 x i8> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2017 // CHECK-RV64-NEXT: ret <vscale x 8 x i16> [[TMP0]]
2018 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m2_t test_vwmaccus_vx_i16m2_m(vbool8_t mask, vint16m2_t acc, uint8_t op1,
                                    vint8m1_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2023
2024 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m4_m(
2025 // CHECK-RV64-NEXT: entry:
2026 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i16> @llvm.riscv.vwmaccus.mask.nxv16i16.i8.nxv16i8.i64(<vscale x 16 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 16 x i8> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2027 // CHECK-RV64-NEXT: ret <vscale x 16 x i16> [[TMP0]]
2028 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m4_t test_vwmaccus_vx_i16m4_m(vbool4_t mask, vint16m4_t acc, uint8_t op1,
                                    vint8m2_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2033
2034 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i16m8_m(
2035 // CHECK-RV64-NEXT: entry:
2036 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 32 x i16> @llvm.riscv.vwmaccus.mask.nxv32i16.i8.nxv32i8.i64(<vscale x 32 x i16> [[ACC:%.*]], i8 [[OP1:%.*]], <vscale x 32 x i8> [[OP2:%.*]], <vscale x 32 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2037 // CHECK-RV64-NEXT: ret <vscale x 32 x i16> [[TMP0]]
2038 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint16m8_t test_vwmaccus_vx_i16m8_m(vbool2_t mask, vint16m8_t acc, uint8_t op1,
                                    vint8m4_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2043
2044 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32mf2_m(
2045 // CHECK-RV64-NEXT: entry:
2046 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i32> @llvm.riscv.vwmaccus.mask.nxv1i32.i16.nxv1i16.i64(<vscale x 1 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 1 x i16> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2047 // CHECK-RV64-NEXT: ret <vscale x 1 x i32> [[TMP0]]
2048 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32mf2_t test_vwmaccus_vx_i32mf2_m(vbool64_t mask, vint32mf2_t acc,
                                      uint16_t op1, vint16mf4_t op2,
                                      size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2054
2055 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m1_m(
2056 // CHECK-RV64-NEXT: entry:
2057 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i32> @llvm.riscv.vwmaccus.mask.nxv2i32.i16.nxv2i16.i64(<vscale x 2 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 2 x i16> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2058 // CHECK-RV64-NEXT: ret <vscale x 2 x i32> [[TMP0]]
2059 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m1_t test_vwmaccus_vx_i32m1_m(vbool32_t mask, vint32m1_t acc,
                                    uint16_t op1, vint16mf2_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2064
2065 // CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m2_m(
2066 // CHECK-RV64-NEXT: entry:
2067 // CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i32> @llvm.riscv.vwmaccus.mask.nxv4i32.i16.nxv4i16.i64(<vscale x 4 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 4 x i16> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
2068 // CHECK-RV64-NEXT: ret <vscale x 4 x i32> [[TMP0]]
2069 //
// Masked vector-scalar vwmaccus (overloaded): unsigned scalar op1 * signed op2, widening accumulate; lowering pinned by the CHECK lines above.
vint32m2_t test_vwmaccus_vx_i32m2_m(vbool16_t mask, vint32m2_t acc,
                                    uint16_t op1, vint16m1_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2074
// Masked vwmaccus, i16 -> i32 widening (m2 source, m4 destination).
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i32> @llvm.riscv.vwmaccus.mask.nxv8i32.i16.nxv8i16.i64(<vscale x 8 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 8 x i16> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i32> [[TMP0]]
//
vint32m4_t test_vwmaccus_vx_i32m4_m(vbool8_t mask, vint32m4_t acc, uint16_t op1,
                                    vint16m2_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2084
// Masked vwmaccus, i16 -> i32 widening (m4 source, m8 destination).
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i32m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 16 x i32> @llvm.riscv.vwmaccus.mask.nxv16i32.i16.nxv16i16.i64(<vscale x 16 x i32> [[ACC:%.*]], i16 [[OP1:%.*]], <vscale x 16 x i16> [[OP2:%.*]], <vscale x 16 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 16 x i32> [[TMP0]]
//
vint32m8_t test_vwmaccus_vx_i32m8_m(vbool4_t mask, vint32m8_t acc, uint16_t op1,
                                    vint16m4_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2094
// Masked vwmaccus, i32 -> i64 widening (mf2 source, m1 destination):
// unsigned scalar op1 times signed vector op2, accumulated into acc.
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m1_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 1 x i64> @llvm.riscv.vwmaccus.mask.nxv1i64.i32.nxv1i32.i64(<vscale x 1 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 1 x i32> [[OP2:%.*]], <vscale x 1 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 1 x i64> [[TMP0]]
//
vint64m1_t test_vwmaccus_vx_i64m1_m(vbool64_t mask, vint64m1_t acc,
                                    uint32_t op1, vint32mf2_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2104
// Masked vwmaccus, i32 -> i64 widening (m1 source, m2 destination).
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m2_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 2 x i64> @llvm.riscv.vwmaccus.mask.nxv2i64.i32.nxv2i32.i64(<vscale x 2 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 2 x i32> [[OP2:%.*]], <vscale x 2 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 2 x i64> [[TMP0]]
//
vint64m2_t test_vwmaccus_vx_i64m2_m(vbool32_t mask, vint64m2_t acc,
                                    uint32_t op1, vint32m1_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2114
// Masked vwmaccus, i32 -> i64 widening (m2 source, m4 destination).
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m4_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 4 x i64> @llvm.riscv.vwmaccus.mask.nxv4i64.i32.nxv4i32.i64(<vscale x 4 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 4 x i32> [[OP2:%.*]], <vscale x 4 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 4 x i64> [[TMP0]]
//
vint64m4_t test_vwmaccus_vx_i64m4_m(vbool16_t mask, vint64m4_t acc,
                                    uint32_t op1, vint32m2_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2124
// Masked vwmaccus, i32 -> i64 widening (m4 source, m8 destination).
// CHECK-RV64-LABEL: @test_vwmaccus_vx_i64m8_m(
// CHECK-RV64-NEXT: entry:
// CHECK-RV64-NEXT: [[TMP0:%.*]] = call <vscale x 8 x i64> @llvm.riscv.vwmaccus.mask.nxv8i64.i32.nxv8i32.i64(<vscale x 8 x i64> [[ACC:%.*]], i32 [[OP1:%.*]], <vscale x 8 x i32> [[OP2:%.*]], <vscale x 8 x i1> [[MASK:%.*]], i64 [[VL:%.*]])
// CHECK-RV64-NEXT: ret <vscale x 8 x i64> [[TMP0]]
//
vint64m8_t test_vwmaccus_vx_i64m8_m(vbool8_t mask, vint64m8_t acc, uint32_t op1,
                                    vint32m4_t op2, size_t vl) {
  return vwmaccus(mask, acc, op1, op2, vl);
}
2134