1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+experimental-v,+d -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
4declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
5  <vscale x 1 x i16>,
6  <vscale x 1 x i8>,
7  <vscale x 1 x i8>,
8  i64);
9
10define <vscale x 1 x i16>  @intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
11; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i16_nxv1i8_nxv1i8:
12; CHECK:       # %bb.0: # %entry
13; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
14; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
15; CHECK-NEXT:    ret
16entry:
17  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.nxv1i8(
18    <vscale x 1 x i16> %0,
19    <vscale x 1 x i8> %1,
20    <vscale x 1 x i8> %2,
21    i64 %3)
22
23  ret <vscale x 1 x i16> %a
24}
25
26declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
27  <vscale x 1 x i16>,
28  <vscale x 1 x i8>,
29  <vscale x 1 x i8>,
30  <vscale x 1 x i1>,
31  i64);
32
33define <vscale x 1 x i16>  @intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
34; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i16_nxv1i8_nxv1i8:
35; CHECK:       # %bb.0: # %entry
36; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
37; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
38; CHECK-NEXT:    ret
39entry:
40  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.nxv1i8(
41    <vscale x 1 x i16> %0,
42    <vscale x 1 x i8> %1,
43    <vscale x 1 x i8> %2,
44    <vscale x 1 x i1> %3,
45    i64 %4)
46
47  ret <vscale x 1 x i16> %a
48}
49
50declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
51  <vscale x 2 x i16>,
52  <vscale x 2 x i8>,
53  <vscale x 2 x i8>,
54  i64);
55
56define <vscale x 2 x i16>  @intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
57; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i16_nxv2i8_nxv2i8:
58; CHECK:       # %bb.0: # %entry
59; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
60; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
61; CHECK-NEXT:    ret
62entry:
63  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.nxv2i8(
64    <vscale x 2 x i16> %0,
65    <vscale x 2 x i8> %1,
66    <vscale x 2 x i8> %2,
67    i64 %3)
68
69  ret <vscale x 2 x i16> %a
70}
71
72declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
73  <vscale x 2 x i16>,
74  <vscale x 2 x i8>,
75  <vscale x 2 x i8>,
76  <vscale x 2 x i1>,
77  i64);
78
79define <vscale x 2 x i16>  @intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
80; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i16_nxv2i8_nxv2i8:
81; CHECK:       # %bb.0: # %entry
82; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
83; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
84; CHECK-NEXT:    ret
85entry:
86  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.nxv2i8(
87    <vscale x 2 x i16> %0,
88    <vscale x 2 x i8> %1,
89    <vscale x 2 x i8> %2,
90    <vscale x 2 x i1> %3,
91    i64 %4)
92
93  ret <vscale x 2 x i16> %a
94}
95
96declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
97  <vscale x 4 x i16>,
98  <vscale x 4 x i8>,
99  <vscale x 4 x i8>,
100  i64);
101
102define <vscale x 4 x i16>  @intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
103; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i16_nxv4i8_nxv4i8:
104; CHECK:       # %bb.0: # %entry
105; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
106; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
107; CHECK-NEXT:    ret
108entry:
109  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.nxv4i8(
110    <vscale x 4 x i16> %0,
111    <vscale x 4 x i8> %1,
112    <vscale x 4 x i8> %2,
113    i64 %3)
114
115  ret <vscale x 4 x i16> %a
116}
117
118declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
119  <vscale x 4 x i16>,
120  <vscale x 4 x i8>,
121  <vscale x 4 x i8>,
122  <vscale x 4 x i1>,
123  i64);
124
125define <vscale x 4 x i16>  @intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
126; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i16_nxv4i8_nxv4i8:
127; CHECK:       # %bb.0: # %entry
128; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
129; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
130; CHECK-NEXT:    ret
131entry:
132  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.nxv4i8(
133    <vscale x 4 x i16> %0,
134    <vscale x 4 x i8> %1,
135    <vscale x 4 x i8> %2,
136    <vscale x 4 x i1> %3,
137    i64 %4)
138
139  ret <vscale x 4 x i16> %a
140}
141
142declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
143  <vscale x 8 x i16>,
144  <vscale x 8 x i8>,
145  <vscale x 8 x i8>,
146  i64);
147
148define <vscale x 8 x i16>  @intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
149; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i16_nxv8i8_nxv8i8:
150; CHECK:       # %bb.0: # %entry
151; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
152; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
153; CHECK-NEXT:    ret
154entry:
155  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.nxv8i8(
156    <vscale x 8 x i16> %0,
157    <vscale x 8 x i8> %1,
158    <vscale x 8 x i8> %2,
159    i64 %3)
160
161  ret <vscale x 8 x i16> %a
162}
163
164declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
165  <vscale x 8 x i16>,
166  <vscale x 8 x i8>,
167  <vscale x 8 x i8>,
168  <vscale x 8 x i1>,
169  i64);
170
171define <vscale x 8 x i16>  @intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
172; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i16_nxv8i8_nxv8i8:
173; CHECK:       # %bb.0: # %entry
174; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
175; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
176; CHECK-NEXT:    ret
177entry:
178  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.nxv8i8(
179    <vscale x 8 x i16> %0,
180    <vscale x 8 x i8> %1,
181    <vscale x 8 x i8> %2,
182    <vscale x 8 x i1> %3,
183    i64 %4)
184
185  ret <vscale x 8 x i16> %a
186}
187
188declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
189  <vscale x 16 x i16>,
190  <vscale x 16 x i8>,
191  <vscale x 16 x i8>,
192  i64);
193
194define <vscale x 16 x i16>  @intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
195; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i16_nxv16i8_nxv16i8:
196; CHECK:       # %bb.0: # %entry
197; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
198; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
199; CHECK-NEXT:    ret
200entry:
201  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.nxv16i8(
202    <vscale x 16 x i16> %0,
203    <vscale x 16 x i8> %1,
204    <vscale x 16 x i8> %2,
205    i64 %3)
206
207  ret <vscale x 16 x i16> %a
208}
209
210declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
211  <vscale x 16 x i16>,
212  <vscale x 16 x i8>,
213  <vscale x 16 x i8>,
214  <vscale x 16 x i1>,
215  i64);
216
217define <vscale x 16 x i16>  @intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
218; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i16_nxv16i8_nxv16i8:
219; CHECK:       # %bb.0: # %entry
220; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
221; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
222; CHECK-NEXT:    ret
223entry:
224  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.nxv16i8(
225    <vscale x 16 x i16> %0,
226    <vscale x 16 x i8> %1,
227    <vscale x 16 x i8> %2,
228    <vscale x 16 x i1> %3,
229    i64 %4)
230
231  ret <vscale x 16 x i16> %a
232}
233
234declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
235  <vscale x 32 x i16>,
236  <vscale x 32 x i8>,
237  <vscale x 32 x i8>,
238  i64);
239
240define <vscale x 32 x i16>  @intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
241; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv32i16_nxv32i8_nxv32i8:
242; CHECK:       # %bb.0: # %entry
243; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
244; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
245; CHECK-NEXT:    ret
246entry:
247  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.nxv32i8(
248    <vscale x 32 x i16> %0,
249    <vscale x 32 x i8> %1,
250    <vscale x 32 x i8> %2,
251    i64 %3)
252
253  ret <vscale x 32 x i16> %a
254}
255
256declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
257  <vscale x 32 x i16>,
258  <vscale x 32 x i8>,
259  <vscale x 32 x i8>,
260  <vscale x 32 x i1>,
261  i64);
262
263define <vscale x 32 x i16>  @intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
264; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv32i16_nxv32i8_nxv32i8:
265; CHECK:       # %bb.0: # %entry
266; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
267; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
268; CHECK-NEXT:    ret
269entry:
270  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.nxv32i8(
271    <vscale x 32 x i16> %0,
272    <vscale x 32 x i8> %1,
273    <vscale x 32 x i8> %2,
274    <vscale x 32 x i1> %3,
275    i64 %4)
276
277  ret <vscale x 32 x i16> %a
278}
279
280declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
281  <vscale x 1 x i32>,
282  <vscale x 1 x i16>,
283  <vscale x 1 x i16>,
284  i64);
285
286define <vscale x 1 x i32>  @intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
287; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i32_nxv1i16_nxv1i16:
288; CHECK:       # %bb.0: # %entry
289; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
290; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
291; CHECK-NEXT:    ret
292entry:
293  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.nxv1i16(
294    <vscale x 1 x i32> %0,
295    <vscale x 1 x i16> %1,
296    <vscale x 1 x i16> %2,
297    i64 %3)
298
299  ret <vscale x 1 x i32> %a
300}
301
302declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
303  <vscale x 1 x i32>,
304  <vscale x 1 x i16>,
305  <vscale x 1 x i16>,
306  <vscale x 1 x i1>,
307  i64);
308
309define <vscale x 1 x i32>  @intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
310; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i32_nxv1i16_nxv1i16:
311; CHECK:       # %bb.0: # %entry
312; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
313; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
314; CHECK-NEXT:    ret
315entry:
316  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.nxv1i16(
317    <vscale x 1 x i32> %0,
318    <vscale x 1 x i16> %1,
319    <vscale x 1 x i16> %2,
320    <vscale x 1 x i1> %3,
321    i64 %4)
322
323  ret <vscale x 1 x i32> %a
324}
325
326declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
327  <vscale x 2 x i32>,
328  <vscale x 2 x i16>,
329  <vscale x 2 x i16>,
330  i64);
331
332define <vscale x 2 x i32>  @intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
333; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i32_nxv2i16_nxv2i16:
334; CHECK:       # %bb.0: # %entry
335; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
336; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
337; CHECK-NEXT:    ret
338entry:
339  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.nxv2i16(
340    <vscale x 2 x i32> %0,
341    <vscale x 2 x i16> %1,
342    <vscale x 2 x i16> %2,
343    i64 %3)
344
345  ret <vscale x 2 x i32> %a
346}
347
348declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
349  <vscale x 2 x i32>,
350  <vscale x 2 x i16>,
351  <vscale x 2 x i16>,
352  <vscale x 2 x i1>,
353  i64);
354
355define <vscale x 2 x i32>  @intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
356; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i32_nxv2i16_nxv2i16:
357; CHECK:       # %bb.0: # %entry
358; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
359; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
360; CHECK-NEXT:    ret
361entry:
362  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.nxv2i16(
363    <vscale x 2 x i32> %0,
364    <vscale x 2 x i16> %1,
365    <vscale x 2 x i16> %2,
366    <vscale x 2 x i1> %3,
367    i64 %4)
368
369  ret <vscale x 2 x i32> %a
370}
371
372declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
373  <vscale x 4 x i32>,
374  <vscale x 4 x i16>,
375  <vscale x 4 x i16>,
376  i64);
377
378define <vscale x 4 x i32>  @intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
379; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i32_nxv4i16_nxv4i16:
380; CHECK:       # %bb.0: # %entry
381; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
382; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
383; CHECK-NEXT:    ret
384entry:
385  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.nxv4i16(
386    <vscale x 4 x i32> %0,
387    <vscale x 4 x i16> %1,
388    <vscale x 4 x i16> %2,
389    i64 %3)
390
391  ret <vscale x 4 x i32> %a
392}
393
394declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
395  <vscale x 4 x i32>,
396  <vscale x 4 x i16>,
397  <vscale x 4 x i16>,
398  <vscale x 4 x i1>,
399  i64);
400
401define <vscale x 4 x i32>  @intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
402; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i32_nxv4i16_nxv4i16:
403; CHECK:       # %bb.0: # %entry
404; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
405; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
406; CHECK-NEXT:    ret
407entry:
408  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.nxv4i16(
409    <vscale x 4 x i32> %0,
410    <vscale x 4 x i16> %1,
411    <vscale x 4 x i16> %2,
412    <vscale x 4 x i1> %3,
413    i64 %4)
414
415  ret <vscale x 4 x i32> %a
416}
417
418declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
419  <vscale x 8 x i32>,
420  <vscale x 8 x i16>,
421  <vscale x 8 x i16>,
422  i64);
423
424define <vscale x 8 x i32>  @intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
425; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i32_nxv8i16_nxv8i16:
426; CHECK:       # %bb.0: # %entry
427; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
428; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
429; CHECK-NEXT:    ret
430entry:
431  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.nxv8i16(
432    <vscale x 8 x i32> %0,
433    <vscale x 8 x i16> %1,
434    <vscale x 8 x i16> %2,
435    i64 %3)
436
437  ret <vscale x 8 x i32> %a
438}
439
440declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
441  <vscale x 8 x i32>,
442  <vscale x 8 x i16>,
443  <vscale x 8 x i16>,
444  <vscale x 8 x i1>,
445  i64);
446
447define <vscale x 8 x i32>  @intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
448; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i32_nxv8i16_nxv8i16:
449; CHECK:       # %bb.0: # %entry
450; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
451; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
452; CHECK-NEXT:    ret
453entry:
454  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.nxv8i16(
455    <vscale x 8 x i32> %0,
456    <vscale x 8 x i16> %1,
457    <vscale x 8 x i16> %2,
458    <vscale x 8 x i1> %3,
459    i64 %4)
460
461  ret <vscale x 8 x i32> %a
462}
463
464declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
465  <vscale x 16 x i32>,
466  <vscale x 16 x i16>,
467  <vscale x 16 x i16>,
468  i64);
469
470define <vscale x 16 x i32>  @intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
471; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv16i32_nxv16i16_nxv16i16:
472; CHECK:       # %bb.0: # %entry
473; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
474; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
475; CHECK-NEXT:    ret
476entry:
477  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.nxv16i16(
478    <vscale x 16 x i32> %0,
479    <vscale x 16 x i16> %1,
480    <vscale x 16 x i16> %2,
481    i64 %3)
482
483  ret <vscale x 16 x i32> %a
484}
485
486declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
487  <vscale x 16 x i32>,
488  <vscale x 16 x i16>,
489  <vscale x 16 x i16>,
490  <vscale x 16 x i1>,
491  i64);
492
493define <vscale x 16 x i32>  @intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
494; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv16i32_nxv16i16_nxv16i16:
495; CHECK:       # %bb.0: # %entry
496; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
497; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
498; CHECK-NEXT:    ret
499entry:
500  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.nxv16i16(
501    <vscale x 16 x i32> %0,
502    <vscale x 16 x i16> %1,
503    <vscale x 16 x i16> %2,
504    <vscale x 16 x i1> %3,
505    i64 %4)
506
507  ret <vscale x 16 x i32> %a
508}
509
510declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
511  <vscale x 1 x i64>,
512  <vscale x 1 x i32>,
513  <vscale x 1 x i32>,
514  i64);
515
516define <vscale x 1 x i64>  @intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
517; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv1i64_nxv1i32_nxv1i32:
518; CHECK:       # %bb.0: # %entry
519; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
520; CHECK-NEXT:    vwmaccu.vv v8, v9, v10
521; CHECK-NEXT:    ret
522entry:
523  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.nxv1i32(
524    <vscale x 1 x i64> %0,
525    <vscale x 1 x i32> %1,
526    <vscale x 1 x i32> %2,
527    i64 %3)
528
529  ret <vscale x 1 x i64> %a
530}
531
532declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
533  <vscale x 1 x i64>,
534  <vscale x 1 x i32>,
535  <vscale x 1 x i32>,
536  <vscale x 1 x i1>,
537  i64);
538
539define <vscale x 1 x i64>  @intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
540; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv1i64_nxv1i32_nxv1i32:
541; CHECK:       # %bb.0: # %entry
542; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
543; CHECK-NEXT:    vwmaccu.vv v8, v9, v10, v0.t
544; CHECK-NEXT:    ret
545entry:
546  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.nxv1i32(
547    <vscale x 1 x i64> %0,
548    <vscale x 1 x i32> %1,
549    <vscale x 1 x i32> %2,
550    <vscale x 1 x i1> %3,
551    i64 %4)
552
553  ret <vscale x 1 x i64> %a
554}
555
556declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
557  <vscale x 2 x i64>,
558  <vscale x 2 x i32>,
559  <vscale x 2 x i32>,
560  i64);
561
562define <vscale x 2 x i64>  @intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
563; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv2i64_nxv2i32_nxv2i32:
564; CHECK:       # %bb.0: # %entry
565; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
566; CHECK-NEXT:    vwmaccu.vv v8, v10, v11
567; CHECK-NEXT:    ret
568entry:
569  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.nxv2i32(
570    <vscale x 2 x i64> %0,
571    <vscale x 2 x i32> %1,
572    <vscale x 2 x i32> %2,
573    i64 %3)
574
575  ret <vscale x 2 x i64> %a
576}
577
578declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
579  <vscale x 2 x i64>,
580  <vscale x 2 x i32>,
581  <vscale x 2 x i32>,
582  <vscale x 2 x i1>,
583  i64);
584
585define <vscale x 2 x i64>  @intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
586; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv2i64_nxv2i32_nxv2i32:
587; CHECK:       # %bb.0: # %entry
588; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
589; CHECK-NEXT:    vwmaccu.vv v8, v10, v11, v0.t
590; CHECK-NEXT:    ret
591entry:
592  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.nxv2i32(
593    <vscale x 2 x i64> %0,
594    <vscale x 2 x i32> %1,
595    <vscale x 2 x i32> %2,
596    <vscale x 2 x i1> %3,
597    i64 %4)
598
599  ret <vscale x 2 x i64> %a
600}
601
602declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
603  <vscale x 4 x i64>,
604  <vscale x 4 x i32>,
605  <vscale x 4 x i32>,
606  i64);
607
608define <vscale x 4 x i64>  @intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
609; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv4i64_nxv4i32_nxv4i32:
610; CHECK:       # %bb.0: # %entry
611; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
612; CHECK-NEXT:    vwmaccu.vv v8, v12, v14
613; CHECK-NEXT:    ret
614entry:
615  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.nxv4i32(
616    <vscale x 4 x i64> %0,
617    <vscale x 4 x i32> %1,
618    <vscale x 4 x i32> %2,
619    i64 %3)
620
621  ret <vscale x 4 x i64> %a
622}
623
624declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
625  <vscale x 4 x i64>,
626  <vscale x 4 x i32>,
627  <vscale x 4 x i32>,
628  <vscale x 4 x i1>,
629  i64);
630
631define <vscale x 4 x i64>  @intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
632; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv4i64_nxv4i32_nxv4i32:
633; CHECK:       # %bb.0: # %entry
634; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
635; CHECK-NEXT:    vwmaccu.vv v8, v12, v14, v0.t
636; CHECK-NEXT:    ret
637entry:
638  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.nxv4i32(
639    <vscale x 4 x i64> %0,
640    <vscale x 4 x i32> %1,
641    <vscale x 4 x i32> %2,
642    <vscale x 4 x i1> %3,
643    i64 %4)
644
645  ret <vscale x 4 x i64> %a
646}
647
648declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
649  <vscale x 8 x i64>,
650  <vscale x 8 x i32>,
651  <vscale x 8 x i32>,
652  i64);
653
654define <vscale x 8 x i64>  @intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
655; CHECK-LABEL: intrinsic_vwmaccu_vv_nxv8i64_nxv8i32_nxv8i32:
656; CHECK:       # %bb.0: # %entry
657; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
658; CHECK-NEXT:    vwmaccu.vv v8, v16, v20
659; CHECK-NEXT:    ret
660entry:
661  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.nxv8i32(
662    <vscale x 8 x i64> %0,
663    <vscale x 8 x i32> %1,
664    <vscale x 8 x i32> %2,
665    i64 %3)
666
667  ret <vscale x 8 x i64> %a
668}
669
670declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
671  <vscale x 8 x i64>,
672  <vscale x 8 x i32>,
673  <vscale x 8 x i32>,
674  <vscale x 8 x i1>,
675  i64);
676
677define <vscale x 8 x i64>  @intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
678; CHECK-LABEL: intrinsic_vwmaccu_mask_vv_nxv8i64_nxv8i32_nxv8i32:
679; CHECK:       # %bb.0: # %entry
680; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
681; CHECK-NEXT:    vwmaccu.vv v8, v16, v20, v0.t
682; CHECK-NEXT:    ret
683entry:
684  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.nxv8i32(
685    <vscale x 8 x i64> %0,
686    <vscale x 8 x i32> %1,
687    <vscale x 8 x i32> %2,
688    <vscale x 8 x i1> %3,
689    i64 %4)
690
691  ret <vscale x 8 x i64> %a
692}
693
694declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
695  <vscale x 1 x i16>,
696  i8,
697  <vscale x 1 x i8>,
698  i64);
699
700define <vscale x 1 x i16>  @intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, i64 %3) nounwind {
701; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i16_i8_nxv1i8:
702; CHECK:       # %bb.0: # %entry
703; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
704; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
705; CHECK-NEXT:    ret
706entry:
707  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.nxv1i16.i8(
708    <vscale x 1 x i16> %0,
709    i8 %1,
710    <vscale x 1 x i8> %2,
711    i64 %3)
712
713  ret <vscale x 1 x i16> %a
714}
715
716declare <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
717  <vscale x 1 x i16>,
718  i8,
719  <vscale x 1 x i8>,
720  <vscale x 1 x i1>,
721  i64);
722
723define <vscale x 1 x i16> @intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
724; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i16_i8_nxv1i8:
725; CHECK:       # %bb.0: # %entry
726; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
727; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
728; CHECK-NEXT:    ret
729entry:
730  %a = call <vscale x 1 x i16> @llvm.riscv.vwmaccu.mask.nxv1i16.i8(
731    <vscale x 1 x i16> %0,
732    i8 %1,
733    <vscale x 1 x i8> %2,
734    <vscale x 1 x i1> %3,
735    i64 %4)
736
737  ret <vscale x 1 x i16> %a
738}
739
740declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
741  <vscale x 2 x i16>,
742  i8,
743  <vscale x 2 x i8>,
744  i64);
745
746define <vscale x 2 x i16>  @intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, i64 %3) nounwind {
747; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i16_i8_nxv2i8:
748; CHECK:       # %bb.0: # %entry
749; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
750; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
751; CHECK-NEXT:    ret
752entry:
753  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.nxv2i16.i8(
754    <vscale x 2 x i16> %0,
755    i8 %1,
756    <vscale x 2 x i8> %2,
757    i64 %3)
758
759  ret <vscale x 2 x i16> %a
760}
761
762declare <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
763  <vscale x 2 x i16>,
764  i8,
765  <vscale x 2 x i8>,
766  <vscale x 2 x i1>,
767  i64);
768
769define <vscale x 2 x i16> @intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
770; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i16_i8_nxv2i8:
771; CHECK:       # %bb.0: # %entry
772; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
773; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
774; CHECK-NEXT:    ret
775entry:
776  %a = call <vscale x 2 x i16> @llvm.riscv.vwmaccu.mask.nxv2i16.i8(
777    <vscale x 2 x i16> %0,
778    i8 %1,
779    <vscale x 2 x i8> %2,
780    <vscale x 2 x i1> %3,
781    i64 %4)
782
783  ret <vscale x 2 x i16> %a
784}
785
786declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
787  <vscale x 4 x i16>,
788  i8,
789  <vscale x 4 x i8>,
790  i64);
791
792define <vscale x 4 x i16>  @intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, i64 %3) nounwind {
793; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i16_i8_nxv4i8:
794; CHECK:       # %bb.0: # %entry
795; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
796; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
797; CHECK-NEXT:    ret
798entry:
799  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.nxv4i16.i8(
800    <vscale x 4 x i16> %0,
801    i8 %1,
802    <vscale x 4 x i8> %2,
803    i64 %3)
804
805  ret <vscale x 4 x i16> %a
806}
807
808declare <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
809  <vscale x 4 x i16>,
810  i8,
811  <vscale x 4 x i8>,
812  <vscale x 4 x i1>,
813  i64);
814
815define <vscale x 4 x i16> @intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
816; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i16_i8_nxv4i8:
817; CHECK:       # %bb.0: # %entry
818; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
819; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
820; CHECK-NEXT:    ret
821entry:
822  %a = call <vscale x 4 x i16> @llvm.riscv.vwmaccu.mask.nxv4i16.i8(
823    <vscale x 4 x i16> %0,
824    i8 %1,
825    <vscale x 4 x i8> %2,
826    <vscale x 4 x i1> %3,
827    i64 %4)
828
829  ret <vscale x 4 x i16> %a
830}
831
832declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
833  <vscale x 8 x i16>,
834  i8,
835  <vscale x 8 x i8>,
836  i64);
837
838define <vscale x 8 x i16>  @intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, i64 %3) nounwind {
839; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i16_i8_nxv8i8:
840; CHECK:       # %bb.0: # %entry
841; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
842; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
843; CHECK-NEXT:    ret
844entry:
845  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.nxv8i16.i8(
846    <vscale x 8 x i16> %0,
847    i8 %1,
848    <vscale x 8 x i8> %2,
849    i64 %3)
850
851  ret <vscale x 8 x i16> %a
852}
853
854declare <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
855  <vscale x 8 x i16>,
856  i8,
857  <vscale x 8 x i8>,
858  <vscale x 8 x i1>,
859  i64);
860
861define <vscale x 8 x i16> @intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
862; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i16_i8_nxv8i8:
863; CHECK:       # %bb.0: # %entry
864; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
865; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
866; CHECK-NEXT:    ret
867entry:
868  %a = call <vscale x 8 x i16> @llvm.riscv.vwmaccu.mask.nxv8i16.i8(
869    <vscale x 8 x i16> %0,
870    i8 %1,
871    <vscale x 8 x i8> %2,
872    <vscale x 8 x i1> %3,
873    i64 %4)
874
875  ret <vscale x 8 x i16> %a
876}
877
878declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
879  <vscale x 16 x i16>,
880  i8,
881  <vscale x 16 x i8>,
882  i64);
883
884define <vscale x 16 x i16>  @intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, i64 %3) nounwind {
885; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i16_i8_nxv16i8:
886; CHECK:       # %bb.0: # %entry
887; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
888; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
889; CHECK-NEXT:    ret
890entry:
891  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.nxv16i16.i8(
892    <vscale x 16 x i16> %0,
893    i8 %1,
894    <vscale x 16 x i8> %2,
895    i64 %3)
896
897  ret <vscale x 16 x i16> %a
898}
899
900declare <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
901  <vscale x 16 x i16>,
902  i8,
903  <vscale x 16 x i8>,
904  <vscale x 16 x i1>,
905  i64);
906
907define <vscale x 16 x i16> @intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
908; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i16_i8_nxv16i8:
909; CHECK:       # %bb.0: # %entry
910; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
911; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
912; CHECK-NEXT:    ret
913entry:
914  %a = call <vscale x 16 x i16> @llvm.riscv.vwmaccu.mask.nxv16i16.i8(
915    <vscale x 16 x i16> %0,
916    i8 %1,
917    <vscale x 16 x i8> %2,
918    <vscale x 16 x i1> %3,
919    i64 %4)
920
921  ret <vscale x 16 x i16> %a
922}
923
924declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
925  <vscale x 32 x i16>,
926  i8,
927  <vscale x 32 x i8>,
928  i64);
929
930define <vscale x 32 x i16>  @intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, i64 %3) nounwind {
931; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv32i16_i8_nxv32i8:
932; CHECK:       # %bb.0: # %entry
933; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
934; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
935; CHECK-NEXT:    ret
936entry:
937  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.nxv32i16.i8(
938    <vscale x 32 x i16> %0,
939    i8 %1,
940    <vscale x 32 x i8> %2,
941    i64 %3)
942
943  ret <vscale x 32 x i16> %a
944}
945
946declare <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
947  <vscale x 32 x i16>,
948  i8,
949  <vscale x 32 x i8>,
950  <vscale x 32 x i1>,
951  i64);
952
953define <vscale x 32 x i16> @intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
954; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv32i16_i8_nxv32i8:
955; CHECK:       # %bb.0: # %entry
956; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
957; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
958; CHECK-NEXT:    ret
959entry:
960  %a = call <vscale x 32 x i16> @llvm.riscv.vwmaccu.mask.nxv32i16.i8(
961    <vscale x 32 x i16> %0,
962    i8 %1,
963    <vscale x 32 x i8> %2,
964    <vscale x 32 x i1> %3,
965    i64 %4)
966
967  ret <vscale x 32 x i16> %a
968}
969
970declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
971  <vscale x 1 x i32>,
972  i16,
973  <vscale x 1 x i16>,
974  i64);
975
976define <vscale x 1 x i32>  @intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, i64 %3) nounwind {
977; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i32_i16_nxv1i16:
978; CHECK:       # %bb.0: # %entry
979; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
980; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
981; CHECK-NEXT:    ret
982entry:
983  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.nxv1i32.i16(
984    <vscale x 1 x i32> %0,
985    i16 %1,
986    <vscale x 1 x i16> %2,
987    i64 %3)
988
989  ret <vscale x 1 x i32> %a
990}
991
992declare <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
993  <vscale x 1 x i32>,
994  i16,
995  <vscale x 1 x i16>,
996  <vscale x 1 x i1>,
997  i64);
998
999define <vscale x 1 x i32> @intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1000; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i32_i16_nxv1i16:
1001; CHECK:       # %bb.0: # %entry
1002; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1003; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
1004; CHECK-NEXT:    ret
1005entry:
1006  %a = call <vscale x 1 x i32> @llvm.riscv.vwmaccu.mask.nxv1i32.i16(
1007    <vscale x 1 x i32> %0,
1008    i16 %1,
1009    <vscale x 1 x i16> %2,
1010    <vscale x 1 x i1> %3,
1011    i64 %4)
1012
1013  ret <vscale x 1 x i32> %a
1014}
1015
1016declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
1017  <vscale x 2 x i32>,
1018  i16,
1019  <vscale x 2 x i16>,
1020  i64);
1021
1022define <vscale x 2 x i32>  @intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, i64 %3) nounwind {
1023; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i32_i16_nxv2i16:
1024; CHECK:       # %bb.0: # %entry
1025; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1026; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
1027; CHECK-NEXT:    ret
1028entry:
1029  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.nxv2i32.i16(
1030    <vscale x 2 x i32> %0,
1031    i16 %1,
1032    <vscale x 2 x i16> %2,
1033    i64 %3)
1034
1035  ret <vscale x 2 x i32> %a
1036}
1037
1038declare <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
1039  <vscale x 2 x i32>,
1040  i16,
1041  <vscale x 2 x i16>,
1042  <vscale x 2 x i1>,
1043  i64);
1044
1045define <vscale x 2 x i32> @intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1046; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i32_i16_nxv2i16:
1047; CHECK:       # %bb.0: # %entry
1048; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1049; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
1050; CHECK-NEXT:    ret
1051entry:
1052  %a = call <vscale x 2 x i32> @llvm.riscv.vwmaccu.mask.nxv2i32.i16(
1053    <vscale x 2 x i32> %0,
1054    i16 %1,
1055    <vscale x 2 x i16> %2,
1056    <vscale x 2 x i1> %3,
1057    i64 %4)
1058
1059  ret <vscale x 2 x i32> %a
1060}
1061
1062declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
1063  <vscale x 4 x i32>,
1064  i16,
1065  <vscale x 4 x i16>,
1066  i64);
1067
1068define <vscale x 4 x i32>  @intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, i64 %3) nounwind {
1069; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i32_i16_nxv4i16:
1070; CHECK:       # %bb.0: # %entry
1071; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
1072; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
1073; CHECK-NEXT:    ret
1074entry:
1075  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.nxv4i32.i16(
1076    <vscale x 4 x i32> %0,
1077    i16 %1,
1078    <vscale x 4 x i16> %2,
1079    i64 %3)
1080
1081  ret <vscale x 4 x i32> %a
1082}
1083
1084declare <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
1085  <vscale x 4 x i32>,
1086  i16,
1087  <vscale x 4 x i16>,
1088  <vscale x 4 x i1>,
1089  i64);
1090
1091define <vscale x 4 x i32> @intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1092; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i32_i16_nxv4i16:
1093; CHECK:       # %bb.0: # %entry
1094; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
1095; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
1096; CHECK-NEXT:    ret
1097entry:
1098  %a = call <vscale x 4 x i32> @llvm.riscv.vwmaccu.mask.nxv4i32.i16(
1099    <vscale x 4 x i32> %0,
1100    i16 %1,
1101    <vscale x 4 x i16> %2,
1102    <vscale x 4 x i1> %3,
1103    i64 %4)
1104
1105  ret <vscale x 4 x i32> %a
1106}
1107
1108declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
1109  <vscale x 8 x i32>,
1110  i16,
1111  <vscale x 8 x i16>,
1112  i64);
1113
1114define <vscale x 8 x i32>  @intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, i64 %3) nounwind {
1115; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i32_i16_nxv8i16:
1116; CHECK:       # %bb.0: # %entry
1117; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
1118; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
1119; CHECK-NEXT:    ret
1120entry:
1121  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.nxv8i32.i16(
1122    <vscale x 8 x i32> %0,
1123    i16 %1,
1124    <vscale x 8 x i16> %2,
1125    i64 %3)
1126
1127  ret <vscale x 8 x i32> %a
1128}
1129
1130declare <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
1131  <vscale x 8 x i32>,
1132  i16,
1133  <vscale x 8 x i16>,
1134  <vscale x 8 x i1>,
1135  i64);
1136
1137define <vscale x 8 x i32> @intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1138; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i32_i16_nxv8i16:
1139; CHECK:       # %bb.0: # %entry
1140; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
1141; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
1142; CHECK-NEXT:    ret
1143entry:
1144  %a = call <vscale x 8 x i32> @llvm.riscv.vwmaccu.mask.nxv8i32.i16(
1145    <vscale x 8 x i32> %0,
1146    i16 %1,
1147    <vscale x 8 x i16> %2,
1148    <vscale x 8 x i1> %3,
1149    i64 %4)
1150
1151  ret <vscale x 8 x i32> %a
1152}
1153
1154declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
1155  <vscale x 16 x i32>,
1156  i16,
1157  <vscale x 16 x i16>,
1158  i64);
1159
1160define <vscale x 16 x i32>  @intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, i64 %3) nounwind {
1161; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv16i32_i16_nxv16i16:
1162; CHECK:       # %bb.0: # %entry
1163; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
1164; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
1165; CHECK-NEXT:    ret
1166entry:
1167  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.nxv16i32.i16(
1168    <vscale x 16 x i32> %0,
1169    i16 %1,
1170    <vscale x 16 x i16> %2,
1171    i64 %3)
1172
1173  ret <vscale x 16 x i32> %a
1174}
1175
1176declare <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
1177  <vscale x 16 x i32>,
1178  i16,
1179  <vscale x 16 x i16>,
1180  <vscale x 16 x i1>,
1181  i64);
1182
1183define <vscale x 16 x i32> @intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1184; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv16i32_i16_nxv16i16:
1185; CHECK:       # %bb.0: # %entry
1186; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
1187; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
1188; CHECK-NEXT:    ret
1189entry:
1190  %a = call <vscale x 16 x i32> @llvm.riscv.vwmaccu.mask.nxv16i32.i16(
1191    <vscale x 16 x i32> %0,
1192    i16 %1,
1193    <vscale x 16 x i16> %2,
1194    <vscale x 16 x i1> %3,
1195    i64 %4)
1196
1197  ret <vscale x 16 x i32> %a
1198}
1199
1200declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
1201  <vscale x 1 x i64>,
1202  i32,
1203  <vscale x 1 x i32>,
1204  i64);
1205
1206define <vscale x 1 x i64>  @intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, i64 %3) nounwind {
1207; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv1i64_i32_nxv1i32:
1208; CHECK:       # %bb.0: # %entry
1209; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
1210; CHECK-NEXT:    vwmaccu.vx v8, a0, v9
1211; CHECK-NEXT:    ret
1212entry:
1213  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.nxv1i64.i32(
1214    <vscale x 1 x i64> %0,
1215    i32 %1,
1216    <vscale x 1 x i32> %2,
1217    i64 %3)
1218
1219  ret <vscale x 1 x i64> %a
1220}
1221
1222declare <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
1223  <vscale x 1 x i64>,
1224  i32,
1225  <vscale x 1 x i32>,
1226  <vscale x 1 x i1>,
1227  i64);
1228
1229define <vscale x 1 x i64> @intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1230; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv1i64_i32_nxv1i32:
1231; CHECK:       # %bb.0: # %entry
1232; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
1233; CHECK-NEXT:    vwmaccu.vx v8, a0, v9, v0.t
1234; CHECK-NEXT:    ret
1235entry:
1236  %a = call <vscale x 1 x i64> @llvm.riscv.vwmaccu.mask.nxv1i64.i32(
1237    <vscale x 1 x i64> %0,
1238    i32 %1,
1239    <vscale x 1 x i32> %2,
1240    <vscale x 1 x i1> %3,
1241    i64 %4)
1242
1243  ret <vscale x 1 x i64> %a
1244}
1245
1246declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
1247  <vscale x 2 x i64>,
1248  i32,
1249  <vscale x 2 x i32>,
1250  i64);
1251
1252define <vscale x 2 x i64>  @intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, i64 %3) nounwind {
1253; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv2i64_i32_nxv2i32:
1254; CHECK:       # %bb.0: # %entry
1255; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
1256; CHECK-NEXT:    vwmaccu.vx v8, a0, v10
1257; CHECK-NEXT:    ret
1258entry:
1259  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.nxv2i64.i32(
1260    <vscale x 2 x i64> %0,
1261    i32 %1,
1262    <vscale x 2 x i32> %2,
1263    i64 %3)
1264
1265  ret <vscale x 2 x i64> %a
1266}
1267
1268declare <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
1269  <vscale x 2 x i64>,
1270  i32,
1271  <vscale x 2 x i32>,
1272  <vscale x 2 x i1>,
1273  i64);
1274
1275define <vscale x 2 x i64> @intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1276; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv2i64_i32_nxv2i32:
1277; CHECK:       # %bb.0: # %entry
1278; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
1279; CHECK-NEXT:    vwmaccu.vx v8, a0, v10, v0.t
1280; CHECK-NEXT:    ret
1281entry:
1282  %a = call <vscale x 2 x i64> @llvm.riscv.vwmaccu.mask.nxv2i64.i32(
1283    <vscale x 2 x i64> %0,
1284    i32 %1,
1285    <vscale x 2 x i32> %2,
1286    <vscale x 2 x i1> %3,
1287    i64 %4)
1288
1289  ret <vscale x 2 x i64> %a
1290}
1291
1292declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
1293  <vscale x 4 x i64>,
1294  i32,
1295  <vscale x 4 x i32>,
1296  i64);
1297
1298define <vscale x 4 x i64>  @intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, i64 %3) nounwind {
1299; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv4i64_i32_nxv4i32:
1300; CHECK:       # %bb.0: # %entry
1301; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
1302; CHECK-NEXT:    vwmaccu.vx v8, a0, v12
1303; CHECK-NEXT:    ret
1304entry:
1305  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.nxv4i64.i32(
1306    <vscale x 4 x i64> %0,
1307    i32 %1,
1308    <vscale x 4 x i32> %2,
1309    i64 %3)
1310
1311  ret <vscale x 4 x i64> %a
1312}
1313
1314declare <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
1315  <vscale x 4 x i64>,
1316  i32,
1317  <vscale x 4 x i32>,
1318  <vscale x 4 x i1>,
1319  i64);
1320
1321define <vscale x 4 x i64> @intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1322; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv4i64_i32_nxv4i32:
1323; CHECK:       # %bb.0: # %entry
1324; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
1325; CHECK-NEXT:    vwmaccu.vx v8, a0, v12, v0.t
1326; CHECK-NEXT:    ret
1327entry:
1328  %a = call <vscale x 4 x i64> @llvm.riscv.vwmaccu.mask.nxv4i64.i32(
1329    <vscale x 4 x i64> %0,
1330    i32 %1,
1331    <vscale x 4 x i32> %2,
1332    <vscale x 4 x i1> %3,
1333    i64 %4)
1334
1335  ret <vscale x 4 x i64> %a
1336}
1337
1338declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
1339  <vscale x 8 x i64>,
1340  i32,
1341  <vscale x 8 x i32>,
1342  i64);
1343
1344define <vscale x 8 x i64>  @intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, i64 %3) nounwind {
1345; CHECK-LABEL: intrinsic_vwmaccu_vx_nxv8i64_i32_nxv8i32:
1346; CHECK:       # %bb.0: # %entry
1347; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
1348; CHECK-NEXT:    vwmaccu.vx v8, a0, v16
1349; CHECK-NEXT:    ret
1350entry:
1351  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.nxv8i64.i32(
1352    <vscale x 8 x i64> %0,
1353    i32 %1,
1354    <vscale x 8 x i32> %2,
1355    i64 %3)
1356
1357  ret <vscale x 8 x i64> %a
1358}
1359
1360declare <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
1361  <vscale x 8 x i64>,
1362  i32,
1363  <vscale x 8 x i32>,
1364  <vscale x 8 x i1>,
1365  i64);
1366
1367define <vscale x 8 x i64> @intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1368; CHECK-LABEL: intrinsic_vwmaccu_mask_vx_nxv8i64_i32_nxv8i32:
1369; CHECK:       # %bb.0: # %entry
1370; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
1371; CHECK-NEXT:    vwmaccu.vx v8, a0, v16, v0.t
1372; CHECK-NEXT:    ret
1373entry:
1374  %a = call <vscale x 8 x i64> @llvm.riscv.vwmaccu.mask.nxv8i64.i32(
1375    <vscale x 8 x i64> %0,
1376    i32 %1,
1377    <vscale x 8 x i32> %2,
1378    <vscale x 8 x i1> %3,
1379    i64 %4)
1380
1381  ret <vscale x 8 x i64> %a
1382}
1383