; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4r.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i32> @intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re16.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v10, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v12, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vl4re32.v v28, (a0)
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v16, v28, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
  <vscale x 1 x i16>,
  i8,
  i32);

define <vscale x 1 x i16> @intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i8,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i16_nxv1i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
  <vscale x 2 x i16>,
  i8,
  i32);

define <vscale x 2 x i16> @intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i8,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i16_nxv2i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
  <vscale x 4 x i16>,
  i8,
  i32);

define <vscale x 4 x i16> @intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i8,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i16_nxv4i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
  <vscale x 8 x i16>,
  i8,
  i32);

define <vscale x 8 x i16> @intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i8,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i16_nxv8i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
  <vscale x 16 x i16>,
  i8,
  i32);

define <vscale x 16 x i16> @intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i8,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i16_nxv16i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
  <vscale x 32 x i16>,
  i8,
  i32);

define <vscale x 32 x i16> @intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i8,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv32i16_nxv32i16_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i16> %a
}

; vwadd.wx (vector-scalar widening add, i16 scalar) tests for nxv1i32..nxv16i32,
; unmasked and masked forms. Stripped extraction artifact: fused per-line numbers
; that invalidated the IR and the FileCheck directives.
declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
  <vscale x 1 x i32>,
  i16,
  i32);

define <vscale x 1 x i32> @intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i16,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i32_nxv1i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
  <vscale x 2 x i32>,
  i16,
  i32);

define <vscale x 2 x i32> @intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i16,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i32_nxv2i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
  <vscale x 4 x i32>,
  i16,
  i32);

define <vscale x 4 x i32> @intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i16,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i32_nxv4i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
  <vscale x 8 x i32>,
  i16,
  i32);

define <vscale x 8 x i32> @intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i16,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i32_nxv8i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
  <vscale x 16 x i32>,
  i16,
  i32);

define <vscale x 16 x i32> @intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i16,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv16i32_nxv16i32_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i32> %a
}

; vwadd.wx (vector-scalar widening add, i32 scalar) tests for nxv1i64..nxv8i64,
; unmasked and masked forms. Stripped extraction artifact: fused per-line numbers
; that invalidated the IR and the FileCheck directives.
declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
  <vscale x 1 x i64>,
  i32,
  i32);

define <vscale x 1 x i64> @intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv1i64_nxv1i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v9, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
  <vscale x 2 x i64>,
  i32,
  i32);

define <vscale x 2 x i64> @intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v10, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
  <vscale x 4 x i64>,
  i32,
  i32);

define <vscale x 4 x i64> @intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v12, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
  <vscale x 8 x i64>,
  i32,
  i32);

define <vscale x 8 x i64> @intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v16, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i64> %a
}

; Masked vwadd.wv tests where the maskedoff operand is tied to the wide source
; operand (both are %0), so the destination register equals the first source.
; Stripped extraction artifact: fused per-line numbers that invalidated the IR
; and the FileCheck directives.
define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i16> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i16> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i16> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i16> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i16> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i16> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i16> %a
}

define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i32> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i32> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i32> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i32> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16(<vscale x 16 x i32> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv16i32_nxv16i32_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.nxv16i16(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i64> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i64> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i64> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v12, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i64> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wv_tie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vwadd.wv v8, v8, v16, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

1582define <vscale x 1 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8(<vscale x 1 x i16> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
1583; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i16_nxv1i16_i8:
1584; CHECK:       # %bb.0: # %entry
1585; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, tu, mu
1586; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1587; CHECK-NEXT:    ret
1588entry:
1589  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.mask.nxv1i16.i8(
1590    <vscale x 1 x i16> %0,
1591    <vscale x 1 x i16> %0,
1592    i8 %1,
1593    <vscale x 1 x i1> %2,
1594    i32 %3)
1595
1596  ret <vscale x 1 x i16> %a
1597}
1598
1599define <vscale x 2 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8(<vscale x 2 x i16> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
1600; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i16_nxv2i16_i8:
1601; CHECK:       # %bb.0: # %entry
1602; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, tu, mu
1603; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1604; CHECK-NEXT:    ret
1605entry:
1606  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.mask.nxv2i16.i8(
1607    <vscale x 2 x i16> %0,
1608    <vscale x 2 x i16> %0,
1609    i8 %1,
1610    <vscale x 2 x i1> %2,
1611    i32 %3)
1612
1613  ret <vscale x 2 x i16> %a
1614}
1615
1616define <vscale x 4 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8(<vscale x 4 x i16> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
1617; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i16_nxv4i16_i8:
1618; CHECK:       # %bb.0: # %entry
1619; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, tu, mu
1620; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1621; CHECK-NEXT:    ret
1622entry:
1623  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.mask.nxv4i16.i8(
1624    <vscale x 4 x i16> %0,
1625    <vscale x 4 x i16> %0,
1626    i8 %1,
1627    <vscale x 4 x i1> %2,
1628    i32 %3)
1629
1630  ret <vscale x 4 x i16> %a
1631}
1632
1633define <vscale x 8 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8(<vscale x 8 x i16> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
1634; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i16_nxv8i16_i8:
1635; CHECK:       # %bb.0: # %entry
1636; CHECK-NEXT:    vsetvli zero, a1, e8, m1, tu, mu
1637; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1638; CHECK-NEXT:    ret
1639entry:
1640  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.mask.nxv8i16.i8(
1641    <vscale x 8 x i16> %0,
1642    <vscale x 8 x i16> %0,
1643    i8 %1,
1644    <vscale x 8 x i1> %2,
1645    i32 %3)
1646
1647  ret <vscale x 8 x i16> %a
1648}
1649
1650define <vscale x 16 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8(<vscale x 16 x i16> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
1651; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i16_nxv16i16_i8:
1652; CHECK:       # %bb.0: # %entry
1653; CHECK-NEXT:    vsetvli zero, a1, e8, m2, tu, mu
1654; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1655; CHECK-NEXT:    ret
1656entry:
1657  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.mask.nxv16i16.i8(
1658    <vscale x 16 x i16> %0,
1659    <vscale x 16 x i16> %0,
1660    i8 %1,
1661    <vscale x 16 x i1> %2,
1662    i32 %3)
1663
1664  ret <vscale x 16 x i16> %a
1665}
1666
1667define <vscale x 32 x i16> @intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8(<vscale x 32 x i16> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
1668; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv32i16_nxv32i16_i8:
1669; CHECK:       # %bb.0: # %entry
1670; CHECK-NEXT:    vsetvli zero, a1, e8, m4, tu, mu
1671; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1672; CHECK-NEXT:    ret
1673entry:
1674  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.mask.nxv32i16.i8(
1675    <vscale x 32 x i16> %0,
1676    <vscale x 32 x i16> %0,
1677    i8 %1,
1678    <vscale x 32 x i1> %2,
1679    i32 %3)
1680
1681  ret <vscale x 32 x i16> %a
1682}
1683
1684define <vscale x 1 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16(<vscale x 1 x i32> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
1685; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i32_nxv1i32_i16:
1686; CHECK:       # %bb.0: # %entry
1687; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, tu, mu
1688; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1689; CHECK-NEXT:    ret
1690entry:
1691  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.mask.nxv1i32.i16(
1692    <vscale x 1 x i32> %0,
1693    <vscale x 1 x i32> %0,
1694    i16 %1,
1695    <vscale x 1 x i1> %2,
1696    i32 %3)
1697
1698  ret <vscale x 1 x i32> %a
1699}
1700
1701define <vscale x 2 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16(<vscale x 2 x i32> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
1702; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i32_nxv2i32_i16:
1703; CHECK:       # %bb.0: # %entry
1704; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, tu, mu
1705; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1706; CHECK-NEXT:    ret
1707entry:
1708  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.mask.nxv2i32.i16(
1709    <vscale x 2 x i32> %0,
1710    <vscale x 2 x i32> %0,
1711    i16 %1,
1712    <vscale x 2 x i1> %2,
1713    i32 %3)
1714
1715  ret <vscale x 2 x i32> %a
1716}
1717
1718define <vscale x 4 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16(<vscale x 4 x i32> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
1719; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i32_nxv4i32_i16:
1720; CHECK:       # %bb.0: # %entry
1721; CHECK-NEXT:    vsetvli zero, a1, e16, m1, tu, mu
1722; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1723; CHECK-NEXT:    ret
1724entry:
1725  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.mask.nxv4i32.i16(
1726    <vscale x 4 x i32> %0,
1727    <vscale x 4 x i32> %0,
1728    i16 %1,
1729    <vscale x 4 x i1> %2,
1730    i32 %3)
1731
1732  ret <vscale x 4 x i32> %a
1733}
1734
1735define <vscale x 8 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16(<vscale x 8 x i32> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
1736; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i32_nxv8i32_i16:
1737; CHECK:       # %bb.0: # %entry
1738; CHECK-NEXT:    vsetvli zero, a1, e16, m2, tu, mu
1739; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1740; CHECK-NEXT:    ret
1741entry:
1742  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.mask.nxv8i32.i16(
1743    <vscale x 8 x i32> %0,
1744    <vscale x 8 x i32> %0,
1745    i16 %1,
1746    <vscale x 8 x i1> %2,
1747    i32 %3)
1748
1749  ret <vscale x 8 x i32> %a
1750}
1751
1752define <vscale x 16 x i32> @intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16(<vscale x 16 x i32> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
1753; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv16i32_nxv16i32_i16:
1754; CHECK:       # %bb.0: # %entry
1755; CHECK-NEXT:    vsetvli zero, a1, e16, m4, tu, mu
1756; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1757; CHECK-NEXT:    ret
1758entry:
1759  %a = call <vscale x 16 x i32> @llvm.riscv.vwadd.w.mask.nxv16i32.i16(
1760    <vscale x 16 x i32> %0,
1761    <vscale x 16 x i32> %0,
1762    i16 %1,
1763    <vscale x 16 x i1> %2,
1764    i32 %3)
1765
1766  ret <vscale x 16 x i32> %a
1767}
1768
1769define <vscale x 1 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32(<vscale x 1 x i64> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
1770; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv1i64_nxv1i64_i32:
1771; CHECK:       # %bb.0: # %entry
1772; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, tu, mu
1773; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
1774; CHECK-NEXT:    ret
1775entry:
1776  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.mask.nxv1i64.i32(
1777    <vscale x 1 x i64> %0,
1778    <vscale x 1 x i64> %0,
1779    i32 %1,
1780    <vscale x 1 x i1> %2,
1781    i32 %3)
1782
1783  ret <vscale x 1 x i64> %a
1784}
1785
; Masked tied-operand vwadd.wx, i64 result with i32 scalar (SEW=32, LMUL=1):
; expect an in-place vwadd.wx on v8 with tu policy and no register move.
define <vscale x 2 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32(<vscale x 2 x i64> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv2i64_nxv2i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.mask.nxv2i64.i32(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}
1802
; Masked tied-operand vwadd.wx, i64 result with i32 scalar (SEW=32, LMUL=2):
; expect an in-place vwadd.wx on v8 with tu policy and no register move.
define <vscale x 4 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32(<vscale x 4 x i64> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv4i64_nxv4i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.mask.nxv4i64.i32(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}
1819
; Masked tied-operand vwadd.wx, i64 result with i32 scalar (SEW=32, LMUL=4):
; expect an in-place vwadd.wx on v8 with tu policy and no register move.
define <vscale x 8 x i64> @intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32(<vscale x 8 x i64> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_mask_wx_tie_nxv8i64_nxv8i64_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, tu, mu
; CHECK-NEXT:    vwadd.wx v8, v8, a0, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.mask.nxv8i64.i32(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}
1836
; Unmasked vwadd.wv where the narrow operand (%0, in v8) precedes the wide
; operand (%1, in v9) in the signature, so the result cannot simply reuse v8:
; it is computed into a temporary (v25) and copied back with vmv1r.v.
define <vscale x 1 x i16> @intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i16_nxv1i16_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vwadd.wv v25, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vwadd.w.nxv1i16.nxv1i8(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i8> %0,
    i32 %2)

  ret <vscale x 1 x i16> %a
}
1852
; Unmasked untied vwadd.wv (narrow operand arrives in v8, wide in v9):
; result goes to a temporary (v25) and is copied back with vmv1r.v.
define <vscale x 2 x i16> @intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i16_nxv2i16_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v25, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vwadd.w.nxv2i16.nxv2i8(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i8> %0,
    i32 %2)

  ret <vscale x 2 x i16> %a
}
1868
; Unmasked untied vwadd.wv (narrow operand arrives in v8, wide in v9):
; result goes to a temporary (v25) and is copied back with vmv1r.v.
define <vscale x 4 x i16> @intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i16_nxv4i16_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v25, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vwadd.w.nxv4i16.nxv4i8(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i8> %0,
    i32 %2)

  ret <vscale x 4 x i16> %a
}
1884
; Unmasked untied vwadd.wv at LMUL=1 narrow / 2-register wide result:
; computed into temporary group v26 and copied back with vmv2r.v.
define <vscale x 8 x i16> @intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i16_nxv8i16_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v26, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vwadd.w.nxv8i16.nxv8i8(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i8> %0,
    i32 %2)

  ret <vscale x 8 x i16> %a
}
1900
; Unmasked untied vwadd.wv at LMUL=2 narrow / 4-register wide result:
; computed into temporary group v28 and copied back with vmv4r.v.
define <vscale x 16 x i16> @intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv16i16_nxv16i16_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v28, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vwadd.w.nxv16i16.nxv16i8(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i8> %0,
    i32 %2)

  ret <vscale x 16 x i16> %a
}
1916
; Unmasked untied vwadd.wv at LMUL=4 narrow / 8-register wide result:
; computed into temporary group v24 and copied back with vmv8r.v.
define <vscale x 32 x i16> @intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv32i16_nxv32i16_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vwadd.w.nxv32i16.nxv32i8(
    <vscale x 32 x i16> %1,
    <vscale x 32 x i8> %0,
    i32 %2)

  ret <vscale x 32 x i16> %a
}
1932
; Unmasked untied vwadd.wv, i16 -> i32 widening (SEW=16): result goes to a
; temporary (v25) and is copied back with vmv1r.v.
define <vscale x 1 x i32> @intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i32_nxv1i32_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vwadd.wv v25, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vwadd.w.nxv1i32.nxv1i16(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i16> %0,
    i32 %2)

  ret <vscale x 1 x i32> %a
}
1948
; Unmasked untied vwadd.wv, i16 -> i32 widening (SEW=16): result goes to a
; temporary (v25) and is copied back with vmv1r.v.
define <vscale x 2 x i32> @intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i32_nxv2i32_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v25, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vwadd.w.nxv2i32.nxv2i16(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i16> %0,
    i32 %2)

  ret <vscale x 2 x i32> %a
}
1964
; Unmasked untied vwadd.wv, i16 -> i32 widening, 2-register wide result:
; computed into temporary group v26 and copied back with vmv2r.v.
define <vscale x 4 x i32> @intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i32_nxv4i32_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v26, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vwadd.w.nxv4i32.nxv4i16(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i16> %0,
    i32 %2)

  ret <vscale x 4 x i32> %a
}
1980
; Unmasked untied vwadd.wv, i16 -> i32 widening, 4-register wide result:
; computed into temporary group v28 and copied back with vmv4r.v.
define <vscale x 8 x i32> @intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i32_nxv8i32_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v28, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vwadd.w.nxv8i32.nxv8i16(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i16> %0,
    i32 %2)

  ret <vscale x 8 x i32> %a
}
1996
; Unmasked untied vwadd.wv, i32 -> i64 widening (SEW=32): result goes to a
; temporary (v25) and is copied back with vmv1r.v.
define <vscale x 1 x i64> @intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv1i64_nxv1i64_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vwadd.wv v25, v9, v8
; CHECK-NEXT:    vmv1r.v v8, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vwadd.w.nxv1i64.nxv1i32(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i32> %0,
    i32 %2)

  ret <vscale x 1 x i64> %a
}
2012
; Unmasked untied vwadd.wv, i32 -> i64 widening, 2-register wide result:
; computed into temporary group v26 and copied back with vmv2r.v.
define <vscale x 2 x i64> @intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv2i64_nxv2i64_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vwadd.wv v26, v10, v8
; CHECK-NEXT:    vmv2r.v v8, v26
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vwadd.w.nxv2i64.nxv2i32(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i32> %0,
    i32 %2)

  ret <vscale x 2 x i64> %a
}
2028
; Unmasked untied vwadd.wv, i32 -> i64 widening, 4-register wide result:
; computed into temporary group v28 and copied back with vmv4r.v.
define <vscale x 4 x i64> @intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv4i64_nxv4i64_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vwadd.wv v28, v12, v8
; CHECK-NEXT:    vmv4r.v v8, v28
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vwadd.w.nxv4i64.nxv4i32(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i32> %0,
    i32 %2)

  ret <vscale x 4 x i64> %a
}
2044
; Unmasked untied vwadd.wv, i32 -> i64 widening, 8-register wide result:
; computed into temporary group v24 and copied back with vmv8r.v.
define <vscale x 8 x i64> @intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vwadd.w_wv_untie_nxv8i64_nxv8i64_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vwadd.wv v24, v16, v8
; CHECK-NEXT:    vmv8r.v v8, v24
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vwadd.w.nxv8i64.nxv8i32(
    <vscale x 8 x i64> %1,
    <vscale x 8 x i32> %0,
    i32 %2)

  ret <vscale x 8 x i64> %a
}
2060