; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
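; Tests for the llvm.riscv.vmadc.* intrinsics, which lower to the unmasked
; vmadc.{vv,vx,vi} instructions: the carry-out of a vector add, written to a
; mask register, for every supported element width and LMUL.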
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i32);

define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i32 %2)

  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

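; Vector-scalar (vmadc.vx) forms. For i8/i16/i32 scalars the value arrives in
; a0 and the vector length in a1.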
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  i32);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  i32);

define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  i32);

define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i32);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  i32);

define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  i32);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

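; On riscv32 an i64 scalar does not fit in a single GPR, so the a0/a1 pair is
; stored to the stack and splatted with a zero-stride vlse64.v, and the carry
; is computed with the vector-vector form instead of vmadc.vx.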
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  i32);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v25, (a0), zero
; CHECK-NEXT:    vmadc.vv v0, v8, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  i32);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vmadc.vv v0, v8, v26
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  i32);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vmadc.vv v0, v8, v28
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  i32);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v16, (a0), zero
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

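; Vector-immediate (vmadc.vi) forms, using 9 and -9, which both fit in the
; 5-bit signed immediate field.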
define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 -9,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 -9,
    i32 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 -9,
    i32 %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 9,
    i32 %1)

  ret <vscale x 64 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -9,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 9,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 -9,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 9,
    i32 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 -9,
    i32 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 9,
    i32 %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 -9,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 -9,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 9,
    i32 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 -9,
    i32 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 -9,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 -9,
    i32 %1)

  ret <vscale x 8 x i1> %a
}
