1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
; vmadc.vv tests: carry-out of a vector-vector add, written as a mask to v0.
; For every element width (i8/i16/i32/i64) and scalable length, the
; intrinsic's trailing i64 operand is the requested vector length: it is
; passed in a0 and consumed by the expected vsetvli.  The second source
; register steps v9 -> v10 -> v12 -> v16 as LMUL grows through m2/m4/m8,
; matching the register-group alignment visible in the CHECK lines.
; NOTE: bodies and CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i64);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i64 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i64);

define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i64 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  i64);

define <vscale x 64 x i1> @intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv64i1_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    i64 %2)

  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i64);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i64 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  i64);

define <vscale x 32 x i1> @intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv32i1_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    i64 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  i64);

define <vscale x 16 x i1> @intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv16i1_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    i64 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv1i1_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv2i1_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv4i1_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vv_nxv8i1_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vmadc.vv v0, v8, v16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}
443
; vmadc.vx tests: carry-out of a vector-scalar add, mask result in v0.
; The scalar operand arrives in a0 and the vector length (the intrinsic's
; trailing i64 operand) in a1, so the expected sequence is a vsetvli on a1
; followed by vmadc.vx v0, v8, a0 at the SEW/LMUL matching the vector type.
; NOTE: bodies and CHECK lines are autogenerated — regenerate with
; utils/update_llc_test_checks.py rather than editing by hand.
declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  i64);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  i64);

define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  i64);

define <vscale x 64 x i1> @intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv64i1_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    i64 %2)

  ret <vscale x 64 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i64);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  i64);

define <vscale x 32 x i1> @intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    i64 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  i64);

define <vscale x 16 x i1> @intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i1> @intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv1i1_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i1> @intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv2i1_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i1> @intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv4i1_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  i64);

define <vscale x 8 x i1> @intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vx_nxv8i1_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vmadc.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}
883
884define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
885; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i8_i8:
886; CHECK:       # %bb.0: # %entry
887; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
888; CHECK-NEXT:    vmadc.vi v0, v8, 9
889; CHECK-NEXT:    ret
890entry:
891  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i8.i8(
892    <vscale x 1 x i8> %0,
893    i8 9,
894    i64 %1)
895
896  ret <vscale x 1 x i1> %a
897}
898
899define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
900; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i8_i8:
901; CHECK:       # %bb.0: # %entry
902; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
903; CHECK-NEXT:    vmadc.vi v0, v8, -9
904; CHECK-NEXT:    ret
905entry:
906  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i8.i8(
907    <vscale x 2 x i8> %0,
908    i8 -9,
909    i64 %1)
910
911  ret <vscale x 2 x i1> %a
912}
913
914define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
915; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i8_i8:
916; CHECK:       # %bb.0: # %entry
917; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
918; CHECK-NEXT:    vmadc.vi v0, v8, 9
919; CHECK-NEXT:    ret
920entry:
921  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i8.i8(
922    <vscale x 4 x i8> %0,
923    i8 9,
924    i64 %1)
925
926  ret <vscale x 4 x i1> %a
927}
928
929define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
930; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i8_i8:
931; CHECK:       # %bb.0: # %entry
932; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
933; CHECK-NEXT:    vmadc.vi v0, v8, -9
934; CHECK-NEXT:    ret
935entry:
936  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i8.i8(
937    <vscale x 8 x i8> %0,
938    i8 -9,
939    i64 %1)
940
941  ret <vscale x 8 x i1> %a
942}
943
944define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
945; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i8_i8:
946; CHECK:       # %bb.0: # %entry
947; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
948; CHECK-NEXT:    vmadc.vi v0, v8, 9
949; CHECK-NEXT:    ret
950entry:
951  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i8.i8(
952    <vscale x 16 x i8> %0,
953    i8 9,
954    i64 %1)
955
956  ret <vscale x 16 x i1> %a
957}
958
959define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
960; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i8_i8:
961; CHECK:       # %bb.0: # %entry
962; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
963; CHECK-NEXT:    vmadc.vi v0, v8, -9
964; CHECK-NEXT:    ret
965entry:
966  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i8.i8(
967    <vscale x 32 x i8> %0,
968    i8 -9,
969    i64 %1)
970
971  ret <vscale x 32 x i1> %a
972}
973
974define <vscale x 64 x i1> @intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8(<vscale x 64 x i8> %0, i64 %1) nounwind {
975; CHECK-LABEL: intrinsic_vmadc_vi_nxv64i1_nxv64i8_i8:
976; CHECK:       # %bb.0: # %entry
977; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
978; CHECK-NEXT:    vmadc.vi v0, v8, 9
979; CHECK-NEXT:    ret
980entry:
981  %a = call <vscale x 64 x i1> @llvm.riscv.vmadc.nxv64i8.i8(
982    <vscale x 64 x i8> %0,
983    i8 9,
984    i64 %1)
985
986  ret <vscale x 64 x i1> %a
987}
988
; vmadc.vi selection for nxv1i16: expect e16/mf4 vsetvli (fractional LMUL) and
; vmadc.vi v0, v8, -9.
define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -9,
    i64 %1)

  ret <vscale x 1 x i1> %a
}
1003
; vmadc.vi selection for nxv2i16: expect e16/mf2 vsetvli and vmadc.vi v0, v8, 9.
define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 9,
    i64 %1)

  ret <vscale x 2 x i1> %a
}
1018
; vmadc.vi selection for nxv4i16: expect e16/m1 vsetvli and vmadc.vi v0, v8, -9.
define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 -9,
    i64 %1)

  ret <vscale x 4 x i1> %a
}
1033
; vmadc.vi selection for nxv8i16: expect e16/m2 vsetvli and vmadc.vi v0, v8, 9.
define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 9,
    i64 %1)

  ret <vscale x 8 x i1> %a
}
1048
; vmadc.vi selection for nxv16i16: expect e16/m4 vsetvli and vmadc.vi v0, v8, -9.
define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 -9,
    i64 %1)

  ret <vscale x 16 x i1> %a
}
1063
; vmadc.vi selection for nxv32i16: expect e16/m8 vsetvli (largest i16 LMUL) and
; vmadc.vi v0, v8, 9.
define <vscale x 32 x i1> @intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16(<vscale x 32 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv32i1_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 9,
    i64 %1)

  ret <vscale x 32 x i1> %a
}
1078
; vmadc.vi selection for nxv1i32: expect e32/mf2 vsetvli and vmadc.vi v0, v8, -9.
define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 -9,
    i64 %1)

  ret <vscale x 1 x i1> %a
}
1093
; vmadc.vi selection for nxv2i32: expect e32/m1 vsetvli and vmadc.vi v0, v8, 9.
define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 9,
    i64 %1)

  ret <vscale x 2 x i1> %a
}
1108
; vmadc.vi selection for nxv4i32: expect e32/m2 vsetvli and vmadc.vi v0, v8, -9.
define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 -9,
    i64 %1)

  ret <vscale x 4 x i1> %a
}
1123
; vmadc.vi selection for nxv8i32: expect e32/m4 vsetvli and vmadc.vi v0, v8, 9.
define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 9,
    i64 %1)

  ret <vscale x 8 x i1> %a
}
1138
; vmadc.vi selection for nxv16i32: expect e32/m8 vsetvli (largest i32 LMUL) and
; vmadc.vi v0, v8, -9.
define <vscale x 16 x i1> @intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32(<vscale x 16 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv16i1_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 -9,
    i64 %1)

  ret <vscale x 16 x i1> %a
}
1153
; vmadc.vi selection for nxv1i64 on riscv64: the i64 scalar 9 fits simm5, so a
; single vmadc.vi is expected (no scalar materialization) with e64/m1 vsetvli.
define <vscale x 1 x i1> @intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv1i1_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 1 x i1> %a
}
1168
; vmadc.vi selection for nxv2i64: expect e64/m2 vsetvli and vmadc.vi v0, v8, -9.
define <vscale x 2 x i1> @intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv2i1_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 -9,
    i64 %1)

  ret <vscale x 2 x i1> %a
}
1183
; vmadc.vi selection for nxv4i64: expect e64/m4 vsetvli and vmadc.vi v0, v8, 9.
define <vscale x 4 x i1> @intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv4i1_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    i64 %1)

  ret <vscale x 4 x i1> %a
}
1198
; vmadc.vi selection for nxv8i64: expect e64/m8 vsetvli (largest i64 LMUL) and
; vmadc.vi v0, v8, -9.
define <vscale x 8 x i1> @intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmadc_vi_nxv8i1_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vmadc.vi v0, v8, -9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 -9,
    i64 %1)

  ret <vscale x 8 x i1> %a
}
1213