1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
4declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
5  <vscale x 1 x i8>,
6  <vscale x 1 x i8>,
7  <vscale x 1 x i1>,
8  i64);
9
define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i8> %res
}
25
26declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
27  <vscale x 2 x i8>,
28  <vscale x 2 x i8>,
29  <vscale x 2 x i1>,
30  i64);
31
define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i8> %res
}
47
48declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
49  <vscale x 4 x i8>,
50  <vscale x 4 x i8>,
51  <vscale x 4 x i1>,
52  i64);
53
define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i8> %res
}
69
70declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
71  <vscale x 8 x i8>,
72  <vscale x 8 x i8>,
73  <vscale x 8 x i1>,
74  i64);
75
define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i8> %res
}
91
92declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
93  <vscale x 16 x i8>,
94  <vscale x 16 x i8>,
95  <vscale x 16 x i1>,
96  i64);
97
define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3)
  ret <vscale x 16 x i8> %res
}
113
114declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
115  <vscale x 32 x i8>,
116  <vscale x 32 x i8>,
117  <vscale x 32 x i1>,
118  i64);
119
define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3)
  ret <vscale x 32 x i8> %res
}
135
136declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
137  <vscale x 64 x i8>,
138  <vscale x 64 x i8>,
139  <vscale x 64 x i1>,
140  i64);
141
define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i64 %3)
  ret <vscale x 64 x i8> %res
}
157
158declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
159  <vscale x 1 x i16>,
160  <vscale x 1 x i16>,
161  <vscale x 1 x i1>,
162  i64);
163
define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i16> %res
}
179
180declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
181  <vscale x 2 x i16>,
182  <vscale x 2 x i16>,
183  <vscale x 2 x i1>,
184  i64);
185
define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i16> %res
}
201
202declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
203  <vscale x 4 x i16>,
204  <vscale x 4 x i16>,
205  <vscale x 4 x i1>,
206  i64);
207
define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i16> %res
}
223
224declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
225  <vscale x 8 x i16>,
226  <vscale x 8 x i16>,
227  <vscale x 8 x i1>,
228  i64);
229
define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i16> %res
}
245
246declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
247  <vscale x 16 x i16>,
248  <vscale x 16 x i16>,
249  <vscale x 16 x i1>,
250  i64);
251
define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3)
  ret <vscale x 16 x i16> %res
}
267
268declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
269  <vscale x 32 x i16>,
270  <vscale x 32 x i16>,
271  <vscale x 32 x i1>,
272  i64);
273
define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i64 %3)
  ret <vscale x 32 x i16> %res
}
289
290declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
291  <vscale x 1 x i32>,
292  <vscale x 1 x i32>,
293  <vscale x 1 x i1>,
294  i64);
295
define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i32> %res
}
311
312declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
313  <vscale x 2 x i32>,
314  <vscale x 2 x i32>,
315  <vscale x 2 x i1>,
316  i64);
317
define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i32> %res
}
333
334declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
335  <vscale x 4 x i32>,
336  <vscale x 4 x i32>,
337  <vscale x 4 x i1>,
338  i64);
339
define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i32> %res
}
355
356declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
357  <vscale x 8 x i32>,
358  <vscale x 8 x i32>,
359  <vscale x 8 x i1>,
360  i64);
361
define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i32> %res
}
377
378declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
379  <vscale x 16 x i32>,
380  <vscale x 16 x i32>,
381  <vscale x 16 x i1>,
382  i64);
383
define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i64 %3)
  ret <vscale x 16 x i32> %res
}
399
400declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
401  <vscale x 1 x i64>,
402  <vscale x 1 x i64>,
403  <vscale x 1 x i1>,
404  i64);
405
define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i64> %res
}
421
422declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
423  <vscale x 2 x i64>,
424  <vscale x 2 x i64>,
425  <vscale x 2 x i1>,
426  i64);
427
define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i64> %res
}
443
444declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
445  <vscale x 4 x i64>,
446  <vscale x 4 x i64>,
447  <vscale x 4 x i1>,
448  i64);
449
define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i64> %res
}
465
466declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
467  <vscale x 8 x i64>,
468  <vscale x 8 x i64>,
469  <vscale x 8 x i1>,
470  i64);
471
define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-vector add-with-carry: %0 + %1 + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i64> %res
}
487
488declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
489  <vscale x 1 x i8>,
490  i8,
491  <vscale x 1 x i1>,
492  i64);
493
define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i8> %res
}
509
510declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
511  <vscale x 2 x i8>,
512  i8,
513  <vscale x 2 x i1>,
514  i64);
515
define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i8> %res
}
531
532declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
533  <vscale x 4 x i8>,
534  i8,
535  <vscale x 4 x i1>,
536  i64);
537
define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i8> %res
}
553
554declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
555  <vscale x 8 x i8>,
556  i8,
557  <vscale x 8 x i1>,
558  i64);
559
define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i8> %res
}
575
576declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
577  <vscale x 16 x i8>,
578  i8,
579  <vscale x 16 x i1>,
580  i64);
581
define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i64 %3)
  ret <vscale x 16 x i8> %res
}
597
598declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
599  <vscale x 32 x i8>,
600  i8,
601  <vscale x 32 x i1>,
602  i64);
603
define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i64 %3)
  ret <vscale x 32 x i8> %res
}
619
620declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
621  <vscale x 64 x i8>,
622  i8,
623  <vscale x 64 x i1>,
624  i64);
625
define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i64 %3)
  ret <vscale x 64 x i8> %res
}
641
642declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
643  <vscale x 1 x i16>,
644  i16,
645  <vscale x 1 x i1>,
646  i64);
647
define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i16> %res
}
663
664declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
665  <vscale x 2 x i16>,
666  i16,
667  <vscale x 2 x i1>,
668  i64);
669
define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i16> %res
}
685
686declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
687  <vscale x 4 x i16>,
688  i16,
689  <vscale x 4 x i1>,
690  i64);
691
define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i16> %res
}
707
708declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
709  <vscale x 8 x i16>,
710  i16,
711  <vscale x 8 x i1>,
712  i64);
713
define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i16> %res
}
729
730declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
731  <vscale x 16 x i16>,
732  i16,
733  <vscale x 16 x i1>,
734  i64);
735
define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i64 %3)
  ret <vscale x 16 x i16> %res
}
751
752declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
753  <vscale x 32 x i16>,
754  i16,
755  <vscale x 32 x i1>,
756  i64);
757
define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i64 %3)
  ret <vscale x 32 x i16> %res
}
773
774declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
775  <vscale x 1 x i32>,
776  i32,
777  <vscale x 1 x i1>,
778  i64);
779
define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i32> %res
}
795
796declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
797  <vscale x 2 x i32>,
798  i32,
799  <vscale x 2 x i1>,
800  i64);
801
define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i32> %res
}
817
818declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
819  <vscale x 4 x i32>,
820  i32,
821  <vscale x 4 x i1>,
822  i64);
823
define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i32> %res
}
839
840declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
841  <vscale x 8 x i32>,
842  i32,
843  <vscale x 8 x i1>,
844  i64);
845
define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i32> %res
}
861
862declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
863  <vscale x 16 x i32>,
864  i32,
865  <vscale x 16 x i1>,
866  i64);
867
define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i64 %3)
  ret <vscale x 16 x i32> %res
}
883
884declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
885  <vscale x 1 x i64>,
886  i64,
887  <vscale x 1 x i1>,
888  i64);
889
define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i64 %3)
  ret <vscale x 1 x i64> %res
}
905
906declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
907  <vscale x 2 x i64>,
908  i64,
909  <vscale x 2 x i1>,
910  i64);
911
define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i64 %3)
  ret <vscale x 2 x i64> %res
}
927
928declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
929  <vscale x 4 x i64>,
930  i64,
931  <vscale x 4 x i1>,
932  i64);
933
define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i64 %3)
  ret <vscale x 4 x i64> %res
}
949
950declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
951  <vscale x 8 x i64>,
952  i64,
953  <vscale x 8 x i1>,
954  i64);
955
define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  ; Vector-scalar add-with-carry: %0 + splat(%1) + carry mask %2, vl = %3.
  %res = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i64 %3)
  ret <vscale x 8 x i64> %res
}
971
; Immediate (vim) form at e8/mf8: the constant operand 9 is folded into the
; vadc.vim immediate field; vl is taken from a0.
define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 9,
    <vscale x 1 x i1> %1,
    i64 %2)

  ret <vscale x 1 x i8> %a
}
987
; Immediate (vim) form at e8/mf4: negative constant -9 is likewise folded
; into the vadc.vim immediate field; vl is taken from a0.
define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 -9,
    <vscale x 2 x i1> %1,
    i64 %2)

  ret <vscale x 2 x i8> %a
}
1003
; Immediate (vim) form at e8/mf2 with constant 9; vl from a0.
define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 9,
    <vscale x 4 x i1> %1,
    i64 %2)

  ret <vscale x 4 x i8> %a
}
1019
; Immediate (vim) form at e8/m1 with constant -9; vl from a0.
define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 -9,
    <vscale x 8 x i1> %1,
    i64 %2)

  ret <vscale x 8 x i8> %a
}
1035
; Immediate (vim) form at e8/m2 with constant 9; vl from a0.
define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 9,
    <vscale x 16 x i1> %1,
    i64 %2)

  ret <vscale x 16 x i8> %a
}
1051
; Immediate (vim) form at e8/m4 with constant -9; vl from a0.
define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 -9,
    <vscale x 32 x i1> %1,
    i64 %2)

  ret <vscale x 32 x i8> %a
}
1067
; Immediate (vim) form at e8/m8 (largest i8 LMUL) with constant 9; vl from a0.
define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 9,
    <vscale x 64 x i1> %1,
    i64 %2)

  ret <vscale x 64 x i8> %a
}
1083
; Immediate (vim) form at e16/mf4 with constant -9; vl from a0.
define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -9,
    <vscale x 1 x i1> %1,
    i64 %2)

  ret <vscale x 1 x i16> %a
}
1099
; Immediate (vim) form at e16/mf2 with constant 9; vl from a0.
define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 9,
    <vscale x 2 x i1> %1,
    i64 %2)

  ret <vscale x 2 x i16> %a
}
1115
; Immediate (vim) form at e16/m1 with constant -9; vl from a0.
define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 -9,
    <vscale x 4 x i1> %1,
    i64 %2)

  ret <vscale x 4 x i16> %a
}
1131
; Immediate (vim) form at e16/m2 with constant 9; vl from a0.
define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 9,
    <vscale x 8 x i1> %1,
    i64 %2)

  ret <vscale x 8 x i16> %a
}
1147
; Immediate (vim) form at e16/m4 with constant -9; vl from a0.
define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 -9,
    <vscale x 16 x i1> %1,
    i64 %2)

  ret <vscale x 16 x i16> %a
}
1163
; Immediate (vim) form at e16/m8 (largest i16 LMUL) with constant 9; vl from a0.
define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 9,
    <vscale x 32 x i1> %1,
    i64 %2)

  ret <vscale x 32 x i16> %a
}
1179
; Immediate (vim) form at e32/mf2 with constant -9; vl from a0.
define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 -9,
    <vscale x 1 x i1> %1,
    i64 %2)

  ret <vscale x 1 x i32> %a
}
1195
; Immediate (vim) form at e32/m1 with constant 9; vl from a0.
define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 9,
    <vscale x 2 x i1> %1,
    i64 %2)

  ret <vscale x 2 x i32> %a
}
1211
; Immediate (vim) form at e32/m2 with constant -9; vl from a0.
define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 -9,
    <vscale x 4 x i1> %1,
    i64 %2)

  ret <vscale x 4 x i32> %a
}
1227
; Immediate (vim) form at e32/m4 with constant 9; vl from a0.
define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 9,
    <vscale x 8 x i1> %1,
    i64 %2)

  ret <vscale x 8 x i32> %a
}
1243
; Immediate (vim) form at e32/m8 (largest i32 LMUL) with constant -9; vl from a0.
define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 -9,
    <vscale x 16 x i1> %1,
    i64 %2)

  ret <vscale x 16 x i32> %a
}
1259
; Immediate (vim) form at e64/m1: even for SEW=64 the constant 9 is folded
; into the vadc.vim immediate rather than materialized in a register.
define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    <vscale x 1 x i1> %1,
    i64 %2)

  ret <vscale x 1 x i64> %a
}
1275
; Immediate (vim) form at e64/m2 with constant -9; vl from a0.
define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 -9,
    <vscale x 2 x i1> %1,
    i64 %2)

  ret <vscale x 2 x i64> %a
}
1291
; Immediate (vim) form at e64/m4 with constant 9; vl from a0.
define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    <vscale x 4 x i1> %1,
    i64 %2)

  ret <vscale x 4 x i64> %a
}
1307
; Immediate (vim) form at e64/m8 (largest i64 LMUL) with constant -9; vl from a0.
define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 -9,
    <vscale x 8 x i1> %1,
    i64 %2)

  ret <vscale x 8 x i64> %a
}
1323