; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
; llvm.riscv.vadc, vector-vector form (vadc.vvm): add with carry-in taken from
; the mask register v0. SEW=8 cases, LMUL mf8 through m8. Each line previously
; had a stray fused line number (extraction garbling) which made the IR invalid;
; the numbers are removed here, test content otherwise unchanged.
declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i8_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i8_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i8_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i8_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i8_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
  <vscale x 64 x i8>,
  <vscale x 64 x i8>,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i8> @intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8(<vscale x 64 x i8> %0, <vscale x 64 x i8> %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv64i8_nxv64i8_nxv64i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.nxv64i8(
    <vscale x 64 x i8> %0,
    <vscale x 64 x i8> %1,
    <vscale x 64 x i1> %2,
    i32 %3)

  ret <vscale x 64 x i8> %a
}

; vadc.vvm, SEW=16 cases (LMUL mf4 through m8). Stray fused line numbers
; (extraction garbling) removed; test content otherwise unchanged.
declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i16_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i16_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i16_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i16_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
  <vscale x 32 x i16>,
  <vscale x 32 x i16>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16(<vscale x 32 x i16> %0, <vscale x 32 x i16> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv32i16_nxv32i16_nxv32i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.nxv32i16(
    <vscale x 32 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i16> %a
}

; vadc.vvm, SEW=32 cases (LMUL mf2 through m8). Stray fused line numbers
; (extraction garbling) removed; test content otherwise unchanged.
declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i32_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i32_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i32_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
  <vscale x 16 x i32>,
  <vscale x 16 x i32>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32(<vscale x 16 x i32> %0, <vscale x 16 x i32> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv16i32_nxv16i32_nxv16i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.nxv16i32(
    <vscale x 16 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

; vadc.vvm, SEW=64 cases (LMUL m1 through m8). Stray fused line numbers
; (extraction garbling) removed; test content otherwise unchanged.
declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i64> @intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}

declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i64> @intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv2i64_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v10, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i64> %a
}

declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i64> @intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv4i64_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v12, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}

declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
  <vscale x 8 x i64>,
  <vscale x 8 x i64>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i64> @intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64(<vscale x 8 x i64> %0, <vscale x 8 x i64> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vvm_nxv8i64_nxv8i64_nxv8i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.nxv8i64(
    <vscale x 8 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}

; llvm.riscv.vadc, vector-scalar form (vadc.vxm): scalar operand in a GPR,
; carry-in mask in v0. SEW=8 cases. Stray fused line numbers (extraction
; garbling) removed; test content otherwise unchanged.
declare <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i8> @intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i8> %a
}

declare <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i8> @intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i8> %a
}

declare <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i8> @intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i8> %a
}

declare <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i8> @intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}

declare <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i8> @intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i8> %a
}

declare <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i8> @intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i8> %a
}

declare <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
  <vscale x 64 x i8>,
  i8,
  <vscale x 64 x i1>,
  i32);

define <vscale x 64 x i8> @intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, i8 %1, <vscale x 64 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 %1,
    <vscale x 64 x i1> %2,
    i32 %3)

  ret <vscale x 64 x i8> %a
}

; vadc.vxm, SEW=16 cases. Stray fused line numbers (extraction garbling)
; removed; test content otherwise unchanged.
declare <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i16> @intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i16> %a
}

declare <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i16> @intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i16> %a
}

declare <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i16> @intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}

declare <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i16> @intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i16> %a
}

declare <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i16> @intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i16> %a
}

declare <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
  <vscale x 32 x i16>,
  i16,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i16> @intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, i16 %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 %1,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i16> %a
}

; vadc.vxm, SEW=32 cases. Stray fused line numbers (extraction garbling)
; removed; test content otherwise unchanged.
declare <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i32> @intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i32> %a
}

declare <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i32> @intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}

declare <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i32> @intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i32> %a
}

declare <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i32> @intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i32> %a
}

declare <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
  <vscale x 16 x i32>,
  i32,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i32> @intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, i32 %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m8, ta, mu
; CHECK-NEXT:    vadc.vxm v8, v8, a0, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 %1,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i32> %a
}

884declare <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
885  <vscale x 1 x i64>,
886  i64,
887  <vscale x 1 x i1>,
888  i32);
889
890define <vscale x 1 x i64> @intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
891; CHECK-LABEL: intrinsic_vadc_vxm_nxv1i64_nxv1i64_i64:
892; CHECK:       # %bb.0: # %entry
893; CHECK-NEXT:    addi sp, sp, -16
894; CHECK-NEXT:    sw a1, 12(sp)
895; CHECK-NEXT:    sw a0, 8(sp)
896; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
897; CHECK-NEXT:    addi a0, sp, 8
898; CHECK-NEXT:    vlse64.v v25, (a0), zero
899; CHECK-NEXT:    vadc.vvm v8, v8, v25, v0
900; CHECK-NEXT:    addi sp, sp, 16
901; CHECK-NEXT:    ret
902entry:
903  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
904    <vscale x 1 x i64> %0,
905    i64 %1,
906    <vscale x 1 x i1> %2,
907    i32 %3)
908
909  ret <vscale x 1 x i64> %a
910}
911
912declare <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
913  <vscale x 2 x i64>,
914  i64,
915  <vscale x 2 x i1>,
916  i32);
917
918define <vscale x 2 x i64> @intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
919; CHECK-LABEL: intrinsic_vadc_vxm_nxv2i64_nxv2i64_i64:
920; CHECK:       # %bb.0: # %entry
921; CHECK-NEXT:    addi sp, sp, -16
922; CHECK-NEXT:    sw a1, 12(sp)
923; CHECK-NEXT:    sw a0, 8(sp)
924; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
925; CHECK-NEXT:    addi a0, sp, 8
926; CHECK-NEXT:    vlse64.v v26, (a0), zero
927; CHECK-NEXT:    vadc.vvm v8, v8, v26, v0
928; CHECK-NEXT:    addi sp, sp, 16
929; CHECK-NEXT:    ret
930entry:
931  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
932    <vscale x 2 x i64> %0,
933    i64 %1,
934    <vscale x 2 x i1> %2,
935    i32 %3)
936
937  ret <vscale x 2 x i64> %a
938}
939
declare <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i32);

; vxm form, i64 scalar on RV32 at SEW=64/LMUL=4: same split-spill-splat
; sequence as the m2 case (sw a1/a0 to the stack, zero-stride vlse64.v),
; followed by vadc.vvm on the splatted operand.
define <vscale x 4 x i64> @intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vadc.vvm v8, v8, v28, v0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i64> %a
}
967
declare <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
  <vscale x 8 x i64>,
  i64,
  <vscale x 8 x i1>,
  i32);

; vxm form, i64 scalar on RV32 at SEW=64/LMUL=8: the scalar is spilled from
; a0/a1 and broadcast via zero-stride vlse64.v into v16 (the only remaining
; m8 group besides v8), then combined with vadc.vvm.
define <vscale x 8 x i64> @intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, i64 %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vadc_vxm_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m8, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v16, (a0), zero
; CHECK-NEXT:    vadc.vvm v8, v8, v16, v0
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 %1,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i64> %a
}
995
; Immediate (vim) form: constant -9 is encoded directly in vadc.vim at
; SEW=8/LMUL=1/8 — no scalar register or splat needed.
define <vscale x 1 x i8> @intrinsic_vadc_vim_nxv1i8_nxv1i8_i8(<vscale x 1 x i8> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i8_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i8> @llvm.riscv.vadc.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 -9,
    <vscale x 1 x i1> %1,
    i32 %2)

  ret <vscale x 1 x i8> %a
}
1011
; vim form: constant 9 encoded directly in vadc.vim at SEW=8/LMUL=1/4.
define <vscale x 2 x i8> @intrinsic_vadc_vim_nxv2i8_nxv2i8_i8(<vscale x 2 x i8> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i8_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i8> @llvm.riscv.vadc.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 9,
    <vscale x 2 x i1> %1,
    i32 %2)

  ret <vscale x 2 x i8> %a
}
1027
; vim form: constant -9 encoded directly in vadc.vim at SEW=8/LMUL=1/2.
define <vscale x 4 x i8> @intrinsic_vadc_vim_nxv4i8_nxv4i8_i8(<vscale x 4 x i8> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i8_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i8> @llvm.riscv.vadc.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 -9,
    <vscale x 4 x i1> %1,
    i32 %2)

  ret <vscale x 4 x i8> %a
}
1043
; vim form: constant 9 encoded directly in vadc.vim at SEW=8/LMUL=1.
define <vscale x 8 x i8> @intrinsic_vadc_vim_nxv8i8_nxv8i8_i8(<vscale x 8 x i8> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i8_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vadc.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 9,
    <vscale x 8 x i1> %1,
    i32 %2)

  ret <vscale x 8 x i8> %a
}
1059
; vim form: constant -9 encoded directly in vadc.vim at SEW=8/LMUL=2.
define <vscale x 16 x i8> @intrinsic_vadc_vim_nxv16i8_nxv16i8_i8(<vscale x 16 x i8> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i8_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i8> @llvm.riscv.vadc.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 -9,
    <vscale x 16 x i1> %1,
    i32 %2)

  ret <vscale x 16 x i8> %a
}
1075
; vim form: constant 9 encoded directly in vadc.vim at SEW=8/LMUL=4.
define <vscale x 32 x i8> @intrinsic_vadc_vim_nxv32i8_nxv32i8_i8(<vscale x 32 x i8> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i8_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i8> @llvm.riscv.vadc.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 9,
    <vscale x 32 x i1> %1,
    i32 %2)

  ret <vscale x 32 x i8> %a
}
1091
; vim form: constant -9 encoded directly in vadc.vim at SEW=8/LMUL=8.
define <vscale x 64 x i8> @intrinsic_vadc_vim_nxv64i8_nxv64i8_i8(<vscale x 64 x i8> %0, <vscale x 64 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv64i8_nxv64i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 64 x i8> @llvm.riscv.vadc.nxv64i8.i8(
    <vscale x 64 x i8> %0,
    i8 -9,
    <vscale x 64 x i1> %1,
    i32 %2)

  ret <vscale x 64 x i8> %a
}
1107
; vim form: constant 9 encoded directly in vadc.vim at SEW=16/LMUL=1/4.
define <vscale x 1 x i16> @intrinsic_vadc_vim_nxv1i16_nxv1i16_i16(<vscale x 1 x i16> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i16_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i16> @llvm.riscv.vadc.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 9,
    <vscale x 1 x i1> %1,
    i32 %2)

  ret <vscale x 1 x i16> %a
}
1123
; vim form: constant -9 encoded directly in vadc.vim at SEW=16/LMUL=1/2.
define <vscale x 2 x i16> @intrinsic_vadc_vim_nxv2i16_nxv2i16_i16(<vscale x 2 x i16> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i16_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i16> @llvm.riscv.vadc.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 -9,
    <vscale x 2 x i1> %1,
    i32 %2)

  ret <vscale x 2 x i16> %a
}
1139
; vim form: constant 9 encoded directly in vadc.vim at SEW=16/LMUL=1.
define <vscale x 4 x i16> @intrinsic_vadc_vim_nxv4i16_nxv4i16_i16(<vscale x 4 x i16> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i16_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vadc.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 9,
    <vscale x 4 x i1> %1,
    i32 %2)

  ret <vscale x 4 x i16> %a
}
1155
; vim form: constant -9 encoded directly in vadc.vim at SEW=16/LMUL=2.
define <vscale x 8 x i16> @intrinsic_vadc_vim_nxv8i16_nxv8i16_i16(<vscale x 8 x i16> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i16_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i16> @llvm.riscv.vadc.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 -9,
    <vscale x 8 x i1> %1,
    i32 %2)

  ret <vscale x 8 x i16> %a
}
1171
; vim form: constant 9 encoded directly in vadc.vim at SEW=16/LMUL=4.
define <vscale x 16 x i16> @intrinsic_vadc_vim_nxv16i16_nxv16i16_i16(<vscale x 16 x i16> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i16_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i16> @llvm.riscv.vadc.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 9,
    <vscale x 16 x i1> %1,
    i32 %2)

  ret <vscale x 16 x i16> %a
}
1187
; vim form: constant -9 encoded directly in vadc.vim at SEW=16/LMUL=8.
define <vscale x 32 x i16> @intrinsic_vadc_vim_nxv32i16_nxv32i16_i16(<vscale x 32 x i16> %0, <vscale x 32 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv32i16_nxv32i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i16> @llvm.riscv.vadc.nxv32i16.i16(
    <vscale x 32 x i16> %0,
    i16 -9,
    <vscale x 32 x i1> %1,
    i32 %2)

  ret <vscale x 32 x i16> %a
}
1203
; vim form: constant 9 encoded directly in vadc.vim at SEW=32/LMUL=1/2.
define <vscale x 1 x i32> @intrinsic_vadc_vim_nxv1i32_nxv1i32_i32(<vscale x 1 x i32> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i32_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i32> @llvm.riscv.vadc.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 9,
    <vscale x 1 x i1> %1,
    i32 %2)

  ret <vscale x 1 x i32> %a
}
1219
; vim form: constant -9 encoded directly in vadc.vim at SEW=32/LMUL=1.
define <vscale x 2 x i32> @intrinsic_vadc_vim_nxv2i32_nxv2i32_i32(<vscale x 2 x i32> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i32_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vadc.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 -9,
    <vscale x 2 x i1> %1,
    i32 %2)

  ret <vscale x 2 x i32> %a
}
1235
; vim form: constant 9 encoded directly in vadc.vim at SEW=32/LMUL=2.
define <vscale x 4 x i32> @intrinsic_vadc_vim_nxv4i32_nxv4i32_i32(<vscale x 4 x i32> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i32_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i32> @llvm.riscv.vadc.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 9,
    <vscale x 4 x i1> %1,
    i32 %2)

  ret <vscale x 4 x i32> %a
}
1251
; vim form: constant -9 encoded directly in vadc.vim at SEW=32/LMUL=4.
define <vscale x 8 x i32> @intrinsic_vadc_vim_nxv8i32_nxv8i32_i32(<vscale x 8 x i32> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i32_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i32> @llvm.riscv.vadc.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 -9,
    <vscale x 8 x i1> %1,
    i32 %2)

  ret <vscale x 8 x i32> %a
}
1267
; vim form: constant 9 encoded directly in vadc.vim at SEW=32/LMUL=8.
define <vscale x 16 x i32> @intrinsic_vadc_vim_nxv16i32_nxv16i32_i32(<vscale x 16 x i32> %0, <vscale x 16 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv16i32_nxv16i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i32> @llvm.riscv.vadc.nxv16i32.i32(
    <vscale x 16 x i32> %0,
    i32 9,
    <vscale x 16 x i1> %1,
    i32 %2)

  ret <vscale x 16 x i32> %a
}
1283
; vim form with an i64 immediate on RV32: unlike the vxm case, no stack
; splat is needed — the small constant 9 goes straight into vadc.vim at
; SEW=64/LMUL=1.
define <vscale x 1 x i64> @intrinsic_vadc_vim_nxv1i64_nxv1i64_i64(<vscale x 1 x i64> %0, <vscale x 1 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv1i64_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vadc.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    <vscale x 1 x i1> %1,
    i32 %2)

  ret <vscale x 1 x i64> %a
}
1299
; vim form: i64 constant -9 encoded directly in vadc.vim at SEW=64/LMUL=2
; (no stack splat, in contrast to the vxm i64 variants above).
define <vscale x 2 x i64> @intrinsic_vadc_vim_nxv2i64_nxv2i64_i64(<vscale x 2 x i64> %0, <vscale x 2 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv2i64_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i64> @llvm.riscv.vadc.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 -9,
    <vscale x 2 x i1> %1,
    i32 %2)

  ret <vscale x 2 x i64> %a
}
1315
; vim form: i64 constant 9 encoded directly in vadc.vim at SEW=64/LMUL=4.
define <vscale x 4 x i64> @intrinsic_vadc_vim_nxv4i64_nxv4i64_i64(<vscale x 4 x i64> %0, <vscale x 4 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv4i64_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, 9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i64> @llvm.riscv.vadc.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    <vscale x 4 x i1> %1,
    i32 %2)

  ret <vscale x 4 x i64> %a
}
1331
; vim form: i64 constant -9 encoded directly in vadc.vim at SEW=64/LMUL=8.
define <vscale x 8 x i64> @intrinsic_vadc_vim_nxv8i64_nxv8i64_i64(<vscale x 8 x i64> %0, <vscale x 8 x i1> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vadc_vim_nxv8i64_nxv8i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, ta, mu
; CHECK-NEXT:    vadc.vim v8, v8, -9, v0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i64> @llvm.riscv.vadc.nxv8i64.i64(
    <vscale x 8 x i64> %0,
    i64 -9,
    <vscale x 8 x i1> %1,
    i32 %2)

  ret <vscale x 8 x i64> %a
}
1347