1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
4declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
5  <vscale x 8 x i8>,
6  <vscale x 1 x i8>,
7  <vscale x 8 x i8>,
8  i32);
9
; Unmasked AND-reduction of an nxv1i8 vector into an nxv8i8 destination:
; expects e8/mf8 vsetvli with tail-undisturbed (tu), vredand.vs v8, v9, v10.
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}
25
26declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
27  <vscale x 8 x i8>,
28  <vscale x 1 x i8>,
29  <vscale x 8 x i8>,
30  <vscale x 1 x i1>,
31  i32);
32
; Masked variant of the e8/mf8 case: mask register in v0, instruction carries v0.t.
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 1 x i8> %1, <vscale x 8 x i8> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv1i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv1i8(
    <vscale x 8 x i8> %0,
    <vscale x 1 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}
49
50declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
51  <vscale x 8 x i8>,
52  <vscale x 2 x i8>,
53  <vscale x 8 x i8>,
54  i32);
55
; Unmasked AND-reduction of an nxv2i8 vector: expects e8/mf4, tu, vredand.vs v8, v9, v10.
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}
71
72declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
73  <vscale x 8 x i8>,
74  <vscale x 2 x i8>,
75  <vscale x 8 x i8>,
76  <vscale x 2 x i1>,
77  i32);
78
; Masked variant of the e8/mf4 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 2 x i8> %1, <vscale x 8 x i8> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv2i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv2i8(
    <vscale x 8 x i8> %0,
    <vscale x 2 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}
95
96declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
97  <vscale x 8 x i8>,
98  <vscale x 4 x i8>,
99  <vscale x 8 x i8>,
100  i32);
101
; Unmasked AND-reduction of an nxv4i8 vector: expects e8/mf2, tu, vredand.vs v8, v9, v10.
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}
117
118declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
119  <vscale x 8 x i8>,
120  <vscale x 4 x i8>,
121  <vscale x 8 x i8>,
122  <vscale x 4 x i1>,
123  i32);
124
; Masked variant of the e8/mf2 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 4 x i8> %1, <vscale x 8 x i8> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv4i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv4i8(
    <vscale x 8 x i8> %0,
    <vscale x 4 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}
141
142declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
143  <vscale x 8 x i8>,
144  <vscale x 8 x i8>,
145  <vscale x 8 x i8>,
146  i32);
147
; Unmasked AND-reduction of an nxv8i8 vector: expects e8/m1, tu, vredand.vs v8, v9, v10.
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}
163
164declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
165  <vscale x 8 x i8>,
166  <vscale x 8 x i8>,
167  <vscale x 8 x i8>,
168  <vscale x 8 x i1>,
169  i32);
170
; Masked variant of the e8/m1 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}
187
188declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
189  <vscale x 8 x i8>,
190  <vscale x 16 x i8>,
191  <vscale x 8 x i8>,
192  i32);
193
; Unmasked AND-reduction of an nxv16i8 vector: LMUL grows to m2, so the wide source
; lands in v10 and the scalar operand in v9 — vredand.vs v8, v10, v9.
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}
209
210declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
211  <vscale x 8 x i8>,
212  <vscale x 16 x i8>,
213  <vscale x 8 x i8>,
214  <vscale x 16 x i1>,
215  i32);
216
; Masked variant of the e8/m2 case: wide source in v10, mask in v0 (v0.t).
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 16 x i8> %1, <vscale x 8 x i8> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv16i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv16i8(
    <vscale x 8 x i8> %0,
    <vscale x 16 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}
233
234declare <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
235  <vscale x 8 x i8>,
236  <vscale x 32 x i8>,
237  <vscale x 8 x i8>,
238  i32);
239
; Unmasked AND-reduction of an nxv32i8 vector: e8/m4, wide source in v12 — vredand.vs v8, v12, v9.
define <vscale x 8 x i8> @intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %3)

  ret <vscale x 8 x i8> %a
}
255
256declare <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
257  <vscale x 8 x i8>,
258  <vscale x 32 x i8>,
259  <vscale x 8 x i8>,
260  <vscale x 32 x i1>,
261  i32);
262
; Masked variant of the e8/m4 case: wide source in v12, mask in v0 (v0.t).
define <vscale x 8 x i8> @intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 32 x i8> %1, <vscale x 8 x i8> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv8i8_nxv32i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i8> @llvm.riscv.vredand.mask.nxv8i8.nxv32i8(
    <vscale x 8 x i8> %0,
    <vscale x 32 x i8> %1,
    <vscale x 8 x i8> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i8> %a
}
279
280declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
281  <vscale x 4 x i16>,
282  <vscale x 1 x i16>,
283  <vscale x 4 x i16>,
284  i32);
285
; Unmasked AND-reduction of an nxv1i16 vector into nxv4i16: expects e16/mf4, tu.
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}
301
302declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
303  <vscale x 4 x i16>,
304  <vscale x 1 x i16>,
305  <vscale x 4 x i16>,
306  <vscale x 1 x i1>,
307  i32);
308
; Masked variant of the e16/mf4 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 1 x i16> %1, <vscale x 4 x i16> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv1i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv1i16(
    <vscale x 4 x i16> %0,
    <vscale x 1 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}
325
326declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
327  <vscale x 4 x i16>,
328  <vscale x 2 x i16>,
329  <vscale x 4 x i16>,
330  i32);
331
; Unmasked AND-reduction of an nxv2i16 vector: expects e16/mf2, tu, vredand.vs v8, v9, v10.
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}
347
348declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
349  <vscale x 4 x i16>,
350  <vscale x 2 x i16>,
351  <vscale x 4 x i16>,
352  <vscale x 2 x i1>,
353  i32);
354
; Masked variant of the e16/mf2 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 2 x i16> %1, <vscale x 4 x i16> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv2i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv2i16(
    <vscale x 4 x i16> %0,
    <vscale x 2 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}
371
372declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
373  <vscale x 4 x i16>,
374  <vscale x 4 x i16>,
375  <vscale x 4 x i16>,
376  i32);
377
; Unmasked AND-reduction of an nxv4i16 vector: expects e16/m1, tu, vredand.vs v8, v9, v10.
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}
393
394declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
395  <vscale x 4 x i16>,
396  <vscale x 4 x i16>,
397  <vscale x 4 x i16>,
398  <vscale x 4 x i1>,
399  i32);
400
; Masked variant of the e16/m1 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}
417
418declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
419  <vscale x 4 x i16>,
420  <vscale x 8 x i16>,
421  <vscale x 4 x i16>,
422  i32);
423
; Unmasked AND-reduction of an nxv8i16 vector: e16/m2, wide source in v10 — vredand.vs v8, v10, v9.
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}
439
440declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
441  <vscale x 4 x i16>,
442  <vscale x 8 x i16>,
443  <vscale x 4 x i16>,
444  <vscale x 8 x i1>,
445  i32);
446
; Masked variant of the e16/m2 case: wide source in v10, mask in v0 (v0.t).
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 8 x i16> %1, <vscale x 4 x i16> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv8i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv8i16(
    <vscale x 4 x i16> %0,
    <vscale x 8 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}
463
464declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
465  <vscale x 4 x i16>,
466  <vscale x 16 x i16>,
467  <vscale x 4 x i16>,
468  i32);
469
; Unmasked AND-reduction of an nxv16i16 vector: e16/m4, wide source in v12 — vredand.vs v8, v12, v9.
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}
485
486declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
487  <vscale x 4 x i16>,
488  <vscale x 16 x i16>,
489  <vscale x 4 x i16>,
490  <vscale x 16 x i1>,
491  i32);
492
; Masked variant of the e16/m4 case: wide source in v12, mask in v0 (v0.t).
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 16 x i16> %1, <vscale x 4 x i16> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv16i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv16i16(
    <vscale x 4 x i16> %0,
    <vscale x 16 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}
509
510declare <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
511  <vscale x 4 x i16>,
512  <vscale x 32 x i16>,
513  <vscale x 4 x i16>,
514  i32);
515
; Unmasked AND-reduction of an nxv32i16 vector: e16/m8, wide source in v16 — vredand.vs v8, v16, v9.
define <vscale x 4 x i16> @intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %3)

  ret <vscale x 4 x i16> %a
}
531
532declare <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
533  <vscale x 4 x i16>,
534  <vscale x 32 x i16>,
535  <vscale x 4 x i16>,
536  <vscale x 32 x i1>,
537  i32);
538
; Masked variant of the e16/m8 case: wide source in v16, mask in v0 (v0.t).
define <vscale x 4 x i16> @intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 32 x i16> %1, <vscale x 4 x i16> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv4i16_nxv32i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i16> @llvm.riscv.vredand.mask.nxv4i16.nxv32i16(
    <vscale x 4 x i16> %0,
    <vscale x 32 x i16> %1,
    <vscale x 4 x i16> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i16> %a
}
555
556declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
557  <vscale x 2 x i32>,
558  <vscale x 1 x i32>,
559  <vscale x 2 x i32>,
560  i32);
561
; Unmasked AND-reduction of an nxv1i32 vector into nxv2i32: expects e32/mf2, tu.
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}
577
578declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
579  <vscale x 2 x i32>,
580  <vscale x 1 x i32>,
581  <vscale x 2 x i32>,
582  <vscale x 1 x i1>,
583  i32);
584
; Masked variant of the e32/mf2 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 1 x i32> %1, <vscale x 2 x i32> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv1i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv1i32(
    <vscale x 2 x i32> %0,
    <vscale x 1 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}
601
602declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
603  <vscale x 2 x i32>,
604  <vscale x 2 x i32>,
605  <vscale x 2 x i32>,
606  i32);
607
; Unmasked AND-reduction of an nxv2i32 vector: expects e32/m1, tu, vredand.vs v8, v9, v10.
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}
623
624declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
625  <vscale x 2 x i32>,
626  <vscale x 2 x i32>,
627  <vscale x 2 x i32>,
628  <vscale x 2 x i1>,
629  i32);
630
; Masked variant of the e32/m1 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}
647
648declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
649  <vscale x 2 x i32>,
650  <vscale x 4 x i32>,
651  <vscale x 2 x i32>,
652  i32);
653
; Unmasked AND-reduction of an nxv4i32 vector: e32/m2, wide source in v10 — vredand.vs v8, v10, v9.
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}
669
670declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
671  <vscale x 2 x i32>,
672  <vscale x 4 x i32>,
673  <vscale x 2 x i32>,
674  <vscale x 4 x i1>,
675  i32);
676
; Masked variant of the e32/m2 case: wide source in v10, mask in v0 (v0.t).
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 4 x i32> %1, <vscale x 2 x i32> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv4i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv4i32(
    <vscale x 2 x i32> %0,
    <vscale x 4 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}
693
694declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
695  <vscale x 2 x i32>,
696  <vscale x 8 x i32>,
697  <vscale x 2 x i32>,
698  i32);
699
; Unmasked AND-reduction of an nxv8i32 vector: e32/m4, wide source in v12 — vredand.vs v8, v12, v9.
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}
715
716declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
717  <vscale x 2 x i32>,
718  <vscale x 8 x i32>,
719  <vscale x 2 x i32>,
720  <vscale x 8 x i1>,
721  i32);
722
; Masked variant of the e32/m4 case: wide source in v12, mask in v0 (v0.t).
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 8 x i32> %1, <vscale x 2 x i32> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv8i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv8i32(
    <vscale x 2 x i32> %0,
    <vscale x 8 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}
739
740declare <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
741  <vscale x 2 x i32>,
742  <vscale x 16 x i32>,
743  <vscale x 2 x i32>,
744  i32);
745
; Unmasked AND-reduction of an nxv16i32 vector: e32/m8, wide source in v16 — vredand.vs v8, v16, v9.
define <vscale x 2 x i32> @intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %3)

  ret <vscale x 2 x i32> %a
}
761
762declare <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
763  <vscale x 2 x i32>,
764  <vscale x 16 x i32>,
765  <vscale x 2 x i32>,
766  <vscale x 16 x i1>,
767  i32);
768
; Masked variant of the e32/m8 case: wide source in v16, mask in v0 (v0.t).
define <vscale x 2 x i32> @intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 16 x i32> %1, <vscale x 2 x i32> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv2i32_nxv16i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i32> @llvm.riscv.vredand.mask.nxv2i32.nxv16i32(
    <vscale x 2 x i32> %0,
    <vscale x 16 x i32> %1,
    <vscale x 2 x i32> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i32> %a
}
785
786declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
787  <vscale x 1 x i64>,
788  <vscale x 1 x i64>,
789  <vscale x 1 x i64>,
790  i32);
791
; Unmasked AND-reduction of an nxv1i64 vector: expects e64/m1, tu, vredand.vs v8, v9, v10.
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}
807
808declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
809  <vscale x 1 x i64>,
810  <vscale x 1 x i64>,
811  <vscale x 1 x i64>,
812  <vscale x 1 x i1>,
813  i32);
814
; Masked variant of the e64/m1 case: mask in v0, v0.t operand on vredand.vs.
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, tu, mu
; CHECK-NEXT:    vredand.vs v8, v9, v10, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}
831
832declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
833  <vscale x 1 x i64>,
834  <vscale x 2 x i64>,
835  <vscale x 1 x i64>,
836  i32);
837
; Unmasked AND-reduction of an nxv2i64 vector: e64/m2, wide source in v10 — vredand.vs v8, v10, v9.
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}
853
854declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
855  <vscale x 1 x i64>,
856  <vscale x 2 x i64>,
857  <vscale x 1 x i64>,
858  <vscale x 2 x i1>,
859  i32);
860
; Masked variant of the e64/m2 case: wide source in v10, mask in v0 (v0.t).
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 2 x i64> %1, <vscale x 1 x i64> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv2i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, tu, mu
; CHECK-NEXT:    vredand.vs v8, v10, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv2i64(
    <vscale x 1 x i64> %0,
    <vscale x 2 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}
877
878declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
879  <vscale x 1 x i64>,
880  <vscale x 4 x i64>,
881  <vscale x 1 x i64>,
882  i32);
883
; Unmasked AND-reduction of an nxv4i64 vector: e64/m4, wide source in v12 — vredand.vs v8, v12, v9.
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}
899
900declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
901  <vscale x 1 x i64>,
902  <vscale x 4 x i64>,
903  <vscale x 1 x i64>,
904  <vscale x 4 x i1>,
905  i32);
906
; Masked variant of the e64/m4 case: wide source in v12, mask in v0 (v0.t).
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 4 x i64> %1, <vscale x 1 x i64> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv4i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, tu, mu
; CHECK-NEXT:    vredand.vs v8, v12, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv4i64(
    <vscale x 1 x i64> %0,
    <vscale x 4 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}
923
924declare <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
925  <vscale x 1 x i64>,
926  <vscale x 8 x i64>,
927  <vscale x 1 x i64>,
928  i32);
929
; Unmasked AND-reduction of an nxv8i64 vector: e64/m8, wide source in v16 — vredand.vs v8, v16, v9.
define <vscale x 1 x i64> @intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vredand_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v16, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    i32 %3)

  ret <vscale x 1 x i64> %a
}
945
946declare <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
947  <vscale x 1 x i64>,
948  <vscale x 8 x i64>,
949  <vscale x 1 x i64>,
950  <vscale x 8 x i1>,
951  i32);
952
; Masked variant of the e64/m8 case: wide source in v16, mask in v0 (v0.t).
define <vscale x 1 x i64> @intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 8 x i64> %1, <vscale x 1 x i64> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vredand_mask_vs_nxv1i64_nxv8i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m8, tu, mu
; CHECK-NEXT:    vredand.vs v8, v16, v9, v0.t
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i64> @llvm.riscv.vredand.mask.nxv1i64.nxv8i64(
    <vscale x 1 x i64> %0,
    <vscale x 8 x i64> %1,
    <vscale x 1 x i64> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i64> %a
}
969