; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v,+d,+experimental-zfh -verify-machineinstrs \
; RUN:   --riscv-no-aliases < %s | FileCheck %s
declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv1f16(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
  <vscale x 4 x half>,
  <vscale x 1 x half>,
  <vscale x 4 x half>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 1 x half> %1, <vscale x 4 x half> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv1f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,mf4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv1f16.nxv1i1(
    <vscale x 4 x half> %0,
    <vscale x 1 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv2f16(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
  <vscale x 4 x half>,
  <vscale x 2 x half>,
  <vscale x 4 x half>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 2 x half> %1, <vscale x 4 x half> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv2f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,mf2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv2f16.nxv2i1(
    <vscale x 4 x half> %0,
    <vscale x 2 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv4f16(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x half>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 4 x half> %1, <vscale x 4 x half> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv4f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m1,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv4f16.nxv4i1(
    <vscale x 4 x half> %0,
    <vscale x 4 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv8f16(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
  <vscale x 4 x half>,
  <vscale x 8 x half>,
  <vscale x 4 x half>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 8 x half> %1, <vscale x 4 x half> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv8f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv8f16.nxv8i1(
    <vscale x 4 x half> %0,
    <vscale x 8 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv16f16(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
  <vscale x 4 x half>,
  <vscale x 16 x half>,
  <vscale x 4 x half>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 16 x half> %1, <vscale x 4 x half> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv16f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv16f16.nxv16i1(
    <vscale x 4 x half> %0,
    <vscale x 16 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.nxv4f16.nxv32f16(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    i32 %3)

  ret <vscale x 4 x half> %a
}

declare <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
  <vscale x 4 x half>,
  <vscale x 32 x half>,
  <vscale x 4 x half>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 4 x half> @intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16(<vscale x 4 x half> %0, <vscale x 32 x half> %1, <vscale x 4 x half> %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv4f16_nxv32f16_nxv4f16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e16,m8,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 4 x half> @llvm.riscv.vfredmax.mask.nxv4f16.nxv32f16.nxv32i1(
    <vscale x 4 x half> %0,
    <vscale x 32 x half> %1,
    <vscale x 4 x half> %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 4 x half> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv1f32(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
  <vscale x 2 x float>,
  <vscale x 1 x float>,
  <vscale x 2 x float>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 1 x float> %1, <vscale x 2 x float> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv1f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,mf2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv1f32.nxv1i1(
    <vscale x 2 x float> %0,
    <vscale x 1 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv2f32(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x float>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 2 x float> %1, <vscale x 2 x float> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv2f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m1,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv2f32.nxv2i1(
    <vscale x 2 x float> %0,
    <vscale x 2 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv4f32(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
  <vscale x 2 x float>,
  <vscale x 4 x float>,
  <vscale x 2 x float>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 4 x float> %1, <vscale x 2 x float> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv4f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv4f32.nxv4i1(
    <vscale x 2 x float> %0,
    <vscale x 4 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv8f32(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
  <vscale x 2 x float>,
  <vscale x 8 x float>,
  <vscale x 2 x float>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 8 x float> %1, <vscale x 2 x float> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv8f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv8f32.nxv8i1(
    <vscale x 2 x float> %0,
    <vscale x 8 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.nxv2f32.nxv16f32(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    i32 %3)

  ret <vscale x 2 x float> %a
}

declare <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
  <vscale x 2 x float>,
  <vscale x 16 x float>,
  <vscale x 2 x float>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 2 x float> @intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32(<vscale x 2 x float> %0, <vscale x 16 x float> %1, <vscale x 2 x float> %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv2f32_nxv16f32_nxv2f32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e32,m8,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 2 x float> @llvm.riscv.vfredmax.mask.nxv2f32.nxv16f32.nxv16i1(
    <vscale x 2 x float> %0,
    <vscale x 16 x float> %1,
    <vscale x 2 x float> %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 2 x float> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv1f64(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x double>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 1 x double> %1, <vscale x 1 x double> %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv1f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m1,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v9, v10, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv1f64.nxv1i1(
    <vscale x 1 x double> %0,
    <vscale x 1 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv2f64(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
  <vscale x 1 x double>,
  <vscale x 2 x double>,
  <vscale x 1 x double>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 2 x double> %1, <vscale x 1 x double> %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv2f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m2,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v10, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv2f64.nxv2i1(
    <vscale x 1 x double> %0,
    <vscale x 2 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv4f64(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
  <vscale x 1 x double>,
  <vscale x 4 x double>,
  <vscale x 1 x double>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 4 x double> %1, <vscale x 1 x double> %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv4f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m4,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v12, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv4f64.nxv4i1(
    <vscale x 1 x double> %0,
    <vscale x 4 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.nxv1f64.nxv8f64(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    i32 %3)

  ret <vscale x 1 x double> %a
}

declare <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
  <vscale x 1 x double>,
  <vscale x 8 x double>,
  <vscale x 1 x double>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 1 x double> @intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64(<vscale x 1 x double> %0, <vscale x 8 x double> %1, <vscale x 1 x double> %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vfredmax_mask_vs_nxv1f64_nxv8f64_nxv1f64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli a0, a0, e64,m8,ta,mu
; CHECK-NEXT:    vfredmax.vs v8, v16, v9, v0.t
; CHECK-NEXT:    jalr zero, 0(ra)
entry:
  %a = call <vscale x 1 x double> @llvm.riscv.vfredmax.mask.nxv1f64.nxv8f64.nxv8i1(
    <vscale x 1 x double> %0,
    <vscale x 8 x double> %1,
    <vscale x 1 x double> %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 1 x double> %a
}
