; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs < %s | FileCheck %s
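; Tests that the unsigned-max idiom (icmp ugt + select) on scalable vector
; types is lowered to the RVV vmaxu.vv and vmaxu.vx instructions, covering
; element widths i8 through i64 and LMULs from mf8 to m8.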

define <vscale x 1 x i8> @vmax_vv_nxv1i8(<vscale x 1 x i8> %va, <vscale x 1 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 1 x i8> %va, %vb
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %vb
  ret <vscale x 1 x i8> %vc
}

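; In the _vx tests, the scalar operand is splatted with insertelement +
; shufflevector; the splat is expected to fold into the vector-scalar form
; vmaxu.vx.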
define <vscale x 1 x i8> @vmax_vx_nxv1i8(<vscale x 1 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i8> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
  ret <vscale x 1 x i8> %vc
}

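; vmaxu has no vector-immediate form, so in the _vi tests the splatted
; constant -3 is expected to be materialized into a scalar register and
; matched as vmaxu.vx.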
define <vscale x 1 x i8> @vmax_vi_nxv1i8_0(<vscale x 1 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv1i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, mf8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 1 x i8> %head, <vscale x 1 x i8> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i8> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i8> %va, <vscale x 1 x i8> %splat
  ret <vscale x 1 x i8> %vc
}

define <vscale x 2 x i8> @vmax_vv_nxv2i8(<vscale x 2 x i8> %va, <vscale x 2 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 2 x i8> %va, %vb
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %vb
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmax_vx_nxv2i8(<vscale x 2 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i8> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 2 x i8> @vmax_vi_nxv2i8_0(<vscale x 2 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv2i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, mf4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 2 x i8> %head, <vscale x 2 x i8> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i8> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i8> %va, <vscale x 2 x i8> %splat
  ret <vscale x 2 x i8> %vc
}

define <vscale x 4 x i8> @vmax_vv_nxv4i8(<vscale x 4 x i8> %va, <vscale x 4 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 4 x i8> %va, %vb
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %vb
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmax_vx_nxv4i8(<vscale x 4 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i8> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 4 x i8> @vmax_vi_nxv4i8_0(<vscale x 4 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv4i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 4 x i8> %head, <vscale x 4 x i8> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i8> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i8> %va, <vscale x 4 x i8> %splat
  ret <vscale x 4 x i8> %vc
}

define <vscale x 8 x i8> @vmax_vv_nxv8i8(<vscale x 8 x i8> %va, <vscale x 8 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 8 x i8> %va, %vb
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %vb
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmax_vx_nxv8i8(<vscale x 8 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i8> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 8 x i8> @vmax_vi_nxv8i8_0(<vscale x 8 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv8i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 8 x i8> %head, <vscale x 8 x i8> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i8> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i8> %va, <vscale x 8 x i8> %splat
  ret <vscale x 8 x i8> %vc
}

define <vscale x 16 x i8> @vmax_vv_nxv16i8(<vscale x 16 x i8> %va, <vscale x 16 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 16 x i8> %va, %vb
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %vb
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmax_vx_nxv16i8(<vscale x 16 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 16 x i8> %va, %splat
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 16 x i8> @vmax_vi_nxv16i8_0(<vscale x 16 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv16i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 16 x i8> %head, <vscale x 16 x i8> undef, <vscale x 16 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 16 x i8> %va, %splat
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i8> %va, <vscale x 16 x i8> %splat
  ret <vscale x 16 x i8> %vc
}

define <vscale x 32 x i8> @vmax_vv_nxv32i8(<vscale x 32 x i8> %va, <vscale x 32 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 32 x i8> %va, %vb
  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %vb
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmax_vx_nxv32i8(<vscale x 32 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv32i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 32 x i8> %va, %splat
  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 32 x i8> @vmax_vi_nxv32i8_0(<vscale x 32 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv32i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 32 x i8> %head, <vscale x 32 x i8> undef, <vscale x 32 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 32 x i8> %va, %splat
  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i8> %va, <vscale x 32 x i8> %splat
  ret <vscale x 32 x i8> %vc
}

define <vscale x 64 x i8> @vmax_vv_nxv64i8(<vscale x 64 x i8> %va, <vscale x 64 x i8> %vb) {
; CHECK-LABEL: vmax_vv_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 64 x i8> %va, %vb
  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %vb
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmax_vx_nxv64i8(<vscale x 64 x i8> %va, i8 signext %b) {
; CHECK-LABEL: vmax_vx_nxv64i8:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> undef, i8 %b, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 64 x i8> %va, %splat
  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 64 x i8> @vmax_vi_nxv64i8_0(<vscale x 64 x i8> %va) {
; CHECK-LABEL: vmax_vi_nxv64i8_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e8, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 64 x i8> undef, i8 -3, i32 0
  %splat = shufflevector <vscale x 64 x i8> %head, <vscale x 64 x i8> undef, <vscale x 64 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 64 x i8> %va, %splat
  %vc = select <vscale x 64 x i1> %cmp, <vscale x 64 x i8> %va, <vscale x 64 x i8> %splat
  ret <vscale x 64 x i8> %vc
}

define <vscale x 1 x i16> @vmax_vv_nxv1i16(<vscale x 1 x i16> %va, <vscale x 1 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 1 x i16> %va, %vb
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %vb
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmax_vx_nxv1i16(<vscale x 1 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i16> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 1 x i16> @vmax_vi_nxv1i16_0(<vscale x 1 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv1i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e16, mf4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i16> undef, i16 -3, i32 0
  %splat = shufflevector <vscale x 1 x i16> %head, <vscale x 1 x i16> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i16> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i16> %va, <vscale x 1 x i16> %splat
  ret <vscale x 1 x i16> %vc
}

define <vscale x 2 x i16> @vmax_vv_nxv2i16(<vscale x 2 x i16> %va, <vscale x 2 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 2 x i16> %va, %vb
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %vb
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmax_vx_nxv2i16(<vscale x 2 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i16> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 2 x i16> @vmax_vi_nxv2i16_0(<vscale x 2 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv2i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e16, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i16> undef, i16 -3, i32 0
  %splat = shufflevector <vscale x 2 x i16> %head, <vscale x 2 x i16> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i16> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i16> %va, <vscale x 2 x i16> %splat
  ret <vscale x 2 x i16> %vc
}

define <vscale x 4 x i16> @vmax_vv_nxv4i16(<vscale x 4 x i16> %va, <vscale x 4 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 4 x i16> %va, %vb
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %vb
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmax_vx_nxv4i16(<vscale x 4 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i16> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 4 x i16> @vmax_vi_nxv4i16_0(<vscale x 4 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv4i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e16, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i16> undef, i16 -3, i32 0
  %splat = shufflevector <vscale x 4 x i16> %head, <vscale x 4 x i16> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i16> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i16> %va, <vscale x 4 x i16> %splat
  ret <vscale x 4 x i16> %vc
}

define <vscale x 8 x i16> @vmax_vv_nxv8i16(<vscale x 8 x i16> %va, <vscale x 8 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 8 x i16> %va, %vb
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %vb
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmax_vx_nxv8i16(<vscale x 8 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i16> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 8 x i16> @vmax_vi_nxv8i16_0(<vscale x 8 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv8i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e16, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i16> undef, i16 -3, i32 0
  %splat = shufflevector <vscale x 8 x i16> %head, <vscale x 8 x i16> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i16> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i16> %va, <vscale x 8 x i16> %splat
  ret <vscale x 8 x i16> %vc
}

define <vscale x 16 x i16> @vmax_vv_nxv16i16(<vscale x 16 x i16> %va, <vscale x 16 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 16 x i16> %va, %vb
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %vb
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmax_vx_nxv16i16(<vscale x 16 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 16 x i16> %va, %splat
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 16 x i16> @vmax_vi_nxv16i16_0(<vscale x 16 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv16i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e16, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i16> undef, i16 -3, i32 0
  %splat = shufflevector <vscale x 16 x i16> %head, <vscale x 16 x i16> undef, <vscale x 16 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 16 x i16> %va, %splat
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i16> %va, <vscale x 16 x i16> %splat
  ret <vscale x 16 x i16> %vc
}

define <vscale x 32 x i16> @vmax_vv_nxv32i16(<vscale x 32 x i16> %va, <vscale x 32 x i16> %vb) {
; CHECK-LABEL: vmax_vv_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 32 x i16> %va, %vb
  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %vb
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmax_vx_nxv32i16(<vscale x 32 x i16> %va, i16 signext %b) {
; CHECK-LABEL: vmax_vx_nxv32i16:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 %b, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 32 x i16> %va, %splat
  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 32 x i16> @vmax_vi_nxv32i16_0(<vscale x 32 x i16> %va) {
; CHECK-LABEL: vmax_vi_nxv32i16_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e16, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 32 x i16> undef, i16 -3, i32 0
  %splat = shufflevector <vscale x 32 x i16> %head, <vscale x 32 x i16> undef, <vscale x 32 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 32 x i16> %va, %splat
  %vc = select <vscale x 32 x i1> %cmp, <vscale x 32 x i16> %va, <vscale x 32 x i16> %splat
  ret <vscale x 32 x i16> %vc
}

define <vscale x 1 x i32> @vmax_vv_nxv1i32(<vscale x 1 x i32> %va, <vscale x 1 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 1 x i32> %va, %vb
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %vb
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmax_vx_nxv1i32(<vscale x 1 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmax_vx_nxv1i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i32> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 1 x i32> @vmax_vi_nxv1i32_0(<vscale x 1 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv1i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e32, mf2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i32> undef, i32 -3, i32 0
  %splat = shufflevector <vscale x 1 x i32> %head, <vscale x 1 x i32> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i32> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i32> %va, <vscale x 1 x i32> %splat
  ret <vscale x 1 x i32> %vc
}

define <vscale x 2 x i32> @vmax_vv_nxv2i32(<vscale x 2 x i32> %va, <vscale x 2 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 2 x i32> %va, %vb
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %vb
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmax_vx_nxv2i32(<vscale x 2 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmax_vx_nxv2i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i32> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 2 x i32> @vmax_vi_nxv2i32_0(<vscale x 2 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv2i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e32, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i32> undef, i32 -3, i32 0
  %splat = shufflevector <vscale x 2 x i32> %head, <vscale x 2 x i32> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i32> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i32> %va, <vscale x 2 x i32> %splat
  ret <vscale x 2 x i32> %vc
}

define <vscale x 4 x i32> @vmax_vv_nxv4i32(<vscale x 4 x i32> %va, <vscale x 4 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 4 x i32> %va, %vb
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %vb
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmax_vx_nxv4i32(<vscale x 4 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmax_vx_nxv4i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i32> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 4 x i32> @vmax_vi_nxv4i32_0(<vscale x 4 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv4i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e32, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i32> undef, i32 -3, i32 0
  %splat = shufflevector <vscale x 4 x i32> %head, <vscale x 4 x i32> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i32> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i32> %va, <vscale x 4 x i32> %splat
  ret <vscale x 4 x i32> %vc
}

define <vscale x 8 x i32> @vmax_vv_nxv8i32(<vscale x 8 x i32> %va, <vscale x 8 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 8 x i32> %va, %vb
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %vb
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmax_vx_nxv8i32(<vscale x 8 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmax_vx_nxv8i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i32> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 8 x i32> @vmax_vi_nxv8i32_0(<vscale x 8 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv8i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e32, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i32> undef, i32 -3, i32 0
  %splat = shufflevector <vscale x 8 x i32> %head, <vscale x 8 x i32> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i32> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i32> %va, <vscale x 8 x i32> %splat
  ret <vscale x 8 x i32> %vc
}

define <vscale x 16 x i32> @vmax_vv_nxv16i32(<vscale x 16 x i32> %va, <vscale x 16 x i32> %vb) {
; CHECK-LABEL: vmax_vv_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 16 x i32> %va, %vb
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %vb
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmax_vx_nxv16i32(<vscale x 16 x i32> %va, i32 signext %b) {
; CHECK-LABEL: vmax_vx_nxv16i32:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 %b, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 16 x i32> %va, %splat
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 16 x i32> @vmax_vi_nxv16i32_0(<vscale x 16 x i32> %va) {
; CHECK-LABEL: vmax_vi_nxv16i32_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e32, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 16 x i32> undef, i32 -3, i32 0
  %splat = shufflevector <vscale x 16 x i32> %head, <vscale x 16 x i32> undef, <vscale x 16 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 16 x i32> %va, %splat
  %vc = select <vscale x 16 x i1> %cmp, <vscale x 16 x i32> %va, <vscale x 16 x i32> %splat
  ret <vscale x 16 x i32> %vc
}

define <vscale x 1 x i64> @vmax_vv_nxv1i64(<vscale x 1 x i64> %va, <vscale x 1 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v9
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 1 x i64> %va, %vb
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %vb
  ret <vscale x 1 x i64> %vc
}

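; On riscv64 an i64 scalar operand already occupies a full GPR (so no signext
; attribute is needed on %b), and the splatted scalar is still expected to be
; matched directly as vmaxu.vx, just like the narrower element types.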
define <vscale x 1 x i64> @vmax_vx_nxv1i64(<vscale x 1 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv1i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i64> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 1 x i64> @vmax_vi_nxv1i64_0(<vscale x 1 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv1i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e64, m1, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 1 x i64> undef, i64 -3, i32 0
  %splat = shufflevector <vscale x 1 x i64> %head, <vscale x 1 x i64> undef, <vscale x 1 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 1 x i64> %va, %splat
  %vc = select <vscale x 1 x i1> %cmp, <vscale x 1 x i64> %va, <vscale x 1 x i64> %splat
  ret <vscale x 1 x i64> %vc
}

define <vscale x 2 x i64> @vmax_vv_nxv2i64(<vscale x 2 x i64> %va, <vscale x 2 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v10
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 2 x i64> %va, %vb
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %vb
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmax_vx_nxv2i64(<vscale x 2 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv2i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i64> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 2 x i64> @vmax_vi_nxv2i64_0(<vscale x 2 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv2i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e64, m2, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 2 x i64> undef, i64 -3, i32 0
  %splat = shufflevector <vscale x 2 x i64> %head, <vscale x 2 x i64> undef, <vscale x 2 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 2 x i64> %va, %splat
  %vc = select <vscale x 2 x i1> %cmp, <vscale x 2 x i64> %va, <vscale x 2 x i64> %splat
  ret <vscale x 2 x i64> %vc
}

define <vscale x 4 x i64> @vmax_vv_nxv4i64(<vscale x 4 x i64> %va, <vscale x 4 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v12
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 4 x i64> %va, %vb
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %vb
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmax_vx_nxv4i64(<vscale x 4 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv4i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i64> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 4 x i64> @vmax_vi_nxv4i64_0(<vscale x 4 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv4i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e64, m4, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 4 x i64> undef, i64 -3, i32 0
  %splat = shufflevector <vscale x 4 x i64> %head, <vscale x 4 x i64> undef, <vscale x 4 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 4 x i64> %va, %splat
  %vc = select <vscale x 4 x i1> %cmp, <vscale x 4 x i64> %va, <vscale x 4 x i64> %splat
  ret <vscale x 4 x i64> %vc
}

define <vscale x 8 x i64> @vmax_vv_nxv8i64(<vscale x 8 x i64> %va, <vscale x 8 x i64> %vb) {
; CHECK-LABEL: vmax_vv_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a0, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmaxu.vv v8, v8, v16
; CHECK-NEXT:    ret
  %cmp = icmp ugt <vscale x 8 x i64> %va, %vb
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %vb
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmax_vx_nxv8i64(<vscale x 8 x i64> %va, i64 %b) {
; CHECK-LABEL: vmax_vx_nxv8i64:
; CHECK:       # %bb.0:
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 %b, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i64> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
  ret <vscale x 8 x i64> %vc
}

define <vscale x 8 x i64> @vmax_vi_nxv8i64_0(<vscale x 8 x i64> %va) {
; CHECK-LABEL: vmax_vi_nxv8i64_0:
; CHECK:       # %bb.0:
; CHECK-NEXT:    addi a0, zero, -3
; CHECK-NEXT:    vsetvli a1, zero, e64, m8, ta, mu
; CHECK-NEXT:    vmaxu.vx v8, v8, a0
; CHECK-NEXT:    ret
  %head = insertelement <vscale x 8 x i64> undef, i64 -3, i32 0
  %splat = shufflevector <vscale x 8 x i64> %head, <vscale x 8 x i64> undef, <vscale x 8 x i32> zeroinitializer
  %cmp = icmp ugt <vscale x 8 x i64> %va, %splat
  %vc = select <vscale x 8 x i1> %cmp, <vscale x 8 x i64> %va, <vscale x 8 x i64> %splat
  ret <vscale x 8 x i64> %vc
}