1; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
2; RUN: llc -mtriple=riscv64 -mattr=+experimental-v -verify-machineinstrs \
3; RUN:   < %s | FileCheck %s
4declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
5  <vscale x 1 x i8>,
6  <vscale x 1 x i8>,
7  i64);
8
9define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i64 %2) nounwind {
10; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i8_nxv1i8:
11; CHECK:       # %bb.0: # %entry
12; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
13; CHECK-NEXT:    vmsle.vv v0, v9, v8
14; CHECK-NEXT:    ret
15entry:
16  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
17    <vscale x 1 x i8> %0,
18    <vscale x 1 x i8> %1,
19    i64 %2)
20
21  ret <vscale x 1 x i1> %a
22}
23
24declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
25  <vscale x 1 x i1>,
26  <vscale x 1 x i8>,
27  <vscale x 1 x i8>,
28  <vscale x 1 x i1>,
29  i64);
30
31define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i64 %4) nounwind {
32; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i8_nxv1i8:
33; CHECK:       # %bb.0: # %entry
34; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
35; CHECK-NEXT:    vmsle.vv v25, v9, v8
36; CHECK-NEXT:    vmv1r.v v26, v0
37; CHECK-NEXT:    vmv1r.v v0, v25
38; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
39; CHECK-NEXT:    vmv1r.v v0, v26
40; CHECK-NEXT:    ret
41entry:
42  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8(
43    <vscale x 1 x i8> %1,
44    <vscale x 1 x i8> %2,
45    i64 %4)
46  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8(
47    <vscale x 1 x i1> %0,
48    <vscale x 1 x i8> %2,
49    <vscale x 1 x i8> %3,
50    <vscale x 1 x i1> %mask,
51    i64 %4)
52
53  ret <vscale x 1 x i1> %a
54}
55
56declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
57  <vscale x 2 x i8>,
58  <vscale x 2 x i8>,
59  i64);
60
61define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i64 %2) nounwind {
62; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i8_nxv2i8:
63; CHECK:       # %bb.0: # %entry
64; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
65; CHECK-NEXT:    vmsle.vv v0, v9, v8
66; CHECK-NEXT:    ret
67entry:
68  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
69    <vscale x 2 x i8> %0,
70    <vscale x 2 x i8> %1,
71    i64 %2)
72
73  ret <vscale x 2 x i1> %a
74}
75
76declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
77  <vscale x 2 x i1>,
78  <vscale x 2 x i8>,
79  <vscale x 2 x i8>,
80  <vscale x 2 x i1>,
81  i64);
82
83define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i64 %4) nounwind {
84; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i8_nxv2i8:
85; CHECK:       # %bb.0: # %entry
86; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
87; CHECK-NEXT:    vmsle.vv v25, v9, v8
88; CHECK-NEXT:    vmv1r.v v26, v0
89; CHECK-NEXT:    vmv1r.v v0, v25
90; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
91; CHECK-NEXT:    vmv1r.v v0, v26
92; CHECK-NEXT:    ret
93entry:
94  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8(
95    <vscale x 2 x i8> %1,
96    <vscale x 2 x i8> %2,
97    i64 %4)
98  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8(
99    <vscale x 2 x i1> %0,
100    <vscale x 2 x i8> %2,
101    <vscale x 2 x i8> %3,
102    <vscale x 2 x i1> %mask,
103    i64 %4)
104
105  ret <vscale x 2 x i1> %a
106}
107
108declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
109  <vscale x 4 x i8>,
110  <vscale x 4 x i8>,
111  i64);
112
113define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i64 %2) nounwind {
114; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i8_nxv4i8:
115; CHECK:       # %bb.0: # %entry
116; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
117; CHECK-NEXT:    vmsle.vv v0, v9, v8
118; CHECK-NEXT:    ret
119entry:
120  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
121    <vscale x 4 x i8> %0,
122    <vscale x 4 x i8> %1,
123    i64 %2)
124
125  ret <vscale x 4 x i1> %a
126}
127
128declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
129  <vscale x 4 x i1>,
130  <vscale x 4 x i8>,
131  <vscale x 4 x i8>,
132  <vscale x 4 x i1>,
133  i64);
134
135define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i64 %4) nounwind {
136; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i8_nxv4i8:
137; CHECK:       # %bb.0: # %entry
138; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
139; CHECK-NEXT:    vmsle.vv v25, v9, v8
140; CHECK-NEXT:    vmv1r.v v26, v0
141; CHECK-NEXT:    vmv1r.v v0, v25
142; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
143; CHECK-NEXT:    vmv1r.v v0, v26
144; CHECK-NEXT:    ret
145entry:
146  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8(
147    <vscale x 4 x i8> %1,
148    <vscale x 4 x i8> %2,
149    i64 %4)
150  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8(
151    <vscale x 4 x i1> %0,
152    <vscale x 4 x i8> %2,
153    <vscale x 4 x i8> %3,
154    <vscale x 4 x i1> %mask,
155    i64 %4)
156
157  ret <vscale x 4 x i1> %a
158}
159
160declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
161  <vscale x 8 x i8>,
162  <vscale x 8 x i8>,
163  i64);
164
165define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i64 %2) nounwind {
166; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i8_nxv8i8:
167; CHECK:       # %bb.0: # %entry
168; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
169; CHECK-NEXT:    vmsle.vv v0, v9, v8
170; CHECK-NEXT:    ret
171entry:
172  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
173    <vscale x 8 x i8> %0,
174    <vscale x 8 x i8> %1,
175    i64 %2)
176
177  ret <vscale x 8 x i1> %a
178}
179
180declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
181  <vscale x 8 x i1>,
182  <vscale x 8 x i8>,
183  <vscale x 8 x i8>,
184  <vscale x 8 x i1>,
185  i64);
186
187define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i64 %4) nounwind {
188; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i8_nxv8i8:
189; CHECK:       # %bb.0: # %entry
190; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
191; CHECK-NEXT:    vmsle.vv v25, v9, v8
192; CHECK-NEXT:    vmv1r.v v26, v0
193; CHECK-NEXT:    vmv1r.v v0, v25
194; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
195; CHECK-NEXT:    vmv1r.v v0, v26
196; CHECK-NEXT:    ret
197entry:
198  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8(
199    <vscale x 8 x i8> %1,
200    <vscale x 8 x i8> %2,
201    i64 %4)
202  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8(
203    <vscale x 8 x i1> %0,
204    <vscale x 8 x i8> %2,
205    <vscale x 8 x i8> %3,
206    <vscale x 8 x i1> %mask,
207    i64 %4)
208
209  ret <vscale x 8 x i1> %a
210}
211
212declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
213  <vscale x 16 x i8>,
214  <vscale x 16 x i8>,
215  i64);
216
217define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i64 %2) nounwind {
218; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i8_nxv16i8:
219; CHECK:       # %bb.0: # %entry
220; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
221; CHECK-NEXT:    vmsle.vv v0, v10, v8
222; CHECK-NEXT:    ret
223entry:
224  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
225    <vscale x 16 x i8> %0,
226    <vscale x 16 x i8> %1,
227    i64 %2)
228
229  ret <vscale x 16 x i1> %a
230}
231
232declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
233  <vscale x 16 x i1>,
234  <vscale x 16 x i8>,
235  <vscale x 16 x i8>,
236  <vscale x 16 x i1>,
237  i64);
238
239define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i64 %4) nounwind {
240; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i8_nxv16i8:
241; CHECK:       # %bb.0: # %entry
242; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
243; CHECK-NEXT:    vmsle.vv v25, v10, v8
244; CHECK-NEXT:    vmv1r.v v26, v0
245; CHECK-NEXT:    vmv1r.v v0, v25
246; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
247; CHECK-NEXT:    vmv1r.v v0, v26
248; CHECK-NEXT:    ret
249entry:
250  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8(
251    <vscale x 16 x i8> %1,
252    <vscale x 16 x i8> %2,
253    i64 %4)
254  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8(
255    <vscale x 16 x i1> %0,
256    <vscale x 16 x i8> %2,
257    <vscale x 16 x i8> %3,
258    <vscale x 16 x i1> %mask,
259    i64 %4)
260
261  ret <vscale x 16 x i1> %a
262}
263
264declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
265  <vscale x 32 x i8>,
266  <vscale x 32 x i8>,
267  i64);
268
269define <vscale x 32 x i1> @intrinsic_vmsge_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i64 %2) nounwind {
270; CHECK-LABEL: intrinsic_vmsge_vv_nxv32i8_nxv32i8:
271; CHECK:       # %bb.0: # %entry
272; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
273; CHECK-NEXT:    vmsle.vv v0, v12, v8
274; CHECK-NEXT:    ret
275entry:
276  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
277    <vscale x 32 x i8> %0,
278    <vscale x 32 x i8> %1,
279    i64 %2)
280
281  ret <vscale x 32 x i1> %a
282}
283
284declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
285  <vscale x 32 x i1>,
286  <vscale x 32 x i8>,
287  <vscale x 32 x i8>,
288  <vscale x 32 x i1>,
289  i64);
290
291define <vscale x 32 x i1> @intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i64 %4) nounwind {
292; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv32i8_nxv32i8:
293; CHECK:       # %bb.0: # %entry
294; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
295; CHECK-NEXT:    vmsle.vv v25, v12, v8
296; CHECK-NEXT:    vmv1r.v v26, v0
297; CHECK-NEXT:    vmv1r.v v0, v25
298; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
299; CHECK-NEXT:    vmv1r.v v0, v26
300; CHECK-NEXT:    ret
301entry:
302  %mask = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8(
303    <vscale x 32 x i8> %1,
304    <vscale x 32 x i8> %2,
305    i64 %4)
306  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8(
307    <vscale x 32 x i1> %0,
308    <vscale x 32 x i8> %2,
309    <vscale x 32 x i8> %3,
310    <vscale x 32 x i1> %mask,
311    i64 %4)
312
313  ret <vscale x 32 x i1> %a
314}
315
316declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
317  <vscale x 1 x i16>,
318  <vscale x 1 x i16>,
319  i64);
320
321define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i64 %2) nounwind {
322; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i16_nxv1i16:
323; CHECK:       # %bb.0: # %entry
324; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
325; CHECK-NEXT:    vmsle.vv v0, v9, v8
326; CHECK-NEXT:    ret
327entry:
328  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
329    <vscale x 1 x i16> %0,
330    <vscale x 1 x i16> %1,
331    i64 %2)
332
333  ret <vscale x 1 x i1> %a
334}
335
336declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
337  <vscale x 1 x i1>,
338  <vscale x 1 x i16>,
339  <vscale x 1 x i16>,
340  <vscale x 1 x i1>,
341  i64);
342
343define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i64 %4) nounwind {
344; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i16_nxv1i16:
345; CHECK:       # %bb.0: # %entry
346; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
347; CHECK-NEXT:    vmsle.vv v25, v9, v8
348; CHECK-NEXT:    vmv1r.v v26, v0
349; CHECK-NEXT:    vmv1r.v v0, v25
350; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
351; CHECK-NEXT:    vmv1r.v v0, v26
352; CHECK-NEXT:    ret
353entry:
354  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16(
355    <vscale x 1 x i16> %1,
356    <vscale x 1 x i16> %2,
357    i64 %4)
358  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16(
359    <vscale x 1 x i1> %0,
360    <vscale x 1 x i16> %2,
361    <vscale x 1 x i16> %3,
362    <vscale x 1 x i1> %mask,
363    i64 %4)
364
365  ret <vscale x 1 x i1> %a
366}
367
368declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
369  <vscale x 2 x i16>,
370  <vscale x 2 x i16>,
371  i64);
372
373define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i64 %2) nounwind {
374; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i16_nxv2i16:
375; CHECK:       # %bb.0: # %entry
376; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
377; CHECK-NEXT:    vmsle.vv v0, v9, v8
378; CHECK-NEXT:    ret
379entry:
380  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
381    <vscale x 2 x i16> %0,
382    <vscale x 2 x i16> %1,
383    i64 %2)
384
385  ret <vscale x 2 x i1> %a
386}
387
388declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
389  <vscale x 2 x i1>,
390  <vscale x 2 x i16>,
391  <vscale x 2 x i16>,
392  <vscale x 2 x i1>,
393  i64);
394
395define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i64 %4) nounwind {
396; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i16_nxv2i16:
397; CHECK:       # %bb.0: # %entry
398; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
399; CHECK-NEXT:    vmsle.vv v25, v9, v8
400; CHECK-NEXT:    vmv1r.v v26, v0
401; CHECK-NEXT:    vmv1r.v v0, v25
402; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
403; CHECK-NEXT:    vmv1r.v v0, v26
404; CHECK-NEXT:    ret
405entry:
406  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16(
407    <vscale x 2 x i16> %1,
408    <vscale x 2 x i16> %2,
409    i64 %4)
410  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16(
411    <vscale x 2 x i1> %0,
412    <vscale x 2 x i16> %2,
413    <vscale x 2 x i16> %3,
414    <vscale x 2 x i1> %mask,
415    i64 %4)
416
417  ret <vscale x 2 x i1> %a
418}
419
420declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
421  <vscale x 4 x i16>,
422  <vscale x 4 x i16>,
423  i64);
424
425define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i64 %2) nounwind {
426; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i16_nxv4i16:
427; CHECK:       # %bb.0: # %entry
428; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
429; CHECK-NEXT:    vmsle.vv v0, v9, v8
430; CHECK-NEXT:    ret
431entry:
432  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
433    <vscale x 4 x i16> %0,
434    <vscale x 4 x i16> %1,
435    i64 %2)
436
437  ret <vscale x 4 x i1> %a
438}
439
440declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
441  <vscale x 4 x i1>,
442  <vscale x 4 x i16>,
443  <vscale x 4 x i16>,
444  <vscale x 4 x i1>,
445  i64);
446
447define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i64 %4) nounwind {
448; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i16_nxv4i16:
449; CHECK:       # %bb.0: # %entry
450; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
451; CHECK-NEXT:    vmsle.vv v25, v9, v8
452; CHECK-NEXT:    vmv1r.v v26, v0
453; CHECK-NEXT:    vmv1r.v v0, v25
454; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
455; CHECK-NEXT:    vmv1r.v v0, v26
456; CHECK-NEXT:    ret
457entry:
458  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16(
459    <vscale x 4 x i16> %1,
460    <vscale x 4 x i16> %2,
461    i64 %4)
462  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16(
463    <vscale x 4 x i1> %0,
464    <vscale x 4 x i16> %2,
465    <vscale x 4 x i16> %3,
466    <vscale x 4 x i1> %mask,
467    i64 %4)
468
469  ret <vscale x 4 x i1> %a
470}
471
472declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
473  <vscale x 8 x i16>,
474  <vscale x 8 x i16>,
475  i64);
476
477define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i64 %2) nounwind {
478; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i16_nxv8i16:
479; CHECK:       # %bb.0: # %entry
480; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
481; CHECK-NEXT:    vmsle.vv v0, v10, v8
482; CHECK-NEXT:    ret
483entry:
484  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
485    <vscale x 8 x i16> %0,
486    <vscale x 8 x i16> %1,
487    i64 %2)
488
489  ret <vscale x 8 x i1> %a
490}
491
492declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
493  <vscale x 8 x i1>,
494  <vscale x 8 x i16>,
495  <vscale x 8 x i16>,
496  <vscale x 8 x i1>,
497  i64);
498
499define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i64 %4) nounwind {
500; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i16_nxv8i16:
501; CHECK:       # %bb.0: # %entry
502; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
503; CHECK-NEXT:    vmsle.vv v25, v10, v8
504; CHECK-NEXT:    vmv1r.v v26, v0
505; CHECK-NEXT:    vmv1r.v v0, v25
506; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
507; CHECK-NEXT:    vmv1r.v v0, v26
508; CHECK-NEXT:    ret
509entry:
510  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16(
511    <vscale x 8 x i16> %1,
512    <vscale x 8 x i16> %2,
513    i64 %4)
514  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16(
515    <vscale x 8 x i1> %0,
516    <vscale x 8 x i16> %2,
517    <vscale x 8 x i16> %3,
518    <vscale x 8 x i1> %mask,
519    i64 %4)
520
521  ret <vscale x 8 x i1> %a
522}
523
524declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
525  <vscale x 16 x i16>,
526  <vscale x 16 x i16>,
527  i64);
528
529define <vscale x 16 x i1> @intrinsic_vmsge_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i64 %2) nounwind {
530; CHECK-LABEL: intrinsic_vmsge_vv_nxv16i16_nxv16i16:
531; CHECK:       # %bb.0: # %entry
532; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
533; CHECK-NEXT:    vmsle.vv v0, v12, v8
534; CHECK-NEXT:    ret
535entry:
536  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
537    <vscale x 16 x i16> %0,
538    <vscale x 16 x i16> %1,
539    i64 %2)
540
541  ret <vscale x 16 x i1> %a
542}
543
544declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
545  <vscale x 16 x i1>,
546  <vscale x 16 x i16>,
547  <vscale x 16 x i16>,
548  <vscale x 16 x i1>,
549  i64);
550
551define <vscale x 16 x i1> @intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i64 %4) nounwind {
552; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv16i16_nxv16i16:
553; CHECK:       # %bb.0: # %entry
554; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
555; CHECK-NEXT:    vmsle.vv v25, v12, v8
556; CHECK-NEXT:    vmv1r.v v26, v0
557; CHECK-NEXT:    vmv1r.v v0, v25
558; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
559; CHECK-NEXT:    vmv1r.v v0, v26
560; CHECK-NEXT:    ret
561entry:
562  %mask = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16(
563    <vscale x 16 x i16> %1,
564    <vscale x 16 x i16> %2,
565    i64 %4)
566  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16(
567    <vscale x 16 x i1> %0,
568    <vscale x 16 x i16> %2,
569    <vscale x 16 x i16> %3,
570    <vscale x 16 x i1> %mask,
571    i64 %4)
572
573  ret <vscale x 16 x i1> %a
574}
575
576declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
577  <vscale x 1 x i32>,
578  <vscale x 1 x i32>,
579  i64);
580
581define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i64 %2) nounwind {
582; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i32_nxv1i32:
583; CHECK:       # %bb.0: # %entry
584; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
585; CHECK-NEXT:    vmsle.vv v0, v9, v8
586; CHECK-NEXT:    ret
587entry:
588  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
589    <vscale x 1 x i32> %0,
590    <vscale x 1 x i32> %1,
591    i64 %2)
592
593  ret <vscale x 1 x i1> %a
594}
595
596declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
597  <vscale x 1 x i1>,
598  <vscale x 1 x i32>,
599  <vscale x 1 x i32>,
600  <vscale x 1 x i1>,
601  i64);
602
603define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i64 %4) nounwind {
604; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i32_nxv1i32:
605; CHECK:       # %bb.0: # %entry
606; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
607; CHECK-NEXT:    vmsle.vv v25, v9, v8
608; CHECK-NEXT:    vmv1r.v v26, v0
609; CHECK-NEXT:    vmv1r.v v0, v25
610; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
611; CHECK-NEXT:    vmv1r.v v0, v26
612; CHECK-NEXT:    ret
613entry:
614  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32(
615    <vscale x 1 x i32> %1,
616    <vscale x 1 x i32> %2,
617    i64 %4)
618  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32(
619    <vscale x 1 x i1> %0,
620    <vscale x 1 x i32> %2,
621    <vscale x 1 x i32> %3,
622    <vscale x 1 x i1> %mask,
623    i64 %4)
624
625  ret <vscale x 1 x i1> %a
626}
627
628declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
629  <vscale x 2 x i32>,
630  <vscale x 2 x i32>,
631  i64);
632
633define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i64 %2) nounwind {
634; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i32_nxv2i32:
635; CHECK:       # %bb.0: # %entry
636; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
637; CHECK-NEXT:    vmsle.vv v0, v9, v8
638; CHECK-NEXT:    ret
639entry:
640  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
641    <vscale x 2 x i32> %0,
642    <vscale x 2 x i32> %1,
643    i64 %2)
644
645  ret <vscale x 2 x i1> %a
646}
647
648declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
649  <vscale x 2 x i1>,
650  <vscale x 2 x i32>,
651  <vscale x 2 x i32>,
652  <vscale x 2 x i1>,
653  i64);
654
655define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i64 %4) nounwind {
656; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i32_nxv2i32:
657; CHECK:       # %bb.0: # %entry
658; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
659; CHECK-NEXT:    vmsle.vv v25, v9, v8
660; CHECK-NEXT:    vmv1r.v v26, v0
661; CHECK-NEXT:    vmv1r.v v0, v25
662; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
663; CHECK-NEXT:    vmv1r.v v0, v26
664; CHECK-NEXT:    ret
665entry:
666  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32(
667    <vscale x 2 x i32> %1,
668    <vscale x 2 x i32> %2,
669    i64 %4)
670  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32(
671    <vscale x 2 x i1> %0,
672    <vscale x 2 x i32> %2,
673    <vscale x 2 x i32> %3,
674    <vscale x 2 x i1> %mask,
675    i64 %4)
676
677  ret <vscale x 2 x i1> %a
678}
679
680declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
681  <vscale x 4 x i32>,
682  <vscale x 4 x i32>,
683  i64);
684
685define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i64 %2) nounwind {
686; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i32_nxv4i32:
687; CHECK:       # %bb.0: # %entry
688; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
689; CHECK-NEXT:    vmsle.vv v0, v10, v8
690; CHECK-NEXT:    ret
691entry:
692  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
693    <vscale x 4 x i32> %0,
694    <vscale x 4 x i32> %1,
695    i64 %2)
696
697  ret <vscale x 4 x i1> %a
698}
699
700declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
701  <vscale x 4 x i1>,
702  <vscale x 4 x i32>,
703  <vscale x 4 x i32>,
704  <vscale x 4 x i1>,
705  i64);
706
707define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i64 %4) nounwind {
708; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i32_nxv4i32:
709; CHECK:       # %bb.0: # %entry
710; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
711; CHECK-NEXT:    vmsle.vv v25, v10, v8
712; CHECK-NEXT:    vmv1r.v v26, v0
713; CHECK-NEXT:    vmv1r.v v0, v25
714; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
715; CHECK-NEXT:    vmv1r.v v0, v26
716; CHECK-NEXT:    ret
717entry:
718  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32(
719    <vscale x 4 x i32> %1,
720    <vscale x 4 x i32> %2,
721    i64 %4)
722  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32(
723    <vscale x 4 x i1> %0,
724    <vscale x 4 x i32> %2,
725    <vscale x 4 x i32> %3,
726    <vscale x 4 x i1> %mask,
727    i64 %4)
728
729  ret <vscale x 4 x i1> %a
730}
731
732declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
733  <vscale x 8 x i32>,
734  <vscale x 8 x i32>,
735  i64);
736
737define <vscale x 8 x i1> @intrinsic_vmsge_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i64 %2) nounwind {
738; CHECK-LABEL: intrinsic_vmsge_vv_nxv8i32_nxv8i32:
739; CHECK:       # %bb.0: # %entry
740; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
741; CHECK-NEXT:    vmsle.vv v0, v12, v8
742; CHECK-NEXT:    ret
743entry:
744  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
745    <vscale x 8 x i32> %0,
746    <vscale x 8 x i32> %1,
747    i64 %2)
748
749  ret <vscale x 8 x i1> %a
750}
751
752declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
753  <vscale x 8 x i1>,
754  <vscale x 8 x i32>,
755  <vscale x 8 x i32>,
756  <vscale x 8 x i1>,
757  i64);
758
759define <vscale x 8 x i1> @intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i64 %4) nounwind {
760; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv8i32_nxv8i32:
761; CHECK:       # %bb.0: # %entry
762; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
763; CHECK-NEXT:    vmsle.vv v25, v12, v8
764; CHECK-NEXT:    vmv1r.v v26, v0
765; CHECK-NEXT:    vmv1r.v v0, v25
766; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
767; CHECK-NEXT:    vmv1r.v v0, v26
768; CHECK-NEXT:    ret
769entry:
770  %mask = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32(
771    <vscale x 8 x i32> %1,
772    <vscale x 8 x i32> %2,
773    i64 %4)
774  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32(
775    <vscale x 8 x i1> %0,
776    <vscale x 8 x i32> %2,
777    <vscale x 8 x i32> %3,
778    <vscale x 8 x i1> %mask,
779    i64 %4)
780
781  ret <vscale x 8 x i1> %a
782}
783
784declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
785  <vscale x 1 x i64>,
786  <vscale x 1 x i64>,
787  i64);
788
789define <vscale x 1 x i1> @intrinsic_vmsge_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i64 %2) nounwind {
790; CHECK-LABEL: intrinsic_vmsge_vv_nxv1i64_nxv1i64:
791; CHECK:       # %bb.0: # %entry
792; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
793; CHECK-NEXT:    vmsle.vv v0, v9, v8
794; CHECK-NEXT:    ret
795entry:
796  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
797    <vscale x 1 x i64> %0,
798    <vscale x 1 x i64> %1,
799    i64 %2)
800
801  ret <vscale x 1 x i1> %a
802}
803
804declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
805  <vscale x 1 x i1>,
806  <vscale x 1 x i64>,
807  <vscale x 1 x i64>,
808  <vscale x 1 x i1>,
809  i64);
810
811define <vscale x 1 x i1> @intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i64 %4) nounwind {
812; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv1i64_nxv1i64:
813; CHECK:       # %bb.0: # %entry
814; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
815; CHECK-NEXT:    vmsle.vv v25, v9, v8
816; CHECK-NEXT:    vmv1r.v v26, v0
817; CHECK-NEXT:    vmv1r.v v0, v25
818; CHECK-NEXT:    vmsle.vv v26, v10, v9, v0.t
819; CHECK-NEXT:    vmv1r.v v0, v26
820; CHECK-NEXT:    ret
821entry:
822  %mask = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64(
823    <vscale x 1 x i64> %1,
824    <vscale x 1 x i64> %2,
825    i64 %4)
826  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64(
827    <vscale x 1 x i1> %0,
828    <vscale x 1 x i64> %2,
829    <vscale x 1 x i64> %3,
830    <vscale x 1 x i1> %mask,
831    i64 %4)
832
833  ret <vscale x 1 x i1> %a
834}
835
836declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
837  <vscale x 2 x i64>,
838  <vscale x 2 x i64>,
839  i64);
840
841define <vscale x 2 x i1> @intrinsic_vmsge_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i64 %2) nounwind {
842; CHECK-LABEL: intrinsic_vmsge_vv_nxv2i64_nxv2i64:
843; CHECK:       # %bb.0: # %entry
844; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
845; CHECK-NEXT:    vmsle.vv v0, v10, v8
846; CHECK-NEXT:    ret
847entry:
848  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
849    <vscale x 2 x i64> %0,
850    <vscale x 2 x i64> %1,
851    i64 %2)
852
853  ret <vscale x 2 x i1> %a
854}
855
856declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
857  <vscale x 2 x i1>,
858  <vscale x 2 x i64>,
859  <vscale x 2 x i64>,
860  <vscale x 2 x i1>,
861  i64);
862
863define <vscale x 2 x i1> @intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i64 %4) nounwind {
864; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv2i64_nxv2i64:
865; CHECK:       # %bb.0: # %entry
866; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
867; CHECK-NEXT:    vmsle.vv v25, v10, v8
868; CHECK-NEXT:    vmv1r.v v26, v0
869; CHECK-NEXT:    vmv1r.v v0, v25
870; CHECK-NEXT:    vmsle.vv v26, v12, v10, v0.t
871; CHECK-NEXT:    vmv1r.v v0, v26
872; CHECK-NEXT:    ret
873entry:
874  %mask = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64(
875    <vscale x 2 x i64> %1,
876    <vscale x 2 x i64> %2,
877    i64 %4)
878  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64(
879    <vscale x 2 x i1> %0,
880    <vscale x 2 x i64> %2,
881    <vscale x 2 x i64> %3,
882    <vscale x 2 x i1> %mask,
883    i64 %4)
884
885  ret <vscale x 2 x i1> %a
886}
887
888declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
889  <vscale x 4 x i64>,
890  <vscale x 4 x i64>,
891  i64);
892
893define <vscale x 4 x i1> @intrinsic_vmsge_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i64 %2) nounwind {
894; CHECK-LABEL: intrinsic_vmsge_vv_nxv4i64_nxv4i64:
895; CHECK:       # %bb.0: # %entry
896; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
897; CHECK-NEXT:    vmsle.vv v0, v12, v8
898; CHECK-NEXT:    ret
899entry:
900  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
901    <vscale x 4 x i64> %0,
902    <vscale x 4 x i64> %1,
903    i64 %2)
904
905  ret <vscale x 4 x i1> %a
906}
907
908declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
909  <vscale x 4 x i1>,
910  <vscale x 4 x i64>,
911  <vscale x 4 x i64>,
912  <vscale x 4 x i1>,
913  i64);
914
915define <vscale x 4 x i1> @intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i64 %4) nounwind {
916; CHECK-LABEL: intrinsic_vmsge_mask_vv_nxv4i64_nxv4i64:
917; CHECK:       # %bb.0: # %entry
918; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
919; CHECK-NEXT:    vmsle.vv v25, v12, v8
920; CHECK-NEXT:    vmv1r.v v26, v0
921; CHECK-NEXT:    vmv1r.v v0, v25
922; CHECK-NEXT:    vmsle.vv v26, v16, v12, v0.t
923; CHECK-NEXT:    vmv1r.v v0, v26
924; CHECK-NEXT:    ret
925entry:
926  %mask = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64(
927    <vscale x 4 x i64> %1,
928    <vscale x 4 x i64> %2,
929    i64 %4)
930  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64(
931    <vscale x 4 x i1> %0,
932    <vscale x 4 x i64> %2,
933    <vscale x 4 x i64> %3,
934    <vscale x 4 x i1> %mask,
935    i64 %4)
936
937  ret <vscale x 4 x i1> %a
938}
939
940declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
941  <vscale x 1 x i8>,
942  i8,
943  i64);
944
945define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i64 %2) nounwind {
946; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i8_i8:
947; CHECK:       # %bb.0: # %entry
948; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
949; CHECK-NEXT:    vmslt.vx v25, v8, a0
950; CHECK-NEXT:    vmnand.mm v0, v25, v25
951; CHECK-NEXT:    ret
952entry:
953  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
954    <vscale x 1 x i8> %0,
955    i8 %1,
956    i64 %2)
957
958  ret <vscale x 1 x i1> %a
959}
960
961declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
962  <vscale x 1 x i1>,
963  <vscale x 1 x i8>,
964  i8,
965  <vscale x 1 x i1>,
966  i64);
967
968define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
969; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i8_i8:
970; CHECK:       # %bb.0: # %entry
971; CHECK-NEXT:    vmv1r.v v25, v0
972; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
973; CHECK-NEXT:    vmv1r.v v0, v9
974; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
975; CHECK-NEXT:    vmxor.mm v0, v25, v9
976; CHECK-NEXT:    ret
977entry:
978  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
979    <vscale x 1 x i1> %0,
980    <vscale x 1 x i8> %1,
981    i8 %2,
982    <vscale x 1 x i1> %3,
983    i64 %4)
984
985  ret <vscale x 1 x i1> %a
986}
987
988declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
989  <vscale x 2 x i8>,
990  i8,
991  i64);
992
993define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i64 %2) nounwind {
994; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i8_i8:
995; CHECK:       # %bb.0: # %entry
996; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
997; CHECK-NEXT:    vmslt.vx v25, v8, a0
998; CHECK-NEXT:    vmnand.mm v0, v25, v25
999; CHECK-NEXT:    ret
1000entry:
1001  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
1002    <vscale x 2 x i8> %0,
1003    i8 %1,
1004    i64 %2)
1005
1006  ret <vscale x 2 x i1> %a
1007}
1008
1009declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
1010  <vscale x 2 x i1>,
1011  <vscale x 2 x i8>,
1012  i8,
1013  <vscale x 2 x i1>,
1014  i64);
1015
1016define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1017; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i8_i8:
1018; CHECK:       # %bb.0: # %entry
1019; CHECK-NEXT:    vmv1r.v v25, v0
1020; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
1021; CHECK-NEXT:    vmv1r.v v0, v9
1022; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1023; CHECK-NEXT:    vmxor.mm v0, v25, v9
1024; CHECK-NEXT:    ret
1025entry:
1026  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
1027    <vscale x 2 x i1> %0,
1028    <vscale x 2 x i8> %1,
1029    i8 %2,
1030    <vscale x 2 x i1> %3,
1031    i64 %4)
1032
1033  ret <vscale x 2 x i1> %a
1034}
1035
1036declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1037  <vscale x 4 x i8>,
1038  i8,
1039  i64);
1040
1041define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i64 %2) nounwind {
1042; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i8_i8:
1043; CHECK:       # %bb.0: # %entry
1044; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1045; CHECK-NEXT:    vmslt.vx v25, v8, a0
1046; CHECK-NEXT:    vmnand.mm v0, v25, v25
1047; CHECK-NEXT:    ret
1048entry:
1049  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
1050    <vscale x 4 x i8> %0,
1051    i8 %1,
1052    i64 %2)
1053
1054  ret <vscale x 4 x i1> %a
1055}
1056
1057declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1058  <vscale x 4 x i1>,
1059  <vscale x 4 x i8>,
1060  i8,
1061  <vscale x 4 x i1>,
1062  i64);
1063
1064define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1065; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i8_i8:
1066; CHECK:       # %bb.0: # %entry
1067; CHECK-NEXT:    vmv1r.v v25, v0
1068; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
1069; CHECK-NEXT:    vmv1r.v v0, v9
1070; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1071; CHECK-NEXT:    vmxor.mm v0, v25, v9
1072; CHECK-NEXT:    ret
1073entry:
1074  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
1075    <vscale x 4 x i1> %0,
1076    <vscale x 4 x i8> %1,
1077    i8 %2,
1078    <vscale x 4 x i1> %3,
1079    i64 %4)
1080
1081  ret <vscale x 4 x i1> %a
1082}
1083
1084declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
1085  <vscale x 8 x i8>,
1086  i8,
1087  i64);
1088
1089define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i64 %2) nounwind {
1090; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i8_i8:
1091; CHECK:       # %bb.0: # %entry
1092; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1093; CHECK-NEXT:    vmslt.vx v25, v8, a0
1094; CHECK-NEXT:    vmnand.mm v0, v25, v25
1095; CHECK-NEXT:    ret
1096entry:
1097  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
1098    <vscale x 8 x i8> %0,
1099    i8 %1,
1100    i64 %2)
1101
1102  ret <vscale x 8 x i1> %a
1103}
1104
1105declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
1106  <vscale x 8 x i1>,
1107  <vscale x 8 x i8>,
1108  i8,
1109  <vscale x 8 x i1>,
1110  i64);
1111
1112define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1113; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i8_i8:
1114; CHECK:       # %bb.0: # %entry
1115; CHECK-NEXT:    vmv1r.v v25, v0
1116; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
1117; CHECK-NEXT:    vmv1r.v v0, v9
1118; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1119; CHECK-NEXT:    vmxor.mm v0, v25, v9
1120; CHECK-NEXT:    ret
1121entry:
1122  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
1123    <vscale x 8 x i1> %0,
1124    <vscale x 8 x i8> %1,
1125    i8 %2,
1126    <vscale x 8 x i1> %3,
1127    i64 %4)
1128
1129  ret <vscale x 8 x i1> %a
1130}
1131
1132declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
1133  <vscale x 16 x i8>,
1134  i8,
1135  i64);
1136
1137define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i64 %2) nounwind {
1138; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i8_i8:
1139; CHECK:       # %bb.0: # %entry
1140; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1141; CHECK-NEXT:    vmslt.vx v25, v8, a0
1142; CHECK-NEXT:    vmnand.mm v0, v25, v25
1143; CHECK-NEXT:    ret
1144entry:
1145  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
1146    <vscale x 16 x i8> %0,
1147    i8 %1,
1148    i64 %2)
1149
1150  ret <vscale x 16 x i1> %a
1151}
1152
1153declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
1154  <vscale x 16 x i1>,
1155  <vscale x 16 x i8>,
1156  i8,
1157  <vscale x 16 x i1>,
1158  i64);
1159
1160define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1161; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i8_i8:
1162; CHECK:       # %bb.0: # %entry
1163; CHECK-NEXT:    vmv1r.v v25, v0
1164; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
1165; CHECK-NEXT:    vmv1r.v v0, v10
1166; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1167; CHECK-NEXT:    vmxor.mm v0, v25, v10
1168; CHECK-NEXT:    ret
1169entry:
1170  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
1171    <vscale x 16 x i1> %0,
1172    <vscale x 16 x i8> %1,
1173    i8 %2,
1174    <vscale x 16 x i1> %3,
1175    i64 %4)
1176
1177  ret <vscale x 16 x i1> %a
1178}
1179
1180declare <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
1181  <vscale x 32 x i8>,
1182  i8,
1183  i64);
1184
1185define <vscale x 32 x i1> @intrinsic_vmsge_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i64 %2) nounwind {
1186; CHECK-LABEL: intrinsic_vmsge_vx_nxv32i8_i8:
1187; CHECK:       # %bb.0: # %entry
1188; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1189; CHECK-NEXT:    vmslt.vx v25, v8, a0
1190; CHECK-NEXT:    vmnand.mm v0, v25, v25
1191; CHECK-NEXT:    ret
1192entry:
1193  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
1194    <vscale x 32 x i8> %0,
1195    i8 %1,
1196    i64 %2)
1197
1198  ret <vscale x 32 x i1> %a
1199}
1200
1201declare <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
1202  <vscale x 32 x i1>,
1203  <vscale x 32 x i8>,
1204  i8,
1205  <vscale x 32 x i1>,
1206  i64);
1207
1208define <vscale x 32 x i1> @intrinsic_vmsge_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i64 %4) nounwind {
1209; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv32i8_i8:
1210; CHECK:       # %bb.0: # %entry
1211; CHECK-NEXT:    vmv1r.v v25, v0
1212; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
1213; CHECK-NEXT:    vmv1r.v v0, v12
1214; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1215; CHECK-NEXT:    vmxor.mm v0, v25, v12
1216; CHECK-NEXT:    ret
1217entry:
1218  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
1219    <vscale x 32 x i1> %0,
1220    <vscale x 32 x i8> %1,
1221    i8 %2,
1222    <vscale x 32 x i1> %3,
1223    i64 %4)
1224
1225  ret <vscale x 32 x i1> %a
1226}
1227
1228declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
1229  <vscale x 1 x i16>,
1230  i16,
1231  i64);
1232
1233define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i64 %2) nounwind {
1234; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i16_i16:
1235; CHECK:       # %bb.0: # %entry
1236; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1237; CHECK-NEXT:    vmslt.vx v25, v8, a0
1238; CHECK-NEXT:    vmnand.mm v0, v25, v25
1239; CHECK-NEXT:    ret
1240entry:
1241  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
1242    <vscale x 1 x i16> %0,
1243    i16 %1,
1244    i64 %2)
1245
1246  ret <vscale x 1 x i1> %a
1247}
1248
1249declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
1250  <vscale x 1 x i1>,
1251  <vscale x 1 x i16>,
1252  i16,
1253  <vscale x 1 x i1>,
1254  i64);
1255
1256define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1257; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i16_i16:
1258; CHECK:       # %bb.0: # %entry
1259; CHECK-NEXT:    vmv1r.v v25, v0
1260; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
1261; CHECK-NEXT:    vmv1r.v v0, v9
1262; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1263; CHECK-NEXT:    vmxor.mm v0, v25, v9
1264; CHECK-NEXT:    ret
1265entry:
1266  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
1267    <vscale x 1 x i1> %0,
1268    <vscale x 1 x i16> %1,
1269    i16 %2,
1270    <vscale x 1 x i1> %3,
1271    i64 %4)
1272
1273  ret <vscale x 1 x i1> %a
1274}
1275
1276declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
1277  <vscale x 2 x i16>,
1278  i16,
1279  i64);
1280
1281define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i64 %2) nounwind {
1282; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i16_i16:
1283; CHECK:       # %bb.0: # %entry
1284; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1285; CHECK-NEXT:    vmslt.vx v25, v8, a0
1286; CHECK-NEXT:    vmnand.mm v0, v25, v25
1287; CHECK-NEXT:    ret
1288entry:
1289  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
1290    <vscale x 2 x i16> %0,
1291    i16 %1,
1292    i64 %2)
1293
1294  ret <vscale x 2 x i1> %a
1295}
1296
1297declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
1298  <vscale x 2 x i1>,
1299  <vscale x 2 x i16>,
1300  i16,
1301  <vscale x 2 x i1>,
1302  i64);
1303
1304define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1305; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i16_i16:
1306; CHECK:       # %bb.0: # %entry
1307; CHECK-NEXT:    vmv1r.v v25, v0
1308; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
1309; CHECK-NEXT:    vmv1r.v v0, v9
1310; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1311; CHECK-NEXT:    vmxor.mm v0, v25, v9
1312; CHECK-NEXT:    ret
1313entry:
1314  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
1315    <vscale x 2 x i1> %0,
1316    <vscale x 2 x i16> %1,
1317    i16 %2,
1318    <vscale x 2 x i1> %3,
1319    i64 %4)
1320
1321  ret <vscale x 2 x i1> %a
1322}
1323
1324declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
1325  <vscale x 4 x i16>,
1326  i16,
1327  i64);
1328
1329define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i64 %2) nounwind {
1330; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i16_i16:
1331; CHECK:       # %bb.0: # %entry
1332; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1333; CHECK-NEXT:    vmslt.vx v25, v8, a0
1334; CHECK-NEXT:    vmnand.mm v0, v25, v25
1335; CHECK-NEXT:    ret
1336entry:
1337  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
1338    <vscale x 4 x i16> %0,
1339    i16 %1,
1340    i64 %2)
1341
1342  ret <vscale x 4 x i1> %a
1343}
1344
1345declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
1346  <vscale x 4 x i1>,
1347  <vscale x 4 x i16>,
1348  i16,
1349  <vscale x 4 x i1>,
1350  i64);
1351
1352define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1353; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i16_i16:
1354; CHECK:       # %bb.0: # %entry
1355; CHECK-NEXT:    vmv1r.v v25, v0
1356; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
1357; CHECK-NEXT:    vmv1r.v v0, v9
1358; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1359; CHECK-NEXT:    vmxor.mm v0, v25, v9
1360; CHECK-NEXT:    ret
1361entry:
1362  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
1363    <vscale x 4 x i1> %0,
1364    <vscale x 4 x i16> %1,
1365    i16 %2,
1366    <vscale x 4 x i1> %3,
1367    i64 %4)
1368
1369  ret <vscale x 4 x i1> %a
1370}
1371
1372declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
1373  <vscale x 8 x i16>,
1374  i16,
1375  i64);
1376
1377define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i64 %2) nounwind {
1378; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i16_i16:
1379; CHECK:       # %bb.0: # %entry
1380; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1381; CHECK-NEXT:    vmslt.vx v25, v8, a0
1382; CHECK-NEXT:    vmnand.mm v0, v25, v25
1383; CHECK-NEXT:    ret
1384entry:
1385  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
1386    <vscale x 8 x i16> %0,
1387    i16 %1,
1388    i64 %2)
1389
1390  ret <vscale x 8 x i1> %a
1391}
1392
1393declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
1394  <vscale x 8 x i1>,
1395  <vscale x 8 x i16>,
1396  i16,
1397  <vscale x 8 x i1>,
1398  i64);
1399
1400define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
1401; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i16_i16:
1402; CHECK:       # %bb.0: # %entry
1403; CHECK-NEXT:    vmv1r.v v25, v0
1404; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
1405; CHECK-NEXT:    vmv1r.v v0, v10
1406; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1407; CHECK-NEXT:    vmxor.mm v0, v25, v10
1408; CHECK-NEXT:    ret
1409entry:
1410  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
1411    <vscale x 8 x i1> %0,
1412    <vscale x 8 x i16> %1,
1413    i16 %2,
1414    <vscale x 8 x i1> %3,
1415    i64 %4)
1416
1417  ret <vscale x 8 x i1> %a
1418}
1419
1420declare <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
1421  <vscale x 16 x i16>,
1422  i16,
1423  i64);
1424
1425define <vscale x 16 x i1> @intrinsic_vmsge_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i64 %2) nounwind {
1426; CHECK-LABEL: intrinsic_vmsge_vx_nxv16i16_i16:
1427; CHECK:       # %bb.0: # %entry
1428; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1429; CHECK-NEXT:    vmslt.vx v25, v8, a0
1430; CHECK-NEXT:    vmnand.mm v0, v25, v25
1431; CHECK-NEXT:    ret
1432entry:
1433  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
1434    <vscale x 16 x i16> %0,
1435    i16 %1,
1436    i64 %2)
1437
1438  ret <vscale x 16 x i1> %a
1439}
1440
1441declare <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
1442  <vscale x 16 x i1>,
1443  <vscale x 16 x i16>,
1444  i16,
1445  <vscale x 16 x i1>,
1446  i64);
1447
1448define <vscale x 16 x i1> @intrinsic_vmsge_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i64 %4) nounwind {
1449; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv16i16_i16:
1450; CHECK:       # %bb.0: # %entry
1451; CHECK-NEXT:    vmv1r.v v25, v0
1452; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
1453; CHECK-NEXT:    vmv1r.v v0, v12
1454; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1455; CHECK-NEXT:    vmxor.mm v0, v25, v12
1456; CHECK-NEXT:    ret
1457entry:
1458  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
1459    <vscale x 16 x i1> %0,
1460    <vscale x 16 x i16> %1,
1461    i16 %2,
1462    <vscale x 16 x i1> %3,
1463    i64 %4)
1464
1465  ret <vscale x 16 x i1> %a
1466}
1467
1468declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
1469  <vscale x 1 x i32>,
1470  i32,
1471  i64);
1472
1473define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i64 %2) nounwind {
1474; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i32_i32:
1475; CHECK:       # %bb.0: # %entry
1476; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1477; CHECK-NEXT:    vmslt.vx v25, v8, a0
1478; CHECK-NEXT:    vmnand.mm v0, v25, v25
1479; CHECK-NEXT:    ret
1480entry:
1481  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
1482    <vscale x 1 x i32> %0,
1483    i32 %1,
1484    i64 %2)
1485
1486  ret <vscale x 1 x i1> %a
1487}
1488
1489declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
1490  <vscale x 1 x i1>,
1491  <vscale x 1 x i32>,
1492  i32,
1493  <vscale x 1 x i1>,
1494  i64);
1495
1496define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
1497; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i32_i32:
1498; CHECK:       # %bb.0: # %entry
1499; CHECK-NEXT:    vmv1r.v v25, v0
1500; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
1501; CHECK-NEXT:    vmv1r.v v0, v9
1502; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1503; CHECK-NEXT:    vmxor.mm v0, v25, v9
1504; CHECK-NEXT:    ret
1505entry:
1506  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
1507    <vscale x 1 x i1> %0,
1508    <vscale x 1 x i32> %1,
1509    i32 %2,
1510    <vscale x 1 x i1> %3,
1511    i64 %4)
1512
1513  ret <vscale x 1 x i1> %a
1514}
1515
1516declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
1517  <vscale x 2 x i32>,
1518  i32,
1519  i64);
1520
1521define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i64 %2) nounwind {
1522; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i32_i32:
1523; CHECK:       # %bb.0: # %entry
1524; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1525; CHECK-NEXT:    vmslt.vx v25, v8, a0
1526; CHECK-NEXT:    vmnand.mm v0, v25, v25
1527; CHECK-NEXT:    ret
1528entry:
1529  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
1530    <vscale x 2 x i32> %0,
1531    i32 %1,
1532    i64 %2)
1533
1534  ret <vscale x 2 x i1> %a
1535}
1536
1537declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
1538  <vscale x 2 x i1>,
1539  <vscale x 2 x i32>,
1540  i32,
1541  <vscale x 2 x i1>,
1542  i64);
1543
1544define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
1545; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i32_i32:
1546; CHECK:       # %bb.0: # %entry
1547; CHECK-NEXT:    vmv1r.v v25, v0
1548; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
1549; CHECK-NEXT:    vmv1r.v v0, v9
1550; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
1551; CHECK-NEXT:    vmxor.mm v0, v25, v9
1552; CHECK-NEXT:    ret
1553entry:
1554  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
1555    <vscale x 2 x i1> %0,
1556    <vscale x 2 x i32> %1,
1557    i32 %2,
1558    <vscale x 2 x i1> %3,
1559    i64 %4)
1560
1561  ret <vscale x 2 x i1> %a
1562}
1563
1564declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
1565  <vscale x 4 x i32>,
1566  i32,
1567  i64);
1568
1569define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i64 %2) nounwind {
1570; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i32_i32:
1571; CHECK:       # %bb.0: # %entry
1572; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
1573; CHECK-NEXT:    vmslt.vx v25, v8, a0
1574; CHECK-NEXT:    vmnand.mm v0, v25, v25
1575; CHECK-NEXT:    ret
1576entry:
1577  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
1578    <vscale x 4 x i32> %0,
1579    i32 %1,
1580    i64 %2)
1581
1582  ret <vscale x 4 x i1> %a
1583}
1584
1585declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
1586  <vscale x 4 x i1>,
1587  <vscale x 4 x i32>,
1588  i32,
1589  <vscale x 4 x i1>,
1590  i64);
1591
1592define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
1593; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i32_i32:
1594; CHECK:       # %bb.0: # %entry
1595; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v25, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i64);

define <vscale x 8 x i1> @intrinsic_vmsge_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmnand.mm v0, v25, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i64 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i64);

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v25, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i64 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
  <vscale x 1 x i64>,
  i64,
  i64);

define <vscale x 1 x i1> @intrinsic_vmsge_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmnand.mm v0, v25, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  i64,
  <vscale x 1 x i1>,
  i64);

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v25, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i64 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  i64);

define <vscale x 2 x i1> @intrinsic_vmsge_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmnand.mm v0, v25, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i64);

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v25, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i64 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  i64);

define <vscale x 4 x i1> @intrinsic_vmsge_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i64 %2) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vx_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmnand.mm v0, v25, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i64 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i64);

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i64 %4) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vx_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmxor.mm v0, v25, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i64 %4)

  ret <vscale x 4 x i1> %a
}

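; The .vi tests below exercise the immediate form. As the checks show, vmsge.vi
; with immediate N is expected to be emitted as vmsgt.vi with immediate N-1,
; since x >= N is equivalent to x > N-1 for these signed compares.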
define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 -15,
    i64 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, -15, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 -14,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 -13,
    i64 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, -13, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 -12,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 -11,
    i64 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, -11, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 -10,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 -9,
    i64 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, -9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 -8,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 -7,
    i64 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v25, v8, -7, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 -6,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsge_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -6
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 -5,
    i64 %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsge_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v25, v8, -5, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 -4,
    <vscale x 32 x i1> %2,
    i64 %3)

  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -4
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -3,
    i64 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, -3, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 -2,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 -1,
    i64 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, -1, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 0,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 0,
    i64 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 1,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 2,
    i64 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v25, v8, 2, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 3,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 4,
    i64 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v25, v8, 4, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 5,
    <vscale x 16 x i1> %2,
    i64 %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 5
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 6,
    i64 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, 6, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 7,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 7
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 8,
    i64 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 10,
    i64 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v25, v8, 10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 11,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 12,
    i64 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v25, v8, 12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 13,
    <vscale x 8 x i1> %2,
    i64 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 13
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 14,
    i64 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsgt.vi v25, v8, 14, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 15,
    <vscale x 1 x i1> %2,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, 15
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 16,
    i64 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsgt.vi v25, v8, -16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 -15,
    <vscale x 2 x i1> %2,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1) nounwind {
; CHECK-LABEL: intrinsic_vmsge_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmsgt.vi v0, v8, -15
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 -14,
    i64 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_mask_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsgt.vi v25, v8, -14, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 -13,
    <vscale x 4 x i1> %2,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

; Test cases where the mask and maskedoff are the same value.
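; As the checks show, when the mask and maskedoff operands are identical the
; compare is emitted unmasked as vmslt followed by a single vmandnot.mm
; (result = mask & ~(lhs < rhs)), avoiding the vmv1r.v mask copies used above.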
define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %0,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %0,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %0,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %0,
    i64 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %0,
    i64 %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmsge.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %0,
    i64 %3)

  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %0,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %0,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %0,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %0,
    i64 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmsge.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %0,
    i64 %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %0,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %0,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %0,
    i64 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmsge.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %0,
    i64 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmsge.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %0,
    i64 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmsge.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %0,
    i64 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, i64 %3) nounwind {
; CHECK-LABEL: intrinsic_vmsge_maskedoff_mask_vx_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e64, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v25, v8, a0
; CHECK-NEXT:    vmandnot.mm v0, v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmsge.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %0,
    i64 %3)

  ret <vscale x 4 x i1> %a
}