; NOTE: Assertions have been autogenerated by utils/update_llc_test_checks.py
; RUN: llc -mtriple=riscv32 -mattr=+experimental-v -verify-machineinstrs \
; RUN:   < %s | FileCheck %s
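
; These tests exercise the @llvm.riscv.vmslt ("set if less than, signed")
; compare intrinsics. The unmasked form takes (op1, op2, vl) and lowers to a
; single vmslt.vv/vmslt.vx that writes the result mask directly into v0; the
; masked form takes (maskedoff, op1, op2, mask, vl) and lowers to vmslt with
; v0.t, with vmv1r.v copies rotating the mask operands through v0.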
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i8_nxv1i8(<vscale x 1 x i8> %0, <vscale x 1 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
    <vscale x 1 x i8> %0,
    <vscale x 1 x i8> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  <vscale x 1 x i8>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i8> %2, <vscale x 1 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i8_nxv1i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8(
    <vscale x 1 x i8> %1,
    <vscale x 1 x i8> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %2,
    <vscale x 1 x i8> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i8_nxv2i8(<vscale x 2 x i8> %0, <vscale x 2 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
    <vscale x 2 x i8> %0,
    <vscale x 2 x i8> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  <vscale x 2 x i8>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i8> %2, <vscale x 2 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i8_nxv2i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8(
    <vscale x 2 x i8> %1,
    <vscale x 2 x i8> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %2,
    <vscale x 2 x i8> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i8_nxv4i8(<vscale x 4 x i8> %0, <vscale x 4 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
    <vscale x 4 x i8> %0,
    <vscale x 4 x i8> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  <vscale x 4 x i8>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i8> %2, <vscale x 4 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i8_nxv4i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8(
    <vscale x 4 x i8> %1,
    <vscale x 4 x i8> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %2,
    <vscale x 4 x i8> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i8_nxv8i8(<vscale x 8 x i8> %0, <vscale x 8 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
    <vscale x 8 x i8> %0,
    <vscale x 8 x i8> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  <vscale x 8 x i8>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i8> %2, <vscale x 8 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i8_nxv8i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8(
    <vscale x 8 x i8> %1,
    <vscale x 8 x i8> %2,
    i32 %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %2,
    <vscale x 8 x i8> %3,
    <vscale x 8 x i1> %mask,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i8_nxv16i8(<vscale x 16 x i8> %0, <vscale x 16 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
    <vscale x 16 x i8> %0,
    <vscale x 16 x i8> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  <vscale x 16 x i8>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i8> %2, <vscale x 16 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i8_nxv16i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8(
    <vscale x 16 x i8> %1,
    <vscale x 16 x i8> %2,
    i32 %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %2,
    <vscale x 16 x i8> %3,
    <vscale x 16 x i1> %mask,
    i32 %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmslt_vv_nxv32i8_nxv32i8(<vscale x 32 x i8> %0, <vscale x 32 x i8> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
    <vscale x 32 x i8> %0,
    <vscale x 32 x i8> %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  <vscale x 32 x i8>,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i8> %2, <vscale x 32 x i8> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv32i8_nxv32i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8(
    <vscale x 32 x i8> %1,
    <vscale x 32 x i8> %2,
    i32 %4)
  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %2,
    <vscale x 32 x i8> %3,
    <vscale x 32 x i1> %mask,
    i32 %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i16_nxv1i16(<vscale x 1 x i16> %0, <vscale x 1 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
    <vscale x 1 x i16> %0,
    <vscale x 1 x i16> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  <vscale x 1 x i16>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i16> %2, <vscale x 1 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i16_nxv1i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16(
    <vscale x 1 x i16> %1,
    <vscale x 1 x i16> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %2,
    <vscale x 1 x i16> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i16_nxv2i16(<vscale x 2 x i16> %0, <vscale x 2 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
    <vscale x 2 x i16> %0,
    <vscale x 2 x i16> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  <vscale x 2 x i16>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i16> %2, <vscale x 2 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i16_nxv2i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16(
    <vscale x 2 x i16> %1,
    <vscale x 2 x i16> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %2,
    <vscale x 2 x i16> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i16_nxv4i16(<vscale x 4 x i16> %0, <vscale x 4 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
    <vscale x 4 x i16> %0,
    <vscale x 4 x i16> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  <vscale x 4 x i16>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i16> %2, <vscale x 4 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i16_nxv4i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16(
    <vscale x 4 x i16> %1,
    <vscale x 4 x i16> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %2,
    <vscale x 4 x i16> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i16_nxv8i16(<vscale x 8 x i16> %0, <vscale x 8 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
    <vscale x 8 x i16> %0,
    <vscale x 8 x i16> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  <vscale x 8 x i16>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i16> %2, <vscale x 8 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i16_nxv8i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16(
    <vscale x 8 x i16> %1,
    <vscale x 8 x i16> %2,
    i32 %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %2,
    <vscale x 8 x i16> %3,
    <vscale x 8 x i1> %mask,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_vv_nxv16i16_nxv16i16(<vscale x 16 x i16> %0, <vscale x 16 x i16> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
    <vscale x 16 x i16> %0,
    <vscale x 16 x i16> %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  <vscale x 16 x i16>,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i16> %2, <vscale x 16 x i16> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv16i16_nxv16i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16(
    <vscale x 16 x i16> %1,
    <vscale x 16 x i16> %2,
    i32 %4)
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %2,
    <vscale x 16 x i16> %3,
    <vscale x 16 x i1> %mask,
    i32 %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i32_nxv1i32(<vscale x 1 x i32> %0, <vscale x 1 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
    <vscale x 1 x i32> %0,
    <vscale x 1 x i32> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  <vscale x 1 x i32>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i32> %2, <vscale x 1 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i32_nxv1i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32(
    <vscale x 1 x i32> %1,
    <vscale x 1 x i32> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %2,
    <vscale x 1 x i32> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i32_nxv2i32(<vscale x 2 x i32> %0, <vscale x 2 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
    <vscale x 2 x i32> %0,
    <vscale x 2 x i32> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  <vscale x 2 x i32>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i32> %2, <vscale x 2 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i32_nxv2i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32(
    <vscale x 2 x i32> %1,
    <vscale x 2 x i32> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %2,
    <vscale x 2 x i32> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i32_nxv4i32(<vscale x 4 x i32> %0, <vscale x 4 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
    <vscale x 4 x i32> %0,
    <vscale x 4 x i32> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  <vscale x 4 x i32>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i32> %2, <vscale x 4 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i32_nxv4i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32(
    <vscale x 4 x i32> %1,
    <vscale x 4 x i32> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %2,
    <vscale x 4 x i32> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_vv_nxv8i32_nxv8i32(<vscale x 8 x i32> %0, <vscale x 8 x i32> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
    <vscale x 8 x i32> %0,
    <vscale x 8 x i32> %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  <vscale x 8 x i32>,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i32> %2, <vscale x 8 x i32> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv8i32_nxv8i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32(
    <vscale x 8 x i32> %1,
    <vscale x 8 x i32> %2,
    i32 %4)
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %2,
    <vscale x 8 x i32> %3,
    <vscale x 8 x i1> %mask,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vv_nxv1i64_nxv1i64(<vscale x 1 x i64> %0, <vscale x 1 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
    <vscale x 1 x i64> %0,
    <vscale x 1 x i64> %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
  <vscale x 1 x i1>,
  <vscale x 1 x i64>,
  <vscale x 1 x i64>,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i64> %2, <vscale x 1 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv1i64_nxv1i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v9
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v9, v10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64(
    <vscale x 1 x i64> %1,
    <vscale x 1 x i64> %2,
    i32 %4)
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %2,
    <vscale x 1 x i64> %3,
    <vscale x 1 x i1> %mask,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vv_nxv2i64_nxv2i64(<vscale x 2 x i64> %0, <vscale x 2 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
    <vscale x 2 x i64> %0,
    <vscale x 2 x i64> %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  <vscale x 2 x i64>,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i64> %2, <vscale x 2 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv2i64_nxv2i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v10
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v10, v12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64(
    <vscale x 2 x i64> %1,
    <vscale x 2 x i64> %2,
    i32 %4)
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %2,
    <vscale x 2 x i64> %3,
    <vscale x 2 x i1> %mask,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vv_nxv4i64_nxv4i64(<vscale x 4 x i64> %0, <vscale x 4 x i64> %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v0, v8, v12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
    <vscale x 4 x i64> %0,
    <vscale x 4 x i64> %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  <vscale x 4 x i64>,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i64> %2, <vscale x 4 x i64> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vv_nxv4i64_nxv4i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmslt.vv v25, v8, v12
; CHECK-NEXT:    vmv1r.v v26, v0
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    vmslt.vv v26, v12, v16, v0.t
; CHECK-NEXT:    vmv1r.v v0, v26
; CHECK-NEXT:    ret
entry:
  %mask = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64(
    <vscale x 4 x i64> %1,
    <vscale x 4 x i64> %2,
    i32 %4)
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %2,
    <vscale x 4 x i64> %3,
    <vscale x 4 x i1> %mask,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

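; The vector-scalar (vx) variants below compare each element against a scalar
; held in a GPR: the scalar arrives in a0, so the AVL shifts to a1 in the
; vsetvli.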
declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
  <vscale x 1 x i8>,
  i8,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i8_i8(<vscale x 1 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
  <vscale x 1 x i1>,
  <vscale x 1 x i8>,
  i8,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, i8 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
  <vscale x 2 x i8>,
  i8,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i8_i8(<vscale x 2 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
  <vscale x 2 x i1>,
  <vscale x 2 x i8>,
  i8,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, i8 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
  <vscale x 4 x i8>,
  i8,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i8_i8(<vscale x 4 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
  <vscale x 4 x i1>,
  <vscale x 4 x i8>,
  i8,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, i8 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
  <vscale x 8 x i8>,
  i8,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i8_i8(<vscale x 8 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
  <vscale x 8 x i1>,
  <vscale x 8 x i8>,
  i8,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, i8 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
  <vscale x 16 x i8>,
  i8,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i8_i8(<vscale x 16 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
  <vscale x 16 x i1>,
  <vscale x 16 x i8>,
  i8,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, i8 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
  <vscale x 32 x i8>,
  i8,
  i32);

define <vscale x 32 x i1> @intrinsic_vmslt_vx_nxv32i8_i8(<vscale x 32 x i8> %0, i8 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 %1,
    i32 %2)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
  <vscale x 32 x i1>,
  <vscale x 32 x i8>,
  i8,
  <vscale x 32 x i1>,
  i32);

define <vscale x 32 x i1> @intrinsic_vmslt_mask_vx_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, i8 %2, <vscale x 32 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 %2,
    <vscale x 32 x i1> %3,
    i32 %4)

  ret <vscale x 32 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
  <vscale x 1 x i16>,
  i16,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i16_i16(<vscale x 1 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
  <vscale x 1 x i1>,
  <vscale x 1 x i16>,
  i16,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, i16 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
  <vscale x 2 x i16>,
  i16,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i16_i16(<vscale x 2 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
  <vscale x 2 x i1>,
  <vscale x 2 x i16>,
  i16,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, i16 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
  <vscale x 4 x i16>,
  i16,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i16_i16(<vscale x 4 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
  <vscale x 4 x i1>,
  <vscale x 4 x i16>,
  i16,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, i16 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
  <vscale x 8 x i16>,
  i16,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i16_i16(<vscale x 8 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
  <vscale x 8 x i1>,
  <vscale x 8 x i16>,
  i16,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, i16 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
  <vscale x 16 x i16>,
  i16,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_vx_nxv16i16_i16(<vscale x 16 x i16> %0, i16 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 %1,
    i32 %2)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
  <vscale x 16 x i1>,
  <vscale x 16 x i16>,
  i16,
  <vscale x 16 x i1>,
  i32);

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vx_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, i16 %2, <vscale x 16 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 %2,
    <vscale x 16 x i1> %3,
    i32 %4)

  ret <vscale x 16 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
  <vscale x 1 x i32>,
  i32,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
  <vscale x 1 x i1>,
  <vscale x 1 x i32>,
  i32,
  <vscale x 1 x i1>,
  i32);

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, i32 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
  <vscale x 2 x i32>,
  i32,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
  <vscale x 2 x i1>,
  <vscale x 2 x i32>,
  i32,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, i32 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
  <vscale x 4 x i32>,
  i32,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
  <vscale x 4 x i1>,
  <vscale x 4 x i32>,
  i32,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, i32 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
  <vscale x 8 x i32>,
  i32,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_vx_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmslt.vx v0, v8, a0
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 %1,
    i32 %2)

  ret <vscale x 8 x i1> %a
}

declare <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
  <vscale x 8 x i1>,
  <vscale x 8 x i32>,
  i32,
  <vscale x 8 x i1>,
  i32);

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vx_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, i32 %2, <vscale x 8 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a1, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmslt.vx v25, v8, a0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 %2,
    <vscale x 8 x i1> %3,
    i32 %4)

  ret <vscale x 8 x i1> %a
}

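; On riscv32 an i64 scalar does not fit in a single GPR, so the i64 cases
; below store both halves to the stack, splat the value with a zero-stride
; vlse64.v, and fall back to a vector-vector compare.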
1645declare <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
1646  <vscale x 1 x i64>,
1647  i64,
1648  i32);
1649
1650define <vscale x 1 x i1> @intrinsic_vmslt_vx_nxv1i64_i64(<vscale x 1 x i64> %0, i64 %1, i32 %2) nounwind {
1651; CHECK-LABEL: intrinsic_vmslt_vx_nxv1i64_i64:
1652; CHECK:       # %bb.0: # %entry
1653; CHECK-NEXT:    addi sp, sp, -16
1654; CHECK-NEXT:    sw a1, 12(sp)
1655; CHECK-NEXT:    sw a0, 8(sp)
1656; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
1657; CHECK-NEXT:    addi a0, sp, 8
1658; CHECK-NEXT:    vlse64.v v25, (a0), zero
1659; CHECK-NEXT:    vmslt.vv v0, v8, v25
1660; CHECK-NEXT:    addi sp, sp, 16
1661; CHECK-NEXT:    ret
1662entry:
1663  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
1664    <vscale x 1 x i64> %0,
1665    i64 %1,
1666    i32 %2)
1667
1668  ret <vscale x 1 x i1> %a
1669}
1670
1671declare <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
1672  <vscale x 1 x i1>,
1673  <vscale x 1 x i64>,
1674  i64,
1675  <vscale x 1 x i1>,
1676  i32);
1677
define <vscale x 1 x i1> @intrinsic_vmslt_mask_vx_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, i64 %2, <vscale x 1 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m1, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 %2,
    <vscale x 1 x i1> %3,
    i32 %4)

  ret <vscale x 1 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
  <vscale x 2 x i64>,
  i64,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_vx_nxv2i64_i64(<vscale x 2 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vmslt.vv v0, v8, v26
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
  <vscale x 2 x i1>,
  <vscale x 2 x i64>,
  i64,
  <vscale x 2 x i1>,
  i32);

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vx_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, i64 %2, <vscale x 2 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m2, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v26, (a0), zero
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmslt.vv v25, v8, v26, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 %2,
    <vscale x 2 x i1> %3,
    i32 %4)

  ret <vscale x 2 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
  <vscale x 4 x i64>,
  i64,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_vx_nxv4i64_i64(<vscale x 4 x i64> %0, i64 %1, i32 %2) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vx_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vmslt.vv v0, v8, v28
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 %1,
    i32 %2)

  ret <vscale x 4 x i1> %a
}

declare <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
  <vscale x 4 x i1>,
  <vscale x 4 x i64>,
  i64,
  <vscale x 4 x i1>,
  i32);

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vx_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, i64 %2, <vscale x 4 x i1> %3, i32 %4) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vx_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    addi sp, sp, -16
; CHECK-NEXT:    sw a1, 12(sp)
; CHECK-NEXT:    sw a0, 8(sp)
; CHECK-NEXT:    vsetvli zero, a2, e64, m4, ta, mu
; CHECK-NEXT:    addi a0, sp, 8
; CHECK-NEXT:    vlse64.v v28, (a0), zero
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmslt.vv v25, v8, v28, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    addi sp, sp, 16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 %2,
    <vscale x 4 x i1> %3,
    i32 %4)

  ret <vscale x 4 x i1> %a
}

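; There is no vmslt.vi encoding, so the immediate tests below are emitted
; as vmsle.vi with the immediate decremented by one (x < imm is
; equivalent to x <= imm - 1 for these signed compares).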
define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i8_i8(<vscale x 1 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -16
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i8.i8(
    <vscale x 1 x i8> %0,
    i8 -15,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i8_i8(<vscale x 1 x i1> %0, <vscale x 1 x i8> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf8, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, -15, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i8.i8(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i8> %1,
    i8 -14,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i8_i8(<vscale x 2 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -14
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i8.i8(
    <vscale x 2 x i8> %0,
    i8 -13,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i8_i8(<vscale x 2 x i1> %0, <vscale x 2 x i8> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, -13, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i8.i8(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i8> %1,
    i8 -12,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i8_i8(<vscale x 4 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -12
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i8.i8(
    <vscale x 4 x i8> %0,
    i8 -11,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i8_i8(<vscale x 4 x i1> %0, <vscale x 4 x i8> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, -11, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i8.i8(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i8> %1,
    i8 -10,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i8_i8(<vscale x 8 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -10
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i8.i8(
    <vscale x 8 x i8> %0,
    i8 -9,
    i32 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i8_i8(<vscale x 8 x i1> %0, <vscale x 8 x i8> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, -9, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i8.i8(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i8> %1,
    i8 -8,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i8_i8(<vscale x 16 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i8.i8(
    <vscale x 16 x i8> %0,
    i8 -7,
    i32 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i8_i8(<vscale x 16 x i1> %0, <vscale x 16 x i8> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsle.vi v25, v8, -7, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i8.i8(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i8> %1,
    i8 -6,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmslt_vi_nxv32i8_i8(<vscale x 32 x i8> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -6
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.nxv32i8.i8(
    <vscale x 32 x i8> %0,
    i8 -5,
    i32 %1)

  ret <vscale x 32 x i1> %a
}

define <vscale x 32 x i1> @intrinsic_vmslt_mask_vi_nxv32i8_i8(<vscale x 32 x i1> %0, <vscale x 32 x i8> %1, <vscale x 32 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv32i8_i8:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e8, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsle.vi v25, v8, -5, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 32 x i1> @llvm.riscv.vmslt.mask.nxv32i8.i8(
    <vscale x 32 x i1> %0,
    <vscale x 32 x i8> %1,
    i8 -4,
    <vscale x 32 x i1> %2,
    i32 %3)

  ret <vscale x 32 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i16_i16(<vscale x 1 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -4
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i16.i16(
    <vscale x 1 x i16> %0,
    i16 -3,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i16_i16(<vscale x 1 x i1> %0, <vscale x 1 x i16> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, -3, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i16.i16(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i16> %1,
    i16 -2,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i16_i16(<vscale x 2 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -2
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i16.i16(
    <vscale x 2 x i16> %0,
    i16 -1,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i16_i16(<vscale x 2 x i1> %0, <vscale x 2 x i16> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, -1, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i16.i16(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i16> %1,
    i16 0,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i16_i16(<vscale x 4 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, -1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i16.i16(
    <vscale x 4 x i16> %0,
    i16 0,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i16_i16(<vscale x 4 x i1> %0, <vscale x 4 x i16> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, 0, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i16.i16(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i16> %1,
    i16 1,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i16_i16(<vscale x 8 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 1
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i16.i16(
    <vscale x 8 x i16> %0,
    i16 2,
    i32 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i16_i16(<vscale x 8 x i1> %0, <vscale x 8 x i16> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsle.vi v25, v8, 2, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i16.i16(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i16> %1,
    i16 3,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmslt_vi_nxv16i16_i16(<vscale x 16 x i16> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 3
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.nxv16i16.i16(
    <vscale x 16 x i16> %0,
    i16 4,
    i32 %1)

  ret <vscale x 16 x i1> %a
}

define <vscale x 16 x i1> @intrinsic_vmslt_mask_vi_nxv16i16_i16(<vscale x 16 x i1> %0, <vscale x 16 x i16> %1, <vscale x 16 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv16i16_i16:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e16, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsle.vi v25, v8, 4, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 16 x i1> @llvm.riscv.vmslt.mask.nxv16i16.i16(
    <vscale x 16 x i1> %0,
    <vscale x 16 x i16> %1,
    i16 5,
    <vscale x 16 x i1> %2,
    i32 %3)

  ret <vscale x 16 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i32_i32(<vscale x 1 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 5
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i32.i32(
    <vscale x 1 x i32> %0,
    i32 6,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i32_i32(<vscale x 1 x i1> %0, <vscale x 1 x i32> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, mf2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, 6, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i32.i32(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i32> %1,
    i32 7,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i32_i32(<vscale x 2 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 7
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i32.i32(
    <vscale x 2 x i32> %0,
    i32 8,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i32_i32(<vscale x 2 x i1> %0, <vscale x 2 x i32> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i32.i32(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i32> %1,
    i32 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i32_i32(<vscale x 4 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 9
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i32.i32(
    <vscale x 4 x i32> %0,
    i32 10,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i32_i32(<vscale x 4 x i1> %0, <vscale x 4 x i32> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsle.vi v25, v8, 10, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i32.i32(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i32> %1,
    i32 11,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_vi_nxv8i32_i32(<vscale x 8 x i32> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 11
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.nxv8i32.i32(
    <vscale x 8 x i32> %0,
    i32 12,
    i32 %1)

  ret <vscale x 8 x i1> %a
}

define <vscale x 8 x i1> @intrinsic_vmslt_mask_vi_nxv8i32_i32(<vscale x 8 x i1> %0, <vscale x 8 x i32> %1, <vscale x 8 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv8i32_i32:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e32, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsle.vi v25, v8, 12, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 8 x i1> @llvm.riscv.vmslt.mask.nxv8i32.i32(
    <vscale x 8 x i1> %0,
    <vscale x 8 x i32> %1,
    i32 13,
    <vscale x 8 x i1> %2,
    i32 %3)

  ret <vscale x 8 x i1> %a
}

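; For e64 an immediate that fits the simm5 field still needs no stack
; splat on rv32: the compare is emitted directly as vmsle.vi.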
define <vscale x 1 x i1> @intrinsic_vmslt_vi_nxv1i64_i64(<vscale x 1 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.nxv1i64.i64(
    <vscale x 1 x i64> %0,
    i64 9,
    i32 %1)

  ret <vscale x 1 x i1> %a
}

define <vscale x 1 x i1> @intrinsic_vmslt_mask_vi_nxv1i64_i64(<vscale x 1 x i1> %0, <vscale x 1 x i64> %1, <vscale x 1 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv1i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m1, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v9
; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 1 x i1> @llvm.riscv.vmslt.mask.nxv1i64.i64(
    <vscale x 1 x i1> %0,
    <vscale x 1 x i64> %1,
    i64 9,
    <vscale x 1 x i1> %2,
    i32 %3)

  ret <vscale x 1 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_vi_nxv2i64_i64(<vscale x 2 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.nxv2i64.i64(
    <vscale x 2 x i64> %0,
    i64 9,
    i32 %1)

  ret <vscale x 2 x i1> %a
}

define <vscale x 2 x i1> @intrinsic_vmslt_mask_vi_nxv2i64_i64(<vscale x 2 x i1> %0, <vscale x 2 x i64> %1, <vscale x 2 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv2i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m2, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v10
; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 2 x i1> @llvm.riscv.vmslt.mask.nxv2i64.i64(
    <vscale x 2 x i1> %0,
    <vscale x 2 x i64> %1,
    i64 9,
    <vscale x 2 x i1> %2,
    i32 %3)

  ret <vscale x 2 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_vi_nxv4i64_i64(<vscale x 4 x i64> %0, i32 %1) nounwind {
; CHECK-LABEL: intrinsic_vmslt_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmsle.vi v0, v8, 8
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.nxv4i64.i64(
    <vscale x 4 x i64> %0,
    i64 9,
    i32 %1)

  ret <vscale x 4 x i1> %a
}

define <vscale x 4 x i1> @intrinsic_vmslt_mask_vi_nxv4i64_i64(<vscale x 4 x i1> %0, <vscale x 4 x i64> %1, <vscale x 4 x i1> %2, i32 %3) nounwind {
; CHECK-LABEL: intrinsic_vmslt_mask_vi_nxv4i64_i64:
; CHECK:       # %bb.0: # %entry
; CHECK-NEXT:    vmv1r.v v25, v0
; CHECK-NEXT:    vsetvli zero, a0, e64, m4, ta, mu
; CHECK-NEXT:    vmv1r.v v0, v12
; CHECK-NEXT:    vmsle.vi v25, v8, 8, v0.t
; CHECK-NEXT:    vmv1r.v v0, v25
; CHECK-NEXT:    ret
entry:
  %a = call <vscale x 4 x i1> @llvm.riscv.vmslt.mask.nxv4i64.i64(
    <vscale x 4 x i1> %0,
    <vscale x 4 x i64> %1,
    i64 9,
    <vscale x 4 x i1> %2,
    i32 %3)

  ret <vscale x 4 x i1> %a
}
